hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3c2bbf993e5d9a61acc51620c1b3d80a4ee045 | 3,213 | py | Python | piat/servers/trap_server.py | Ali-aqrabawi/piat | 3f6744490e846a3a5db32affad9f89439bf5b7f2 | [
"MIT"
] | 7 | 2019-04-29T08:30:13.000Z | 2022-01-07T01:50:01.000Z | piat/servers/trap_server.py | Ali-aqrabawi/piat | 3f6744490e846a3a5db32affad9f89439bf5b7f2 | [
"MIT"
] | 3 | 2019-05-03T18:17:59.000Z | 2019-05-10T15:55:32.000Z | piat/servers/trap_server.py | Ali-aqrabawi/piat | 3f6744490e846a3a5db32affad9f89439bf5b7f2 | [
"MIT"
] | 2 | 2019-04-28T20:24:29.000Z | 2019-12-19T23:57:14.000Z | import os
from pysnmp.entity import engine, config
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.smi import view, builder
from pysnmp.entity.rfc3413 import ntfrcv
from piat.utils.threads import ThreadsManager
from piat.utils.decorators import restart_on_failure
from piat.parsers.traps.trap import TrapMsg
from piat.utils.logger import get_logger
from piat.exceptions import PiatError
LOGGER = get_logger(__name__)
class TrapsHandler:
""" Trap Msg Handler"""
def __init__(self, callbacks, viewer):
self._callbacks = callbacks
self._viewer = viewer
LOGGER.info("registered %r callbacks to Trap server",
[func.__name__ for func in self._callbacks])
def handle(self, snmp_engine,
state_reference, context_engine_id,
context_name, var_binds, cb_ctx):
""" Msg method Handler """
LOGGER.debug('Notification from ContextEngineId "%s", ContextName "%s"',
context_engine_id.prettyPrint(),
context_name.prettyPrint())
trap_log = TrapMsg(var_binds, self._viewer)
proc_mgr = ThreadsManager()
for callback in self._callbacks:
proc_mgr.add(callback, args=[trap_log, ])
proc_mgr.start()
class SnmpTrapServer:
""" Snmp Trap Server """
def __init__(self,
callbacks,
community='public',
port=162,
add_mib_dir=''):
self._callbacks = callbacks
self._port = port
self._community = community
self._add_mib_dir = add_mib_dir
self._setup()
def _setup(self):
""" Setup Server """
assert isinstance(self._callbacks, list), \
"callbacks should be list of functions type not %s" % type(
self._callbacks)
snmp_engine = engine.SnmpEngine()
build = snmp_engine.getMibBuilder()
if self._add_mib_dir:
if not os.path.exists(self._add_mib_dir):
raise PiatError("mib dir does not exist, dir=%r" % self._add_mib_dir)
if not os.path.isdir(self._add_mib_dir):
raise PiatError("add_mib_dir should be a directory not a file, add_mib_dir=%r" % self._add_mib_dir)
build.addMibSources(builder.DirMibSource(self._add_mib_dir))
build.loadModules()
viewer = view.MibViewController(build)
# UDP over IPv4, first listening interface/port
transport = udp.UdpTransport()
config.addTransport(snmp_engine, udp.domainName + (1,), transport.openServerMode(('0.0.0.0', self._port)))
# SecurityName <-> CommunityName mapping
config.addV1System(snmp_engine, '????', self._community)
# Register SNMP Application at the SNMP engine
handler = TrapsHandler(self._callbacks, viewer)
ntfrcv.NotificationReceiver(snmp_engine, handler.handle)
self._snmpEngine = snmp_engine
@restart_on_failure
def start(self):
""" Start the Server"""
LOGGER.info("Trap service Started...")
self._snmpEngine.transportDispatcher.jobStarted(1)
self._snmpEngine.transportDispatcher.runDispatcher()
| 35.7 | 115 | 0.646748 | import os
from pysnmp.entity import engine, config
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.smi import view, builder
from pysnmp.entity.rfc3413 import ntfrcv
from piat.utils.threads import ThreadsManager
from piat.utils.decorators import restart_on_failure
from piat.parsers.traps.trap import TrapMsg
from piat.utils.logger import get_logger
from piat.exceptions import PiatError
LOGGER = get_logger(__name__)
class TrapsHandler:
def __init__(self, callbacks, viewer):
self._callbacks = callbacks
self._viewer = viewer
LOGGER.info("registered %r callbacks to Trap server",
[func.__name__ for func in self._callbacks])
def handle(self, snmp_engine,
state_reference, context_engine_id,
context_name, var_binds, cb_ctx):
LOGGER.debug('Notification from ContextEngineId "%s", ContextName "%s"',
context_engine_id.prettyPrint(),
context_name.prettyPrint())
trap_log = TrapMsg(var_binds, self._viewer)
proc_mgr = ThreadsManager()
for callback in self._callbacks:
proc_mgr.add(callback, args=[trap_log, ])
proc_mgr.start()
class SnmpTrapServer:
def __init__(self,
callbacks,
community='public',
port=162,
add_mib_dir=''):
self._callbacks = callbacks
self._port = port
self._community = community
self._add_mib_dir = add_mib_dir
self._setup()
def _setup(self):
assert isinstance(self._callbacks, list), \
"callbacks should be list of functions type not %s" % type(
self._callbacks)
snmp_engine = engine.SnmpEngine()
build = snmp_engine.getMibBuilder()
if self._add_mib_dir:
if not os.path.exists(self._add_mib_dir):
raise PiatError("mib dir does not exist, dir=%r" % self._add_mib_dir)
if not os.path.isdir(self._add_mib_dir):
raise PiatError("add_mib_dir should be a directory not a file, add_mib_dir=%r" % self._add_mib_dir)
build.addMibSources(builder.DirMibSource(self._add_mib_dir))
build.loadModules()
viewer = view.MibViewController(build)
transport = udp.UdpTransport()
config.addTransport(snmp_engine, udp.domainName + (1,), transport.openServerMode(('0.0.0.0', self._port)))
config.addV1System(snmp_engine, '????', self._community)
handler = TrapsHandler(self._callbacks, viewer)
ntfrcv.NotificationReceiver(snmp_engine, handler.handle)
self._snmpEngine = snmp_engine
@restart_on_failure
def start(self):
LOGGER.info("Trap service Started...")
self._snmpEngine.transportDispatcher.jobStarted(1)
self._snmpEngine.transportDispatcher.runDispatcher()
| true | true |
1c3c2e9931e330b8af72364c8f069c6dee7b3a6a | 61 | py | Python | ex047.py | mateusloped/curso-python | 1b5b3927141e985911c9b2344b3d4d663a90c29c | [
"MIT"
] | null | null | null | ex047.py | mateusloped/curso-python | 1b5b3927141e985911c9b2344b3d4d663a90c29c | [
"MIT"
] | null | null | null | ex047.py | mateusloped/curso-python | 1b5b3927141e985911c9b2344b3d4d663a90c29c | [
"MIT"
] | null | null | null | for c in range(2, 51, 2):
print(c, end=' ')
print('FIM') | 20.333333 | 25 | 0.52459 | for c in range(2, 51, 2):
print(c, end=' ')
print('FIM') | true | true |
1c3c2ebbf2a88dc388bb0314813d8b32b385e4b0 | 3,133 | py | Python | rqalpha/data/instrument_mixin.py | mysky528/rqalpha | ecd550fc30aee96f9995e8152e2c48f5512f8b11 | [
"Apache-2.0"
] | 3 | 2017-07-11T15:37:24.000Z | 2021-11-22T14:21:13.000Z | rqalpha/data/instrument_mixin.py | mysky528/rqalpha | ecd550fc30aee96f9995e8152e2c48f5512f8b11 | [
"Apache-2.0"
] | null | null | null | rqalpha/data/instrument_mixin.py | mysky528/rqalpha | ecd550fc30aee96f9995e8152e2c48f5512f8b11 | [
"Apache-2.0"
] | 2 | 2019-04-26T07:51:08.000Z | 2020-12-01T20:59:04.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
class InstrumentMixin(object):
def __init__(self, instruments):
self._instruments = {i.order_book_id: i for i in instruments}
self._sym_id_map = {i.symbol: k for k, i in six.iteritems(self._instruments)
# 过滤掉 CSI300, SSE50, CSI500, SSE180
if not i.order_book_id.endswith('INDX')}
try:
# FIXME
# 沪深300 中证500 固定使用上证的
for o in ['000300.XSHG', '000905.XSHG']:
self._sym_id_map[self._instruments[o].symbol] = o
# 上证180 及 上证180指数 两个symbol都指向 000010.XSHG
self._sym_id_map[self._instruments['SSE180.INDX'].symbol] = '000010.XSHG'
except KeyError:
pass
def sector(self, code):
return [v.order_book_id for v in self._instruments.values()
if v.type == 'CS' and v.sector_code == code]
def industry(self, code):
return [v.order_book_id for v in self._instruments.values()
if v.type == 'CS' and v.industry_code == code]
def concept(self, *concepts):
return [v.order_book_id for v in self._instruments.values()
if v.type == 'CS' and any(c in v.concept_names.split('|') for c in concepts)]
def all_instruments(self, types, dt=None):
return [i for i in self._instruments.values()
if ((dt is None or i.listed_date.date() <= dt.date() <= i.de_listed_date.date()) and
(types is None or i.type in types))]
def _instrument(self, sym_or_id):
try:
return self._instruments[sym_or_id]
except KeyError:
try:
sym_or_id = self._sym_id_map[sym_or_id]
return self._instruments[sym_or_id]
except KeyError:
return None
def instruments(self, sym_or_ids):
if isinstance(sym_or_ids, six.string_types):
return self._instrument(sym_or_ids)
return [i for i in [self._instrument(sid) for sid in sym_or_ids] if i is not None]
def get_future_contracts(self, underlying, date):
date = date.replace(hour=0, minute=0, second=0)
futures = [v for o, v in six.iteritems(self._instruments)
if v.type == 'Future' and v.underlying_symbol == underlying and
not o.endswith('88') and not o.endswith('99')]
if not futures:
return []
return sorted(i.order_book_id for i in futures if i.listed_date <= date <= i.de_listed_date)
| 40.166667 | 100 | 0.620172 |
import six
class InstrumentMixin(object):
def __init__(self, instruments):
self._instruments = {i.order_book_id: i for i in instruments}
self._sym_id_map = {i.symbol: k for k, i in six.iteritems(self._instruments)
if not i.order_book_id.endswith('INDX')}
try:
for o in ['000300.XSHG', '000905.XSHG']:
self._sym_id_map[self._instruments[o].symbol] = o
self._sym_id_map[self._instruments['SSE180.INDX'].symbol] = '000010.XSHG'
except KeyError:
pass
def sector(self, code):
return [v.order_book_id for v in self._instruments.values()
if v.type == 'CS' and v.sector_code == code]
def industry(self, code):
return [v.order_book_id for v in self._instruments.values()
if v.type == 'CS' and v.industry_code == code]
def concept(self, *concepts):
return [v.order_book_id for v in self._instruments.values()
if v.type == 'CS' and any(c in v.concept_names.split('|') for c in concepts)]
def all_instruments(self, types, dt=None):
return [i for i in self._instruments.values()
if ((dt is None or i.listed_date.date() <= dt.date() <= i.de_listed_date.date()) and
(types is None or i.type in types))]
def _instrument(self, sym_or_id):
try:
return self._instruments[sym_or_id]
except KeyError:
try:
sym_or_id = self._sym_id_map[sym_or_id]
return self._instruments[sym_or_id]
except KeyError:
return None
def instruments(self, sym_or_ids):
if isinstance(sym_or_ids, six.string_types):
return self._instrument(sym_or_ids)
return [i for i in [self._instrument(sid) for sid in sym_or_ids] if i is not None]
def get_future_contracts(self, underlying, date):
date = date.replace(hour=0, minute=0, second=0)
futures = [v for o, v in six.iteritems(self._instruments)
if v.type == 'Future' and v.underlying_symbol == underlying and
not o.endswith('88') and not o.endswith('99')]
if not futures:
return []
return sorted(i.order_book_id for i in futures if i.listed_date <= date <= i.de_listed_date)
| true | true |
1c3c30b8afcf0d6188c24b6749e59cf45ef9a702 | 170 | py | Python | problem/01000~09999/04344/4344.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/04344/4344.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/04344/4344.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | for i in range(int(input())):
x=[*map(int,input().split())]
a=sum(x[1:])
p=a/x[0]
c=0
for i in range(1,len(x)):
if x[i]>p: c+=1
print('%.3f%%'%(c/(len(x)-1)*100)) | 21.25 | 35 | 0.511765 | for i in range(int(input())):
x=[*map(int,input().split())]
a=sum(x[1:])
p=a/x[0]
c=0
for i in range(1,len(x)):
if x[i]>p: c+=1
print('%.3f%%'%(c/(len(x)-1)*100)) | true | true |
1c3c30eaabdca35f894764b12da5eeff5b76e3e6 | 724 | py | Python | lab9.py | morganqr/ia241-github-1 | 212d0d67c5c6b8b46997bad370b17c2fe7584320 | [
"MIT"
] | null | null | null | lab9.py | morganqr/ia241-github-1 | 212d0d67c5c6b8b46997bad370b17c2fe7584320 | [
"MIT"
] | null | null | null | lab9.py | morganqr/ia241-github-1 | 212d0d67c5c6b8b46997bad370b17c2fe7584320 | [
"MIT"
] | null | null | null | '''
lab 9
'''
#3.1
class my_stat():
def cal_sigma(self,m,n):
result = 0
for i in range (n, m+1):
result = result +i
return result
def cal_pi(self,m,n):
result = 1
for i in range (n,m+1):
result = result*i
return result
def cal_f(self,m):
if m ==0:
return 1
else:
return m* self.cal_f(m-1)
def cal_p(self,m,n):
return self.cal_f(m)/self.cal_f(m-n)
#3.2
my_cal = my_stat()
print(my_cal.cal_sigma(5,3))
print(my_cal.cal_pi(5,3))
print(my_cal.cal_f(5))
print(my_cal.cal_p(5,2))
| 15.083333 | 44 | 0.44337 |
class my_stat():
def cal_sigma(self,m,n):
result = 0
for i in range (n, m+1):
result = result +i
return result
def cal_pi(self,m,n):
result = 1
for i in range (n,m+1):
result = result*i
return result
def cal_f(self,m):
if m ==0:
return 1
else:
return m* self.cal_f(m-1)
def cal_p(self,m,n):
return self.cal_f(m)/self.cal_f(m-n)
my_cal = my_stat()
print(my_cal.cal_sigma(5,3))
print(my_cal.cal_pi(5,3))
print(my_cal.cal_f(5))
print(my_cal.cal_p(5,2))
| true | true |
1c3c3220f3e1c306f67c1041d89c521be650fc0e | 5,715 | py | Python | autovectorization-tests/scripts/procedures.py | clayne/toys | ec06411e2d3b920403607888d4a573e41390ee5b | [
"BSD-2-Clause"
] | null | null | null | autovectorization-tests/scripts/procedures.py | clayne/toys | ec06411e2d3b920403607888d4a573e41390ee5b | [
"BSD-2-Clause"
] | null | null | null | autovectorization-tests/scripts/procedures.py | clayne/toys | ec06411e2d3b920403607888d4a573e41390ee5b | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
PROCEDURES = {
'accumulate_default.cpp' : {
'title' : "accumulate --- default",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/accumulate',
'procedures' : ["accumulate_epi8", "accumulate_epi32"],
},
'accumulate_custom.cpp' : {
'title' : "accumulate --- custom",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/accumulate',
'procedures' : ["accumulate_custom_epi8", "accumulate_custom_epi32"],
},
'all_of.cpp' : {
'title' : "all_of",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/all_any_none_of',
'procedures' : ["all_of_epi8", "all_of_epi32"],
},
'any_of.cpp' : {
'title' : "any_of",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/all_any_none_of',
'procedures' : ["any_of_epi8", "any_of_epi32"],
},
'copy.cpp' : {
'title' : "copy",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/copy',
'procedures' : ["copy_epi8", "copy_epi32"],
},
'copy_if.cpp' : {
'title' : "copy_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/copy',
'procedures' : ["copy_if_epi8", "copy_if_epi32"],
},
'count.cpp' : {
'title' : "count",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/count',
'procedures' : ["count_epi8", "count_epi32"],
},
'count_if.cpp' : {
'title' : "count_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/count',
'procedures' : ["count_if_epi8", "count_if_epi32"],
},
'fill.cpp' : {
'title' : "fill",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/fill',
'procedures' : ["fill_epi8", "fill_epi32"],
},
'find.cpp' : {
'title' : "find",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/find',
'procedures' : ["find_epi8", "find_epi32"],
},
'find_if.cpp' : {
'title' : "find_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/find',
'procedures' : ["find_if_epi8", "find_if_epi32"],
},
'is_sorted.cpp' : {
'title' : "is_sorted",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/is_sorted',
'procedures' : ["is_sorted_epi8", "is_sorted_epi32"],
},
'none_of.cpp' : {
'title' : "none_of",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/all_any_none_of',
'procedures' : ["none_of_epi8", "none_of_epi32"],
},
'remove.cpp' : {
'title' : "remove",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/remove',
'procedures' : ["remove_epi8", "remove_epi32"],
},
'remove_if.cpp' : {
'title' : "remove_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/remove',
'procedures' : ["remove_if_epi8", "remove_if_epi32"],
},
'replace.cpp' : {
'title' : "replace",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/replace',
'procedures' : ["replace_epi8", "replace_epi32"],
},
'replace_if.cpp' : {
'title' : "replace_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/replace',
'procedures' : ["replace_if_epi8", "replace_if_epi32"],
},
'reverse.cpp' : {
'title' : "reverse",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/reverse',
'procedures' : ["reverse_epi8", "reverse_epi32"],
},
'transform_abs.cpp' : {
'title' : "transform --- abs",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/transform',
'procedures' : ["transform_abs_epi8", "transform_abs_epi32"],
},
'transform_inc.cpp' : {
'title' : "transform --- increment",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/transform',
'procedures' : ["transform_inc_epi8", "transform_inc_epi32"],
},
'transform_neg.cpp' : {
'title' : "transform --- negation",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/transform',
'procedures' : ["transform_neg_epi8", "transform_neg_epi32"],
},
'unique.cpp' : {
'title' : "unique",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/unique',
'procedures' : ["unique_epi8", "unique_epi32"],
},
}
def print_checklist(file):
procedures = []
for item in PROCEDURES.values():
for procedure in item['procedures']:
procedures.append(procedure)
def writeln(s):
file.write(s)
file.write('\n')
writeln('compiler: ')
writeln('cmdline: ')
writeln('')
for procedure in sorted(procedures):
writeln('%s: ??' % procedure)
if __name__ == '__main__':
print_checklist(sys.stdout)
| 35.943396 | 92 | 0.472616 |
import sys
PROCEDURES = {
'accumulate_default.cpp' : {
'title' : "accumulate --- default",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/accumulate',
'procedures' : ["accumulate_epi8", "accumulate_epi32"],
},
'accumulate_custom.cpp' : {
'title' : "accumulate --- custom",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/accumulate',
'procedures' : ["accumulate_custom_epi8", "accumulate_custom_epi32"],
},
'all_of.cpp' : {
'title' : "all_of",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/all_any_none_of',
'procedures' : ["all_of_epi8", "all_of_epi32"],
},
'any_of.cpp' : {
'title' : "any_of",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/all_any_none_of',
'procedures' : ["any_of_epi8", "any_of_epi32"],
},
'copy.cpp' : {
'title' : "copy",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/copy',
'procedures' : ["copy_epi8", "copy_epi32"],
},
'copy_if.cpp' : {
'title' : "copy_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/copy',
'procedures' : ["copy_if_epi8", "copy_if_epi32"],
},
'count.cpp' : {
'title' : "count",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/count',
'procedures' : ["count_epi8", "count_epi32"],
},
'count_if.cpp' : {
'title' : "count_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/count',
'procedures' : ["count_if_epi8", "count_if_epi32"],
},
'fill.cpp' : {
'title' : "fill",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/fill',
'procedures' : ["fill_epi8", "fill_epi32"],
},
'find.cpp' : {
'title' : "find",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/find',
'procedures' : ["find_epi8", "find_epi32"],
},
'find_if.cpp' : {
'title' : "find_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/find',
'procedures' : ["find_if_epi8", "find_if_epi32"],
},
'is_sorted.cpp' : {
'title' : "is_sorted",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/is_sorted',
'procedures' : ["is_sorted_epi8", "is_sorted_epi32"],
},
'none_of.cpp' : {
'title' : "none_of",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/all_any_none_of',
'procedures' : ["none_of_epi8", "none_of_epi32"],
},
'remove.cpp' : {
'title' : "remove",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/remove',
'procedures' : ["remove_epi8", "remove_epi32"],
},
'remove_if.cpp' : {
'title' : "remove_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/remove',
'procedures' : ["remove_if_epi8", "remove_if_epi32"],
},
'replace.cpp' : {
'title' : "replace",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/replace',
'procedures' : ["replace_epi8", "replace_epi32"],
},
'replace_if.cpp' : {
'title' : "replace_if",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/replace',
'procedures' : ["replace_if_epi8", "replace_if_epi32"],
},
'reverse.cpp' : {
'title' : "reverse",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/reverse',
'procedures' : ["reverse_epi8", "reverse_epi32"],
},
'transform_abs.cpp' : {
'title' : "transform --- abs",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/transform',
'procedures' : ["transform_abs_epi8", "transform_abs_epi32"],
},
'transform_inc.cpp' : {
'title' : "transform --- increment",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/transform',
'procedures' : ["transform_inc_epi8", "transform_inc_epi32"],
},
'transform_neg.cpp' : {
'title' : "transform --- negation",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/transform',
'procedures' : ["transform_neg_epi8", "transform_neg_epi32"],
},
'unique.cpp' : {
'title' : "unique",
'link' : 'https://en.cppreference.com/w/cpp/algorithm/unique',
'procedures' : ["unique_epi8", "unique_epi32"],
},
}
def print_checklist(file):
procedures = []
for item in PROCEDURES.values():
for procedure in item['procedures']:
procedures.append(procedure)
def writeln(s):
file.write(s)
file.write('\n')
writeln('compiler: ')
writeln('cmdline: ')
writeln('')
for procedure in sorted(procedures):
writeln('%s: ??' % procedure)
if __name__ == '__main__':
print_checklist(sys.stdout)
| true | true |
1c3c322ab4a9cfca0ce7257199597e574f2a659c | 1,005 | py | Python | geotrek/api/mobile/urls.py | mviadere-openig/Geotrek-admin | 7f2343db50e97bb407e66dc499ab3684e1629661 | [
"BSD-2-Clause"
] | null | null | null | geotrek/api/mobile/urls.py | mviadere-openig/Geotrek-admin | 7f2343db50e97bb407e66dc499ab3684e1629661 | [
"BSD-2-Clause"
] | null | null | null | geotrek/api/mobile/urls.py | mviadere-openig/Geotrek-admin | 7f2343db50e97bb407e66dc499ab3684e1629661 | [
"BSD-2-Clause"
] | null | null | null | from django.conf import settings
from django.conf.urls import url, include
from rest_framework import routers
from geotrek.api.mobile import views as api_mobile
from geotrek.api.mobile.views_sync import SyncMobileRedirect, sync_mobile_view, sync_mobile_update_json
router = routers.DefaultRouter()
if 'geotrek.flatpages' in settings.INSTALLED_APPS:
router.register(r'flatpages', api_mobile.FlatPageViewSet, base_name='flatpage')
if 'geotrek.trekking' in settings.INSTALLED_APPS:
router.register(r'treks', api_mobile.TrekViewSet, base_name='treks')
urlpatterns = [
url(r'^$', api_mobile.SwaggerSchemaView.as_view(), name="schema"),
url(r'^', include(router.urls)),
url(r'^settings/$', api_mobile.SettingsView.as_view(), name='settings'),
url(r'^commands/sync$', SyncMobileRedirect.as_view(), name='sync_mobiles'),
url(r'^commands/syncview$', sync_mobile_view, name='sync_mobiles_view'),
url(r'^commands/statesync/$', sync_mobile_update_json, name='sync_mobiles_state'),
]
| 47.857143 | 103 | 0.768159 | from django.conf import settings
from django.conf.urls import url, include
from rest_framework import routers
from geotrek.api.mobile import views as api_mobile
from geotrek.api.mobile.views_sync import SyncMobileRedirect, sync_mobile_view, sync_mobile_update_json
router = routers.DefaultRouter()
if 'geotrek.flatpages' in settings.INSTALLED_APPS:
router.register(r'flatpages', api_mobile.FlatPageViewSet, base_name='flatpage')
if 'geotrek.trekking' in settings.INSTALLED_APPS:
router.register(r'treks', api_mobile.TrekViewSet, base_name='treks')
urlpatterns = [
url(r'^$', api_mobile.SwaggerSchemaView.as_view(), name="schema"),
url(r'^', include(router.urls)),
url(r'^settings/$', api_mobile.SettingsView.as_view(), name='settings'),
url(r'^commands/sync$', SyncMobileRedirect.as_view(), name='sync_mobiles'),
url(r'^commands/syncview$', sync_mobile_view, name='sync_mobiles_view'),
url(r'^commands/statesync/$', sync_mobile_update_json, name='sync_mobiles_state'),
]
| true | true |
1c3c32560d8042e900c70a08e3613c7017b3a716 | 17,106 | py | Python | Tests/test_PDB_MMCIF2Dict.py | chrisbarnettster/biopython | 778a8b113b8b9213eabf3a64bdaab3ecb7d57b7b | [
"BSD-3-Clause"
] | 1 | 2021-10-17T12:42:22.000Z | 2021-10-17T12:42:22.000Z | Tests/test_PDB_MMCIF2Dict.py | chrisbarnettster/biopython | 778a8b113b8b9213eabf3a64bdaab3ecb7d57b7b | [
"BSD-3-Clause"
] | null | null | null | Tests/test_PDB_MMCIF2Dict.py | chrisbarnettster/biopython | 778a8b113b8b9213eabf3a64bdaab3ecb7d57b7b | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2017 by Francesco Gastaldello. All rights reserved.
# Revisions copyright 2017 by Peter Cock. All rights reserved.
#
# Converted by Francesco Gastaldello from an older unit test copyright 2002
# by Thomas Hamelryck.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Unit tests for the Bio.PDB.MMCIF2Dict module."""
import unittest
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy if you want to use Bio.PDB."
) from None
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
import io
import textwrap
class MMCIF2dictTests(unittest.TestCase):
    def test_MMCIF2dict(self):
        """Parse a full mmCIF file (1A8O) and spot-check representative fields.

        Covers three kinds of values: a looped single-token column
        (_entity_poly_seq.mon_id), a large numeric column parsed as strings
        (_atom_site.Cartn_x), and a multi-line semicolon-delimited text field
        (_struct_ref.pdbx_seq_one_letter_code).
        """
        filename = "PDB/1A8O.cif"
        mmcif = MMCIF2Dict(filename)
        # The parser should produce one dictionary key per mmCIF data tag.
        self.assertEqual(len(mmcif.keys()), 575)
        # Turn black code style off
        # fmt: off
        self.assertEqual(
            mmcif["_entity_poly_seq.mon_id"],
            [
                "MSE", "ASP", "ILE", "ARG", "GLN", "GLY", "PRO", "LYS", "GLU", "PRO",
                "PHE", "ARG", "ASP", "TYR", "VAL", "ASP", "ARG", "PHE", "TYR", "LYS",
                "THR", "LEU", "ARG", "ALA", "GLU", "GLN", "ALA", "SER", "GLN", "GLU",
                "VAL", "LYS", "ASN", "TRP", "MSE", "THR", "GLU", "THR", "LEU", "LEU",
                "VAL", "GLN", "ASN", "ALA", "ASN", "PRO", "ASP", "CYS", "LYS", "THR",
                "ILE", "LEU", "LYS", "ALA", "LEU", "GLY", "PRO", "GLY", "ALA", "THR",
                "LEU", "GLU", "GLU", "MSE", "MSE", "THR", "ALA", "CYS", "GLN", "GLY",
            ]
        )
        # Numeric columns are kept as their original string tokens; no float
        # conversion is performed by MMCIF2Dict.
        self.assertEqual(
            mmcif["_atom_site.Cartn_x"],
            [
                "19.594", "20.255", "20.351", "19.362", "19.457", "20.022", "21.718",
                "21.424", "21.554", "21.835", "21.947", "21.678", "23.126", "23.098",
                "23.433", "22.749", "22.322", "22.498", "21.220", "20.214", "23.062",
                "24.282", "23.423", "25.429", "21.280", "20.173", "20.766", "21.804",
                "19.444", "18.724", "18.011", "17.416", "16.221", "15.459", "15.824",
                "20.116", "20.613", "20.546", "19.488", "19.837", "20.385", "19.526",
                "18.365", "20.090", "21.675", "21.698", "20.859", "20.729", "20.260",
                "19.435", "20.158", "19.512", "18.993", "20.056", "20.300", "21.486",
                "22.285", "23.286", "24.155", "23.025", "22.117", "21.236", "20.159",
                "19.231", "23.152", "24.037", "23.563", "22.398", "24.086", "25.003",
                "24.858", "23.861", "25.748", "24.459", "24.089", "23.580", "24.111",
                "25.415", "26.116", "25.852", "22.544", "21.960", "22.965", "22.928",
                "20.793", "19.999", "19.234", "20.019", "18.495", "19.286", "18.523",
                "23.861", "24.870", "25.788", "26.158", "25.684", "26.777", "26.215",
                "27.235", "28.136", "28.155", "29.030", "26.137", "26.994", "26.279",
                "26.880", "27.408", "28.345", "28.814", "28.620", "24.992", "24.151",
                "24.025", "24.139", "22.787", "21.629", "21.657", "20.489", "20.571",
                "19.408", "19.450", "18.365", "23.839", "23.720", "24.962", "24.853",
                "23.502", "23.661", "22.120", "26.137", "27.387", "27.511", "27.925",
                "28.595", "28.723", "28.016", "29.545", "27.136", "27.202", "26.238",
                "26.585", "26.850", "27.835", "27.667", "26.352", "25.494", "25.797",
                "24.325", "25.037", "23.984", "24.456", "24.305", "22.761", "21.538",
                "21.301", "20.586", "20.130", "19.415", "19.186", "25.033", "25.526",
                "26.755", "27.015", "25.771", "24.608", "23.508", "24.583", "22.406",
                "23.490", "22.406", "21.326", "27.508", "28.691", "28.183", "28.705",
                "29.455", "30.787", "31.428", "32.618", "33.153", "27.116", "26.508",
                "25.826", "25.827", "25.475", "26.150", "24.741", "25.264", "24.587",
                "25.587", "25.302", "23.789", "22.707", "21.787", "21.910", "26.767",
                "27.806", "28.299", "28.656", "29.006", "28.944", "30.295", "30.744",
                "30.326", "29.441", "30.787", "28.332", "28.789", "27.943", "28.374",
                "28.803", "26.740", "25.833", "25.775", "24.998", "24.425", "24.354",
                "24.816", "24.535", "25.454", "26.601", "26.645", "25.240", "24.885",
                "27.391", "28.884", "29.200", "28.729", "29.998", "24.438", "23.066",
                "23.001", "23.824", "22.370", "22.035", "21.831", "21.174", "20.852",
                "20.917", "19.638", "20.949", "20.315", "18.908", "18.539", "20.262",
                "19.688", "20.414", "21.592", "19.714", "18.136", "16.775", "16.738",
                "15.875", "16.101", "15.478", "14.341", "13.247", "14.542", "17.668",
                "17.730", "18.064", "17.491", "18.754", "18.932", "18.279", "18.971",
                "19.343", "18.126", "17.905", "20.444", "21.777", "22.756", "24.069",
                "24.913", "17.344", "16.136", "15.146", "14.599", "15.468", "16.242",
                "17.164", "15.865", "14.932", "14.017", "14.495", "13.700", "13.904",
                "13.254", "12.332", "13.484", "11.975", "12.666", "14.303", "12.641",
                "14.280", "13.452", "15.793", "16.368", "16.285", "16.053", "17.815",
                "17.939", "17.221", "18.427", "16.438", "16.375", "14.950", "14.778",
                "16.869", "18.228", "16.791", "13.947", "12.529", "12.045", "11.151",
                "11.625", "11.950", "11.054", "11.086", "10.326", "12.589", "12.177",
                "13.076", "12.888", "11.978", "13.202", "10.883", "14.054", "14.963",
                "15.702", "15.846", "15.935", "15.286", "16.327", "14.580", "16.162",
                "16.876", "15.961", "16.391", "17.402", "18.238", "19.553", "18.506",
                "14.695", "13.703", "13.270", "13.262", "12.460", "11.372", "12.854",
                "12.954", "12.503", "13.541", "13.184", "12.008", "10.830", "10.505",
                "10.626", "10.093", "14.820", "15.887", "16.443", "17.416", "17.014",
                "16.627", "15.451", "17.619", "15.830", "16.248", "15.758", "14.809",
                "15.689", "16.404", "16.005", "14.639", "14.122", "17.109", "17.396",
                "16.559", "18.588", "14.018", "12.706", "12.516", "11.536", "12.617",
                "13.288", "14.522", "13.454", "13.383", "13.351", "12.406", "14.564",
                "14.482", "13.353", "15.552", "14.378", "14.488", "13.443", "12.968",
                "15.902", "16.144", "13.061", "12.087", "10.746", "10.157", "11.879",
                "11.014", "11.003", "10.171", "10.269", "10.273", "9.002", "9.101",
                "8.227", "8.612", "8.611", "7.224", "10.191", "10.458", "10.518",
                "9.916", "11.791", "11.677", "12.184", "12.967", "11.222", "11.377",
                "10.082", "9.885", "12.416", "13.824", "14.764", "14.287", "9.214",
                "7.937", "7.048", "6.294", "7.230", "7.828", "7.618", "8.090",
                "7.916", "7.189", "6.419", "6.871", "6.391", "6.449", "7.815",
                "8.305", "7.481", "7.371", "9.788", "10.832", "12.217", "10.789",
                "6.886", "6.080", "6.922", "8.149", "6.294", "7.024", "7.912",
                "7.680", "5.901", "4.734", "4.839", "8.952", "9.861", "10.886",
                "11.642", "10.910", "11.884", "13.285", "13.524", "11.599", "14.199",
                "15.563", "16.391", "16.022", "16.290", "16.498", "15.473", "17.509",
                "18.426", "18.875", "19.012", "19.645", "20.773", "20.264", "21.920",
                "19.082", "19.510", "18.471", "18.816", "19.784", "21.035", "20.954",
                "19.902", "21.955", "17.199", "16.109", "16.001", "15.690", "14.787",
                "14.776", "13.539", "13.220", "12.888", "16.301", "16.274", "17.413",
                "17.209", "16.429", "15.284", "15.332", "13.844", "18.606", "19.764",
                "19.548", "19.922", "21.047", "21.507", "23.105", "22.645", "18.915",
                "18.636", "17.640", "17.807", "18.050", "18.998", "17.730", "16.631",
                "15.593", "16.104", "15.685", "14.486", "17.033", "17.572", "18.985",
                "19.634", "17.525", "15.855", "19.451", "20.802", "21.001", "20.066",
                "21.152", "20.421", "20.725", "21.768", "19.817", "22.226", "22.536",
                "23.683", "24.328", "23.949", "15.165", "19.774", "22.152", "12.938",
                "23.499", "17.568", "13.544", "15.524", "31.249", "11.999", "14.511",
                "7.439", "19.303", "17.114", "21.867", "17.573", "26.151", "20.974",
                "20.796", "28.370", "29.565", "21.248", "25.744", "8.691", "30.789",
                "30.905", "28.623", "24.935", "23.462", "9.924", "28.729", "13.579",
                "23.652", "25.631", "17.799", "23.547", "16.363", "24.125", "33.063",
                "29.209", "10.391", "12.221", "18.997", "16.360", "27.915", "28.158",
                "21.975", "27.069", "30.148", "21.196", "8.864", "13.228", "18.577",
                "20.526", "25.758", "7.838", "20.569", "13.009", "19.229", "17.655",
                "30.445", "9.014", "3.398", "31.603", "16.543", "12.037", "7.261",
                "5.607", "23.532", "30.701", "32.300", "34.351", "9.450", "29.476",
                "13.681", "26.728", "10.004", "30.553", "23.569", "10.927", "17.983",
                "8.191", "32.095", "11.520", "13.249", "15.919", "11.187", "16.743",
            ]
        )
        # Turn black code style on
        # fmt: on
        # Semicolon-delimited text fields keep their embedded newlines; the
        # adjacent string literals below concatenate into one expected value.
        self.assertEqual(
            mmcif["_struct_ref.pdbx_seq_one_letter_code"],
            [
                "GARASVLSGGELDKWEKIRLRPGGKKQYKLKHIVWASRELERFAVNPGLLETSEGCRQILGQLQPSLQTG"
                "SEELRSLYNT\n"
                "IAVLYCVHQRIDVKDTKEALDKIEEEQNKSKKKAQQAAADTGNNSQVSQNYPIVQNLQGQMVHQAISPRT"
                "LNAWVKVVEE\n"
                "KAFSPEVIPMFSALSEGATPQDLNTMLNTVGGHQAAMQMLKETINEEAAEWDRLHPVHAGPIAPGQMREP"
                "RGSDIAGTTS\n"
                "TLQEQIGWMTHNPPIPVGEIYKRWIILGLNKIVRMYSPTSILDIRQGPKEPFRDYVDRFYKTLRAEQASQ"
                "EVKNWMTETL\n"
                "LVQNANPDCKTILKALGPGATLEEMMTACQGVGGPGHKARVLAEAMSQVTNPATIMIQKGNFRNQRKTVK"
                "CFNCGKEGHI\n"
                "AKNCRAPRKKGCWKCGKEGHQMKDCTERQANFLGKIWPSHKGRPGNFLQSRPEPTAPPEESFRFGEETTT"
                "PSQKQEPIDK\n"
                "ELYPLASLRSLFGSDPSSQ"
            ],
        )
    def test_underscores(self):
        """Loop values starting with an underscore are not treated as keys."""
        filename = "PDB/4Q9R_min.cif"
        mmcif = MMCIF2Dict(filename)
        self.assertEqual(len(mmcif.keys()), 5)
        # The underscore-prefixed tokens appear as *values* inside a loop, so
        # they must end up in this list rather than becoming keys themselves.
        self.assertEqual(
            mmcif["_pdbx_audit_revision_item.item"],
            [
                "_atom_site.B_iso_or_equiv",
                "_atom_site.Cartn_x",
                "_atom_site.Cartn_y",
                "_atom_site.Cartn_z",
            ],
        )
    def test_quotefix(self):
        """Quote characters inside quoted values parse correctly."""
        filename = "PDB/1MOM_min.cif"
        mmcif = MMCIF2Dict(filename)
        self.assertEqual(len(mmcif.keys()), 21)
        # The helix ids mix bare tokens, trailing single quotes ("A'") and a
        # value containing both quote kinds ("A'\"") -- all must survive
        # tokenization intact.
        self.assertEqual(
            mmcif["_struct_conf.pdbx_PDB_helix_id"],
            [
                "A",
                "A'",
                "B",
                "C",
                "B'",
                "D",
                "E",
                "C'",
                "F",
                "G",
                "H",
                "D'",
                "E'",
                "A'\"",
                "BC",
                "CD",
                "DE",
            ],
        )
    def test_splitline(self):
        """Single-line tokenization follows the CIF 1.1 lexical rules."""
        filename = "PDB/4Q9R_min.cif"
        mmcif = MMCIF2Dict(filename)
        # Plain whitespace-separated tokens, with and without quoting.
        self.assertEqual(list(mmcif._splitline("foo bar")), ["foo", "bar"])
        self.assertEqual(list(mmcif._splitline("  foo bar  ")), ["foo", "bar"])
        self.assertEqual(list(mmcif._splitline("'foo' bar")), ["foo", "bar"])
        self.assertEqual(list(mmcif._splitline('foo "bar"')), ["foo", "bar"])
        self.assertEqual(list(mmcif._splitline("foo 'bar a' b")), ["foo", "bar a", "b"])
        self.assertEqual(list(mmcif._splitline("foo 'bar'a' b")), ["foo", "bar'a", "b"])
        self.assertEqual(
            list(mmcif._splitline('foo "bar\' a" b')), ["foo", "bar' a", "b"]
        )
        self.assertEqual(list(mmcif._splitline("foo '' b")), ["foo", "", "b"])
        self.assertEqual(list(mmcif._splitline("foo bar' b")), ["foo", "bar'", "b"])
        self.assertEqual(list(mmcif._splitline("foo bar b'")), ["foo", "bar", "b'"])
        # A hash (#) starts a comment iff it is preceded by whitespace or is at
        # the beginning of a line:
        # https://www.iucr.org/resources/cif/spec/version1.1/cifsyntax#lex
        self.assertEqual(list(mmcif._splitline("foo#bar")), ["foo#bar"])
        self.assertEqual(list(mmcif._splitline("foo #bar")), ["foo"])
        self.assertEqual(list(mmcif._splitline("foo# bar")), ["foo#", "bar"])
        self.assertEqual(list(mmcif._splitline("#foo bar")), [])
        # Unterminated or improperly closed quotes are errors.
        self.assertRaises(ValueError, list, mmcif._splitline("foo 'bar"))
        self.assertRaises(ValueError, list, mmcif._splitline("foo 'ba'r  "))
        self.assertRaises(ValueError, list, mmcif._splitline("foo \"bar'"))
        # quotes are allowed if not followed by whitespace
        self.assertEqual(list(mmcif._splitline("foo b'ar'")), ["foo", "b'ar'"])
        self.assertEqual(list(mmcif._splitline("foo 'b'ar'")), ["foo", "b'ar"])
    def test_verbatim_block(self):
        """Verbatim blocks parsed correctly.

        Verbatim blocks delimited by ";...;" should have the final newline
        stripped. Whitespace may be stripped from the end of the line but not
        the beginning.
        """
        mmcif_dict = MMCIF2Dict(
            io.StringIO(
                "data_verbatim_test\n"
                "_test_value\n"
                ";First line\n"
                "    Second line\n"
                "Third line\n"
                ";\n"
            )
        )
        # "    Second line" keeps its leading spaces; the final newline before
        # the closing ";" is stripped from the stored value.
        self.assertEqual(
            mmcif_dict["_test_value"], ["First line\n    Second line\nThird line"]
        )
    def test_token_after_multiline(self):
        """Multi-line string followed by a token on the same line.

        A closing ";" may share its line with further tokens, but only when
        separated from them by whitespace.
        """
        stream = io.StringIO("data_test _key1\n"
                             ";foo bar\n"
                             "; _key2 'value 2'\n")
        mmcif_dict = MMCIF2Dict(stream)
        self.assertEqual(mmcif_dict, {
            "data_": "test",
            "_key1": ["foo bar"],
            "_key2": ["value 2"],
        })
        # A "#" immediately after the closing ";" (no whitespace) is an error.
        stream = io.StringIO("data_test _key1\n"
                             ";foo bar\n"
                             ";# missing space here")
        with self.assertRaisesRegex(ValueError, "Missing whitespace"):
            mmcif_dict = MMCIF2Dict(stream)
    def test_truncated_multiline(self):
        """A multi-line value with no closing ";" raises ValueError."""
        stream = io.StringIO("data_test\n_key1\n;foo bar\n")
        with self.assertRaisesRegex(ValueError, "Missing closing semicolon"):
            mmcif_dict = MMCIF2Dict(stream)
    def test_inline_comments(self):
        """Comments may begin outside of column 1 if preceded by whitespace."""
        mmcif_dict = MMCIF2Dict(
            io.StringIO(
                "data_verbatim_test\n"
                "_test_key_value_1 foo # Ignore this comment\n"
                "_test_key_value_2 foo#NotIgnored\n"
                "loop_\n"
                "_test_loop\n"
                "a b c d # Ignore this comment\n"
                "e f g\n"
                "\n"
            )
        )
        self.assertEqual(mmcif_dict["_test_key_value_1"], ["foo"])
        # A "#" not preceded by whitespace is part of the value, not a comment.
        self.assertEqual(mmcif_dict["_test_key_value_2"], ["foo#NotIgnored"])
        self.assertEqual(mmcif_dict["_test_loop"], list("abcdefg"))
    def test_loop_keyword_case_insensitive(self):
        """The ``loop_`` keyword is matched case-insensitively.

        Data names themselves remain case-sensitive, so upper-casing the
        ``_test_loop`` name must change the parse result.
        """
        test_data = """\
            data_verbatim_test
            _test_key_value foo # Ignore this comment
            loop_
            _test_loop
            a b c d # Ignore this comment
            e f g
            """
        mmcif_dict = MMCIF2Dict(io.StringIO(textwrap.dedent(test_data)))
        # "LOOP_" and "looP_" must still be recognized as the loop keyword.
        mmcif_dict2 = MMCIF2Dict(
            io.StringIO(textwrap.dedent(test_data.replace("loop_", "LOOP_")))
        )
        self.assertDictEqual(mmcif_dict, mmcif_dict2)
        mmcif_dict2 = MMCIF2Dict(
            io.StringIO(textwrap.dedent(test_data.replace("loop_", "looP_")))
        )
        self.assertDictEqual(mmcif_dict, mmcif_dict2)
        # This replacement only hits the data name "_test_loop" (case
        # sensitive), so the resulting dictionaries must differ.
        mmcif_dict2 = MMCIF2Dict(
            io.StringIO(textwrap.dedent(test_data.replace("_loop", "_LOOP")))
        )
        self.assertNotEqual(mmcif_dict, mmcif_dict2)
if __name__ == "__main__":
    # Run the full suite with verbose per-test output.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| 50.910714 | 88 | 0.483281 |
import unittest
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy if you want to use Bio.PDB."
) from None
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
import io
import textwrap
class MMCIF2dictTests(unittest.TestCase):
def test_MMCIF2dict(self):
filename = "PDB/1A8O.cif"
mmcif = MMCIF2Dict(filename)
self.assertEqual(len(mmcif.keys()), 575)
self.assertEqual(
mmcif["_entity_poly_seq.mon_id"],
[
"MSE", "ASP", "ILE", "ARG", "GLN", "GLY", "PRO", "LYS", "GLU", "PRO",
"PHE", "ARG", "ASP", "TYR", "VAL", "ASP", "ARG", "PHE", "TYR", "LYS",
"THR", "LEU", "ARG", "ALA", "GLU", "GLN", "ALA", "SER", "GLN", "GLU",
"VAL", "LYS", "ASN", "TRP", "MSE", "THR", "GLU", "THR", "LEU", "LEU",
"VAL", "GLN", "ASN", "ALA", "ASN", "PRO", "ASP", "CYS", "LYS", "THR",
"ILE", "LEU", "LYS", "ALA", "LEU", "GLY", "PRO", "GLY", "ALA", "THR",
"LEU", "GLU", "GLU", "MSE", "MSE", "THR", "ALA", "CYS", "GLN", "GLY",
]
)
self.assertEqual(
mmcif["_atom_site.Cartn_x"],
[
"19.594", "20.255", "20.351", "19.362", "19.457", "20.022", "21.718",
"21.424", "21.554", "21.835", "21.947", "21.678", "23.126", "23.098",
"23.433", "22.749", "22.322", "22.498", "21.220", "20.214", "23.062",
"24.282", "23.423", "25.429", "21.280", "20.173", "20.766", "21.804",
"19.444", "18.724", "18.011", "17.416", "16.221", "15.459", "15.824",
"20.116", "20.613", "20.546", "19.488", "19.837", "20.385", "19.526",
"18.365", "20.090", "21.675", "21.698", "20.859", "20.729", "20.260",
"19.435", "20.158", "19.512", "18.993", "20.056", "20.300", "21.486",
"22.285", "23.286", "24.155", "23.025", "22.117", "21.236", "20.159",
"19.231", "23.152", "24.037", "23.563", "22.398", "24.086", "25.003",
"24.858", "23.861", "25.748", "24.459", "24.089", "23.580", "24.111",
"25.415", "26.116", "25.852", "22.544", "21.960", "22.965", "22.928",
"20.793", "19.999", "19.234", "20.019", "18.495", "19.286", "18.523",
"23.861", "24.870", "25.788", "26.158", "25.684", "26.777", "26.215",
"27.235", "28.136", "28.155", "29.030", "26.137", "26.994", "26.279",
"26.880", "27.408", "28.345", "28.814", "28.620", "24.992", "24.151",
"24.025", "24.139", "22.787", "21.629", "21.657", "20.489", "20.571",
"19.408", "19.450", "18.365", "23.839", "23.720", "24.962", "24.853",
"23.502", "23.661", "22.120", "26.137", "27.387", "27.511", "27.925",
"28.595", "28.723", "28.016", "29.545", "27.136", "27.202", "26.238",
"26.585", "26.850", "27.835", "27.667", "26.352", "25.494", "25.797",
"24.325", "25.037", "23.984", "24.456", "24.305", "22.761", "21.538",
"21.301", "20.586", "20.130", "19.415", "19.186", "25.033", "25.526",
"26.755", "27.015", "25.771", "24.608", "23.508", "24.583", "22.406",
"23.490", "22.406", "21.326", "27.508", "28.691", "28.183", "28.705",
"29.455", "30.787", "31.428", "32.618", "33.153", "27.116", "26.508",
"25.826", "25.827", "25.475", "26.150", "24.741", "25.264", "24.587",
"25.587", "25.302", "23.789", "22.707", "21.787", "21.910", "26.767",
"27.806", "28.299", "28.656", "29.006", "28.944", "30.295", "30.744",
"30.326", "29.441", "30.787", "28.332", "28.789", "27.943", "28.374",
"28.803", "26.740", "25.833", "25.775", "24.998", "24.425", "24.354",
"24.816", "24.535", "25.454", "26.601", "26.645", "25.240", "24.885",
"27.391", "28.884", "29.200", "28.729", "29.998", "24.438", "23.066",
"23.001", "23.824", "22.370", "22.035", "21.831", "21.174", "20.852",
"20.917", "19.638", "20.949", "20.315", "18.908", "18.539", "20.262",
"19.688", "20.414", "21.592", "19.714", "18.136", "16.775", "16.738",
"15.875", "16.101", "15.478", "14.341", "13.247", "14.542", "17.668",
"17.730", "18.064", "17.491", "18.754", "18.932", "18.279", "18.971",
"19.343", "18.126", "17.905", "20.444", "21.777", "22.756", "24.069",
"24.913", "17.344", "16.136", "15.146", "14.599", "15.468", "16.242",
"17.164", "15.865", "14.932", "14.017", "14.495", "13.700", "13.904",
"13.254", "12.332", "13.484", "11.975", "12.666", "14.303", "12.641",
"14.280", "13.452", "15.793", "16.368", "16.285", "16.053", "17.815",
"17.939", "17.221", "18.427", "16.438", "16.375", "14.950", "14.778",
"16.869", "18.228", "16.791", "13.947", "12.529", "12.045", "11.151",
"11.625", "11.950", "11.054", "11.086", "10.326", "12.589", "12.177",
"13.076", "12.888", "11.978", "13.202", "10.883", "14.054", "14.963",
"15.702", "15.846", "15.935", "15.286", "16.327", "14.580", "16.162",
"16.876", "15.961", "16.391", "17.402", "18.238", "19.553", "18.506",
"14.695", "13.703", "13.270", "13.262", "12.460", "11.372", "12.854",
"12.954", "12.503", "13.541", "13.184", "12.008", "10.830", "10.505",
"10.626", "10.093", "14.820", "15.887", "16.443", "17.416", "17.014",
"16.627", "15.451", "17.619", "15.830", "16.248", "15.758", "14.809",
"15.689", "16.404", "16.005", "14.639", "14.122", "17.109", "17.396",
"16.559", "18.588", "14.018", "12.706", "12.516", "11.536", "12.617",
"13.288", "14.522", "13.454", "13.383", "13.351", "12.406", "14.564",
"14.482", "13.353", "15.552", "14.378", "14.488", "13.443", "12.968",
"15.902", "16.144", "13.061", "12.087", "10.746", "10.157", "11.879",
"11.014", "11.003", "10.171", "10.269", "10.273", "9.002", "9.101",
"8.227", "8.612", "8.611", "7.224", "10.191", "10.458", "10.518",
"9.916", "11.791", "11.677", "12.184", "12.967", "11.222", "11.377",
"10.082", "9.885", "12.416", "13.824", "14.764", "14.287", "9.214",
"7.937", "7.048", "6.294", "7.230", "7.828", "7.618", "8.090",
"7.916", "7.189", "6.419", "6.871", "6.391", "6.449", "7.815",
"8.305", "7.481", "7.371", "9.788", "10.832", "12.217", "10.789",
"6.886", "6.080", "6.922", "8.149", "6.294", "7.024", "7.912",
"7.680", "5.901", "4.734", "4.839", "8.952", "9.861", "10.886",
"11.642", "10.910", "11.884", "13.285", "13.524", "11.599", "14.199",
"15.563", "16.391", "16.022", "16.290", "16.498", "15.473", "17.509",
"18.426", "18.875", "19.012", "19.645", "20.773", "20.264", "21.920",
"19.082", "19.510", "18.471", "18.816", "19.784", "21.035", "20.954",
"19.902", "21.955", "17.199", "16.109", "16.001", "15.690", "14.787",
"14.776", "13.539", "13.220", "12.888", "16.301", "16.274", "17.413",
"17.209", "16.429", "15.284", "15.332", "13.844", "18.606", "19.764",
"19.548", "19.922", "21.047", "21.507", "23.105", "22.645", "18.915",
"18.636", "17.640", "17.807", "18.050", "18.998", "17.730", "16.631",
"15.593", "16.104", "15.685", "14.486", "17.033", "17.572", "18.985",
"19.634", "17.525", "15.855", "19.451", "20.802", "21.001", "20.066",
"21.152", "20.421", "20.725", "21.768", "19.817", "22.226", "22.536",
"23.683", "24.328", "23.949", "15.165", "19.774", "22.152", "12.938",
"23.499", "17.568", "13.544", "15.524", "31.249", "11.999", "14.511",
"7.439", "19.303", "17.114", "21.867", "17.573", "26.151", "20.974",
"20.796", "28.370", "29.565", "21.248", "25.744", "8.691", "30.789",
"30.905", "28.623", "24.935", "23.462", "9.924", "28.729", "13.579",
"23.652", "25.631", "17.799", "23.547", "16.363", "24.125", "33.063",
"29.209", "10.391", "12.221", "18.997", "16.360", "27.915", "28.158",
"21.975", "27.069", "30.148", "21.196", "8.864", "13.228", "18.577",
"20.526", "25.758", "7.838", "20.569", "13.009", "19.229", "17.655",
"30.445", "9.014", "3.398", "31.603", "16.543", "12.037", "7.261",
"5.607", "23.532", "30.701", "32.300", "34.351", "9.450", "29.476",
"13.681", "26.728", "10.004", "30.553", "23.569", "10.927", "17.983",
"8.191", "32.095", "11.520", "13.249", "15.919", "11.187", "16.743",
]
)
self.assertEqual(
mmcif["_struct_ref.pdbx_seq_one_letter_code"],
[
"GARASVLSGGELDKWEKIRLRPGGKKQYKLKHIVWASRELERFAVNPGLLETSEGCRQILGQLQPSLQTG"
"SEELRSLYNT\n"
"IAVLYCVHQRIDVKDTKEALDKIEEEQNKSKKKAQQAAADTGNNSQVSQNYPIVQNLQGQMVHQAISPRT"
"LNAWVKVVEE\n"
"KAFSPEVIPMFSALSEGATPQDLNTMLNTVGGHQAAMQMLKETINEEAAEWDRLHPVHAGPIAPGQMREP"
"RGSDIAGTTS\n"
"TLQEQIGWMTHNPPIPVGEIYKRWIILGLNKIVRMYSPTSILDIRQGPKEPFRDYVDRFYKTLRAEQASQ"
"EVKNWMTETL\n"
"LVQNANPDCKTILKALGPGATLEEMMTACQGVGGPGHKARVLAEAMSQVTNPATIMIQKGNFRNQRKTVK"
"CFNCGKEGHI\n"
"AKNCRAPRKKGCWKCGKEGHQMKDCTERQANFLGKIWPSHKGRPGNFLQSRPEPTAPPEESFRFGEETTT"
"PSQKQEPIDK\n"
"ELYPLASLRSLFGSDPSSQ"
],
)
def test_underscores(self):
filename = "PDB/4Q9R_min.cif"
mmcif = MMCIF2Dict(filename)
self.assertEqual(len(mmcif.keys()), 5)
self.assertEqual(
mmcif["_pdbx_audit_revision_item.item"],
[
"_atom_site.B_iso_or_equiv",
"_atom_site.Cartn_x",
"_atom_site.Cartn_y",
"_atom_site.Cartn_z",
],
)
def test_quotefix(self):
filename = "PDB/1MOM_min.cif"
mmcif = MMCIF2Dict(filename)
self.assertEqual(len(mmcif.keys()), 21)
self.assertEqual(
mmcif["_struct_conf.pdbx_PDB_helix_id"],
[
"A",
"A'",
"B",
"C",
"B'",
"D",
"E",
"C'",
"F",
"G",
"H",
"D'",
"E'",
"A'\"",
"BC",
"CD",
"DE",
],
)
def test_splitline(self):
filename = "PDB/4Q9R_min.cif"
mmcif = MMCIF2Dict(filename)
self.assertEqual(list(mmcif._splitline("foo bar")), ["foo", "bar"])
self.assertEqual(list(mmcif._splitline(" foo bar ")), ["foo", "bar"])
self.assertEqual(list(mmcif._splitline("'foo' bar")), ["foo", "bar"])
self.assertEqual(list(mmcif._splitline('foo "bar"')), ["foo", "bar"])
self.assertEqual(list(mmcif._splitline("foo 'bar a' b")), ["foo", "bar a", "b"])
self.assertEqual(list(mmcif._splitline("foo 'bar'a' b")), ["foo", "bar'a", "b"])
self.assertEqual(
list(mmcif._splitline('foo "bar\' a" b')), ["foo", "bar' a", "b"]
)
self.assertEqual(list(mmcif._splitline("foo '' b")), ["foo", "", "b"])
self.assertEqual(list(mmcif._splitline("foo bar' b")), ["foo", "bar'", "b"])
self.assertEqual(list(mmcif._splitline("foo bar b'")), ["foo", "bar", "b'"])
# A hash (#) starts a comment iff it is preceded by whitespace or is at
# the beginning of a line:
# https://www.iucr.org/resources/cif/spec/version1.1/cifsyntax#lex
self.assertEqual(list(mmcif._splitline("foo self.assertEqual(list(mmcif._splitline("foo
self.assertEqual(list(mmcif._splitline("foolf.assertEqual(list(mmcif._splitline("
self.assertRaises(ValueError, list, mmcif._splitline("foo 'bar"))
self.assertRaises(ValueError, list, mmcif._splitline("foo 'ba'r "))
self.assertRaises(ValueError, list, mmcif._splitline("foo \"bar'"))
self.assertEqual(list(mmcif._splitline("foo b'ar'")), ["foo", "b'ar'"])
self.assertEqual(list(mmcif._splitline("foo 'b'ar'")), ["foo", "b'ar"])
def test_verbatim_block(self):
mmcif_dict = MMCIF2Dict(
io.StringIO(
"data_verbatim_test\n"
"_test_value\n"
";First line\n"
" Second line\n"
"Third line\n"
";\n"
)
)
self.assertEqual(
mmcif_dict["_test_value"], ["First line\n Second line\nThird line"]
)
def test_token_after_multiline(self):
stream = io.StringIO("data_test _key1\n"
";foo bar\n"
"; _key2 'value 2'\n")
mmcif_dict = MMCIF2Dict(stream)
self.assertEqual(mmcif_dict, {
"data_": "test",
"_key1": ["foo bar"],
"_key2": ["value 2"],
})
stream = io.StringIO("data_test _key1\n"
";foo bar\n"
";# missing space here")
with self.assertRaisesRegex(ValueError, "Missing whitespace"):
mmcif_dict = MMCIF2Dict(stream)
def test_truncated_multiline(self):
stream = io.StringIO("data_test\n_key1\n;foo bar\n")
with self.assertRaisesRegex(ValueError, "Missing closing semicolon"):
mmcif_dict = MMCIF2Dict(stream)
def test_inline_comments(self):
mmcif_dict = MMCIF2Dict(
io.StringIO(
"data_verbatim_test\n"
"_test_key_value_1 foo # Ignore this comment\n"
"_test_key_value_2 foo#NotIgnored\n"
"loop_\n"
"_test_loop\n"
"a b c d # Ignore this comment\n"
"e f g\n"
"\n"
)
)
self.assertEqual(mmcif_dict["_test_key_value_1"], ["foo"])
self.assertEqual(mmcif_dict["_test_key_value_2"], ["foo#NotIgnored"])
self.assertEqual(mmcif_dict["_test_loop"], list("abcdefg"))
def test_loop_keyword_case_insensitive(self):
test_data = """\
data_verbatim_test
_test_key_value foo # Ignore this comment
loop_
_test_loop
a b c d # Ignore this comment
e f g
"""
mmcif_dict = MMCIF2Dict(io.StringIO(textwrap.dedent(test_data)))
mmcif_dict2 = MMCIF2Dict(
io.StringIO(textwrap.dedent(test_data.replace("loop_", "LOOP_")))
)
self.assertDictEqual(mmcif_dict, mmcif_dict2)
mmcif_dict2 = MMCIF2Dict(
io.StringIO(textwrap.dedent(test_data.replace("loop_", "looP_")))
)
self.assertDictEqual(mmcif_dict, mmcif_dict2)
mmcif_dict2 = MMCIF2Dict(
io.StringIO(textwrap.dedent(test_data.replace("_loop", "_LOOP")))
)
self.assertNotEqual(mmcif_dict, mmcif_dict2)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| true | true |
1c3c3280d2c2fbb3f01689606a67c08fd10c80e0 | 1,058 | py | Python | scripts/testInputs/complex.test.py | MalcomnM/Fox | f41e59305c1fb4c008f5e0d712e291525c2b39f2 | [
"Apache-2.0"
] | 3 | 2020-03-03T16:56:43.000Z | 2022-01-03T14:34:16.000Z | scripts/testInputs/complex.test.py | MalcomnM/Fox | f41e59305c1fb4c008f5e0d712e291525c2b39f2 | [
"Apache-2.0"
] | 5 | 2019-10-26T18:01:38.000Z | 2022-02-26T19:26:43.000Z | scripts/testInputs/complex.test.py | MalcomnM/Fox | f41e59305c1fb4c008f5e0d712e291525c2b39f2 | [
"Apache-2.0"
] | null | null | null | # # Nested data
inputs = [
[6.4, 2.8, 5.6, 2.2, 2], # ?
[5.0, 2.3, 3.3, 1.0, 1],
[4.9, 2.5, 4.5, 1.7, 2],
]
# Comprehensions
features = [x[0:-1] for x in inputs] # ?
labels = [x[-1] for x in inputs] # ?
# macros
hat = labels # ?
# print
print(features)
print(labels)
# side effects
b = [*range(1, 4)] # ? <-- Comment Macro ~ Result ->
print('before', b)
b.pop() # ?
print('after', b)
b # ?
b
# functions
def add2(a):
rv = a + 2
rv
return rv
# function nesting and macros
def linked_list_from(*items):
head = None # ?
for new_head in items[::-1]:
head = (new_head, head) # ?
return head
l = linked_list_from(1, 2, 3)
l
a = 1
# Loop stuff
while a < 5:
a
print('Tick', a)
a += 1
for t in range(5):
t
t
t
add2(14) # ?
a = add2(1) # ?
a
# data types
tup = (1, 2, 3) # ?
tup
# assorted
1 + 334 # ? Calculator ->
1 < 0 # ?
text = 'happy' # ?
text
unicode_text = 'é' # ?
unicode_text
# newline characters in strings
x = "foo\nfaa" # ?
# errors
0/0
| 11.5 | 54 | 0.514178 | [6.4, 2.8, 5.6, 2.2, 2],
[5.0, 2.3, 3.3, 1.0, 1],
[4.9, 2.5, 4.5, 1.7, 2],
]
features = [x[0:-1] for x in inputs]
labels = [x[-1] for x in inputs]
hat = labels
print(features)
print(labels)
b = [*range(1, 4)]
print('before', b)
b.pop()
print('after', b)
b
b
def add2(a):
rv = a + 2
rv
return rv
def linked_list_from(*items):
head = None
for new_head in items[::-1]:
head = (new_head, head)
return head
l = linked_list_from(1, 2, 3)
l
a = 1
while a < 5:
a
print('Tick', a)
a += 1
for t in range(5):
t
t
t
add2(14)
a = add2(1)
a
tup = (1, 2, 3)
tup
1 + 334
1 < 0
text = 'happy'
text
unicode_text = 'é'
unicode_text
x = "foo\nfaa"
0/0
| true | true |
1c3c3327e6e14b41083d7661c8b8d877a5576e07 | 2,280 | py | Python | modifier/bootstrap.py | lenoch/tagsetbench | 98c5474197af3087a28f73b54398abec0534c7d8 | [
"MIT"
] | null | null | null | modifier/bootstrap.py | lenoch/tagsetbench | 98c5474197af3087a28f73b54398abec0534c7d8 | [
"MIT"
] | null | null | null | modifier/bootstrap.py | lenoch/tagsetbench | 98c5474197af3087a28f73b54398abec0534c7d8 | [
"MIT"
] | null | null | null | from . import Modifier
# TODO: this could probably be handled by the generic modifier by now, since
# extra parameters can finally be passed in - just add an "implicit/silent"
# flag and we are done
class punctuation(Modifier):
    """Bootstrap tagger for punctuation tokens.

    Explicit, fine-grained, "late-bound" bootstrap: every still-untagged
    token (``k`` = ``?``) whose word form is a single punctuation character
    or an ellipsis is given the punctuation tag ``k=I``.
    """
    def __init__(self, modifier):
        # Inject the MATCH pattern before the base class consumes the params.
        modifier.params['match'] = {
            'k': '\?',
            'word': """[-,.():?"!;\'/]|...""",
        }
        super().__init__(modifier)
        # "explicit" additionally marks matched tokens with z=X.
        self.explicit = modifier.params.get('explicit', 'no').lower() in (
            'yes', 'true')
        ####self.untagged_sign = modifier.params.get('', 'no').lower() in (
        ####    'yes', 'true')
        # remove the hacked-in MATCH dictionary from the modifier name
        mod = self.name.split(';')
        self.name = ';'.join(mod[:mod.index('MATCH')])
    def __call__(self, sentence):
        # Tag every matching token in the sentence in place.
        for token in sentence.tokens:
            if self.match_attributes(token):
                token['k'] = 'I'
                # NOTE: bootstrapping leaves no mark unless you make a wish
                if self.explicit:
                    token['z'] = 'X'  # the zX flag is dubious; <phr/> would suffice
                token.set_modified_by(self.name)
                # TODO: use Modification.name directly?
class cardinal_number(Modifier):
    """Bootstrap tagger for digit-only tokens.

    Explicit, fine-grained, "late-bound" bootstrap: every still-untagged
    token (``k`` = ``?``) consisting only of digits and spaces is tagged as
    a cardinal numeral (``k=4``, ``x=C``).
    """
    def __init__(self, modifier):
        # Inject the MATCH pattern before the base class consumes the params.
        modifier.params['match'] = {
            'k': '\?',
            'word': '[0-9 ]+',
        }
        super().__init__(modifier)
        # "explicit" additionally marks matched tokens with z=X.
        self.explicit = modifier.params.get('explicit', 'no').lower() in (
            'yes', 'true')
        # remove the hacked-in MATCH dictionary from the modifier name
        mod = self.name.split(';')
        self.name = ';'.join(mod[:mod.index('MATCH')])
    def __call__(self, sentence):
        # Tag every matching token in the sentence in place.
        for token in sentence.tokens:
            if self.match_attributes(token):
                token['k'] = '4'
                token['x'] = 'C'  # cardinal numeral
                if self.explicit:
                    token['z'] = 'X'  # this could probably be dropped safely
                token.set_modified_by(self.name)
                # elif line == '%':
                #     tag = 'k1gNzX'  # lemma: “procento” (neutral gender)
                # else:
                #     tag = 'k1zX'
| 34.545455 | 79 | 0.52807 | from . import Modifier
class punctuation(Modifier):
def __init__(self, modifier):
modifier.params['match'] = {
'k': '\?',
'word': """[-,.():?"!;\'/]|...""",
}
super().__init__(modifier)
self.explicit = modifier.params.get('explicit', 'no').lower() in (
'yes', 'true')
####self.untagged_sign = modifier.params.get('', 'no').lower() in (
#### 'yes', 'true')
# remove the hacked-in MATCH dictionary from the modifier name
mod = self.name.split(';')
self.name = ';'.join(mod[:mod.index('MATCH')])
def __call__(self, sentence):
for token in sentence.tokens:
if self.match_attributes(token):
token['k'] = 'I'
# NOTE: bootstraping leaves no mark unless you make a wish
if self.explicit:
token['z'] = 'X' # nasrat na zX; stačí <phr/>
token.set_modified_by(self.name)
# TODO: používat přímo Modification.name?
class cardinal_number(Modifier):
def __init__(self, modifier):
modifier.params['match'] = {
'k': '\?',
'word': '[0-9 ]+',
}
super().__init__(modifier)
self.explicit = modifier.params.get('explicit', 'no').lower() in (
'yes', 'true')
# remove the hacked-in MATCH dictionary from the modifier name
mod = self.name.split(';')
self.name = ';'.join(mod[:mod.index('MATCH')])
def __call__(self, sentence):
for token in sentence.tokens:
if self.match_attributes(token):
token['k'] = '4'
token['x'] = 'C' # základní číslovka
if self.explicit:
token['z'] = 'X' # tohle jde v pohodě zahodit…
token.set_modified_by(self.name)
# elif line == '%':
# tag = 'k1gNzX' # lemma: “procento” (neutral gender)
# else:
# tag = 'k1zX'
| true | true |
1c3c334407e5c7b5aa6d71dce501df751deb4831 | 1,643 | py | Python | oslo-modules/oslo_messaging/_drivers/zmq_driver/zmq_names.py | esse-io/zen-common | 8ede82ab81bad53c3b947084b812c44e329f159b | [
"Apache-2.0"
] | null | null | null | oslo-modules/oslo_messaging/_drivers/zmq_driver/zmq_names.py | esse-io/zen-common | 8ede82ab81bad53c3b947084b812c44e329f159b | [
"Apache-2.0"
] | null | null | null | oslo-modules/oslo_messaging/_drivers/zmq_driver/zmq_names.py | esse-io/zen-common | 8ede82ab81bad53c3b947084b812c44e329f159b | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_messaging._drivers.zmq_driver import zmq_async
# Resolved lazily so the appropriate pyzmq variant is picked at import time.
zmq = zmq_async.import_zmq()
# Human-readable names of the ZeroMQ socket-type constants (for logging).
ZMQ_SOCKET_STR = {zmq.DEALER: "DEALER",
                  zmq.ROUTER: "ROUTER",
                  zmq.PUSH: "PUSH",
                  zmq.PULL: "PULL",
                  zmq.REQ: "REQ",
                  zmq.REP: "REP",
                  zmq.PUB: "PUB",
                  zmq.SUB: "SUB"}
# Field names used in the serialized reply envelope.
FIELD_FAILURE = 'failure'
FIELD_REPLY = 'reply'
FIELD_LOG_FAILURE = 'log_failure'
FIELD_ID = 'id'
# Message type tags; the "-f" suffix marks the fanout variant.
CALL_TYPE = 'call'
CAST_TYPE = 'cast'
CAST_FANOUT_TYPE = 'cast-f'
NOTIFY_TYPE = 'notify'
NOTIFY_FANOUT_TYPE = 'notify-f'
MESSAGE_TYPES = (CALL_TYPE,
                 CAST_TYPE,
                 CAST_FANOUT_TYPE,
                 NOTIFY_TYPE,
                 NOTIFY_FANOUT_TYPE)
# Groupings used to choose the send strategy for a given message type.
MULTISEND_TYPES = (CAST_FANOUT_TYPE, NOTIFY_FANOUT_TYPE)
DIRECT_TYPES = (CALL_TYPE, CAST_TYPE, NOTIFY_TYPE)
CAST_TYPES = (CAST_TYPE, CAST_FANOUT_TYPE)
NOTIFY_TYPES = (NOTIFY_TYPE, NOTIFY_FANOUT_TYPE)
def socket_type_str(socket_type):
    """Return the human-readable name of a ZeroMQ socket-type constant."""
    name = ZMQ_SOCKET_STR[socket_type]
    return name
| 30.425926 | 78 | 0.657334 |
from oslo_messaging._drivers.zmq_driver import zmq_async
zmq = zmq_async.import_zmq()
ZMQ_SOCKET_STR = {zmq.DEALER: "DEALER",
zmq.ROUTER: "ROUTER",
zmq.PUSH: "PUSH",
zmq.PULL: "PULL",
zmq.REQ: "REQ",
zmq.REP: "REP",
zmq.PUB: "PUB",
zmq.SUB: "SUB"}
FIELD_FAILURE = 'failure'
FIELD_REPLY = 'reply'
FIELD_LOG_FAILURE = 'log_failure'
FIELD_ID = 'id'
CALL_TYPE = 'call'
CAST_TYPE = 'cast'
CAST_FANOUT_TYPE = 'cast-f'
NOTIFY_TYPE = 'notify'
NOTIFY_FANOUT_TYPE = 'notify-f'
MESSAGE_TYPES = (CALL_TYPE,
CAST_TYPE,
CAST_FANOUT_TYPE,
NOTIFY_TYPE,
NOTIFY_FANOUT_TYPE)
MULTISEND_TYPES = (CAST_FANOUT_TYPE, NOTIFY_FANOUT_TYPE)
DIRECT_TYPES = (CALL_TYPE, CAST_TYPE, NOTIFY_TYPE)
CAST_TYPES = (CAST_TYPE, CAST_FANOUT_TYPE)
NOTIFY_TYPES = (NOTIFY_TYPE, NOTIFY_FANOUT_TYPE)
def socket_type_str(socket_type):
return ZMQ_SOCKET_STR[socket_type]
| true | true |
1c3c3423cef8d7348d670b9e463926eb2563df13 | 87 | py | Python | tests/test_comeon.py | zen-xu/comeon | 263eaa1595d29a4cf25708aaf38080f92bc5a454 | [
"MIT"
] | null | null | null | tests/test_comeon.py | zen-xu/comeon | 263eaa1595d29a4cf25708aaf38080f92bc5a454 | [
"MIT"
] | null | null | null | tests/test_comeon.py | zen-xu/comeon | 263eaa1595d29a4cf25708aaf38080f92bc5a454 | [
"MIT"
] | null | null | null | from comeon import __version__
def test_version():
    """The packaged version string matches the expected release number."""
    expected = "0.1.0"
    assert __version__ == expected
| 14.5 | 33 | 0.712644 | from comeon import __version__
def test_version():
assert __version__ == "0.1.0"
| true | true |
1c3c348f4c6ea52b8a0bbfa6f38d089bee3ef57c | 7,949 | py | Python | task_set/optimizers/adam8p.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | task_set/optimizers/adam8p.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | task_set/optimizers/adam8p.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Adam with extra hyper parameters for l1, l2 reg and lr schedules."""
import re
from typing import Text, List, Dict, Any
import numpy as np
from task_set import registry
from task_set.optimizers import base
from task_set.optimizers import utils
import tensorflow.compat.v1 as tf
class Adam8POptimizer(base.BaseOptimizer):
  r"""8 hyper parameter Adam.
  This is the Adam optimizer[1] with the addition of l1 and l2 regularization
  and a combination of linear and exponential learning rate decay.
  Note the l1 and l2 regularization is added to the loss. See AdamW[2] for a
  discussion of why this might be a bad idea.
  The update is as follows:
  # initialize variables
  m <- 0
  v <- 0
  beta1p <- beta1
  beta2p <- beta2
  # updating x \in R^N:
  g = d/dx(f(x) + l2*||x||^2_2 + l1*||x||_1)
  m <- beta1 * m + (1.0 - beta1)*g
  v <- beta2 * v + (1.0 - beta2)*g^2
  mh <- m / (1 - beta1p)
  vh <- v / (1 - beta2p)
  update <- mh / (sqrt(vh+1e-10) + epsilon)
  beta1p <- beta1 * beta1p
  beta2p <- beta2 * beta2p
  linear_factor <- max(1 - linear_decay * global_step, 0.0)
  exp_factor <- exp(-exponential_decay * global_step)
  lr = exp_factor * linear_factor * learning_rate
  x <- x - lr * update
  [1] https://arxiv.org/abs/1412.6980
  [2] https://arxiv.org/abs/1711.05101
  """
  def __init__(
      self,
      learning_rate: float = 1e-3,
      beta1: float = 0.9,
      beta2: float = 0.999,
      epsilon: float = 1e-8,
      l1: float = 1e-7,
      l2: float = 1e-7,
      linear_decay: float = 0.0,
      exponential_decay: float = 0.0,
      reg_factor: float = 1.0,
      training_steps: int = 10000,
  ):
    """Initialize the optimizer. See class documentation for equations.

    Args:
      learning_rate: base step size.
      beta1: first-moment (momentum) decay rate.
      beta2: second-moment decay rate.
      epsilon: denominator constant for numerical stability.
      l1: weight of the l1 penalty added to the loss.
      l2: weight of the l2 penalty added to the loss.
      linear_decay: per-step linear learning-rate decay coefficient.
      exponential_decay: per-step exponential learning-rate decay coefficient.
      reg_factor: multiplier applied to each per-variable regularizer term.
      training_steps: total training steps; decay schedules are rescaled so
        they behave as over the 10k-step meta-training horizon.
    """
    self._learning_rate = learning_rate
    self._beta1 = beta1
    self._beta2 = beta2
    self._epsilon = epsilon
    self._l1 = l1
    self._l2 = l2
    self._linear_decay = linear_decay
    self._exponential_decay = exponential_decay
    self._reg_factor = reg_factor
    self._training_steps = training_steps
  def _get_variable_name(self, param_name):
    """Get the variable name from the tensor name."""
    # Strip the trailing ":<output_index>" (e.g. "w:0" -> "w").
    m = re.match("^(.*):\\d+$", param_name)
    if m is not None:
      param_name = m.group(1)
    return param_name
  def minimize(self, loss, global_step,
               var_list):
    """Create op that applies Adam8p step."""
    if not var_list:
      raise ValueError("Explicitly pass var_list!")
    if not global_step:
      raise ValueError("Explicitly pass global_step!")
    # compute_gradients adds the l1/l2 regularization penalties to the loss.
    grads_and_vars = self.compute_gradients(loss, var_list=var_list)
    return self.apply_gradients(grads_and_vars, global_step=global_step)
  def apply_gradients(self, grads_and_vars, global_step, name=None):
    """Perform an update with the parameters."""
    # we meta-train with 10k steps. When applying to longer problems we want to
    # have a reasonable schedule so we rescale.
    rescale_global_step = float(10000) / self._training_steps * tf.to_float(
        global_step)
    # Running powers of beta1/beta2 used for bias correction below.
    beta1_power = tf.get_variable(
        dtype=tf.float32, name="beta1_power", initializer=self._beta1)
    beta2_power = tf.get_variable(
        dtype=tf.float32, name="beta2_power", initializer=self._beta2)
    exp_factor = tf.exp(-self._exponential_decay *
                        tf.to_float(rescale_global_step))
    # lr reduction per step.
    linear_factor = tf.maximum(
        1 - self._linear_decay * tf.to_float(rescale_global_step), 0.0)
    lr = exp_factor * linear_factor * self._learning_rate
    assignments = []
    for (grad, param) in grads_and_vars:
      if grad is None or param is None:
        continue
      # sparse to dense conversion
      grad = tf.convert_to_tensor(grad)
      param_name = self._get_variable_name(param.name)
      # First- and second-moment accumulator slots for this parameter.
      m = tf.get_variable(
          name=param_name + "/adam_m",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      v = tf.get_variable(
          name=param_name + "/adam_v",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      next_m = (self._beta1 * m + (1.0 - self._beta1) * grad)
      next_v = (self._beta2 * v + (1.0 - self._beta2) * tf.square(grad))
      # Bias-corrected moment estimates.
      next_m_hat = next_m / (1 - beta1_power)
      next_v_hat = next_v / (1 - beta2_power)
      update = next_m_hat / (tf.sqrt(next_v_hat + 1e-10) + self._epsilon)
      next_param = param - lr * update
      assignments.extend(
          [param.assign(next_param),
           m.assign(next_m),
           v.assign(next_v)])
    # Do this after all other assignments are done to prevent a race condition.
    with tf.control_dependencies(assignments):
      assignments.extend([
          beta1_power.assign(beta1_power * self._beta1),
          beta2_power.assign(beta2_power * self._beta2),
          global_step.assign_add(1),
      ])
    return tf.group(*assignments, name=name)
  def compute_gradients(self, loss, var_list=None, **kwargs):
    """Return (gradient, variable) pairs for the l1/l2-regularized loss."""
    if not var_list:
      var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    if self._l1:
      l1 = tf.add_n(
          [tf.reduce_sum(tf.abs(p)) * self._reg_factor for p in var_list])
      loss = loss + l1 * self._l1
    if self._l2:
      l2 = tf.add_n(
          [tf.reduce_sum(tf.square(p)) * self._reg_factor for p in var_list])
      loss = loss + l2 * self._l2
    grads_and_vars = zip(
        tf.gradients(loss, var_list, colocate_gradients_with_ops=True),
        var_list)
    return grads_and_vars
# Sampled hyperparameter configuration for the Adam8P optimizer family.
Adam8PConfig = Dict[Text, Any]


@registry.optimizers_registry.register_sampler("adam8p_wide_grid")
def sample_adam8p_wide_grid(seed):
  """Sample a random configuration from a wide grid for adam8p."""
  rng = np.random.RandomState(seed)
  cfg = {
      "learning_rate": utils.sample_log_float(rng, 1e-8, 1e1),
      "beta1": 1 - utils.sample_log_float(rng, 1e-4, 1e0),
      "beta2": 1 - utils.sample_log_float(rng, 1e-6, 1e0),
      "epsilon": utils.sample_log_float(rng, 1e-10, 1e3),
      "l1": utils.sample_log_float(rng, 1e-8, 1e1),
      "l2": utils.sample_log_float(rng, 1e-8, 1e1),
      "linear_decay": utils.sample_log_float(rng, 1e-7, 1e-4),
      # NOTE(review): bounds are given high-to-low (1e-3, 1e-6), unlike every
      # other range here -- confirm utils.sample_log_float accepts that order.
      "exponential_decay": utils.sample_log_float(rng, 1e-3, 1e-6),
  }
  return cfg


@registry.optimizers_registry.register_getter("adam8p_wide_grid")
def get_adam8p(
    cfg,
    training_steps = 10000  # pylint: disable=unused-argument
):
  """Build an Adam8POptimizer from a sampled config.

  `training_steps` is deliberately ignored here (see the pylint disable);
  the optimizer falls back to its own default horizon.
  """
  return Adam8POptimizer(**cfg)


@registry.optimizers_registry.register_sampler("adam6p_wide_grid")
def sample_adam6p_wide_grid(seed):
  """Sample a random configuration from a wide grid for adam6p."""
  # Offset keeps adam6p draws decorrelated from adam8p draws for equal seeds.
  rng = np.random.RandomState(seed + 123455)
  cfg = {
      "learning_rate": utils.sample_log_float(rng, 1e-8, 1e1),
      "beta1": 1 - utils.sample_log_float(rng, 1e-4, 1e0),
      "beta2": 1 - utils.sample_log_float(rng, 1e-6, 1e0),
      "epsilon": utils.sample_log_float(rng, 1e-10, 1e3),
      "linear_decay": utils.sample_log_float(rng, 1e-7, 1e-4),
      "exponential_decay": utils.sample_log_float(rng, 1e-3, 1e-6),
  }
  return cfg


@registry.optimizers_registry.register_getter("adam6p_wide_grid")
def get_adam6p(cfg, training_steps = 10000):
  """Build the 6-parameter variant: Adam8P with L1/L2 pinned to zero."""
  return Adam8POptimizer(l1=0.0, l2=0.0, training_steps=training_steps, **cfg)
| 32.313008 | 79 | 0.670273 |
import re
from typing import Text, List, Dict, Any
import numpy as np
from task_set import registry
from task_set.optimizers import base
from task_set.optimizers import utils
import tensorflow.compat.v1 as tf
class Adam8POptimizer(base.BaseOptimizer):
  """Adam with 8 tunable hyperparameters: lr, betas, epsilon, L1/L2, decays.

  Implements bias-corrected Adam plus optional L1/L2 regularization and both
  linear and exponential learning-rate decay. Decay schedules are defined
  relative to a 10k-step meta-training horizon and rescaled to
  `training_steps` at apply time.
  """

  def __init__(
      self,
      learning_rate = 1e-3,
      beta1 = 0.9,
      beta2 = 0.999,
      epsilon = 1e-8,
      l1 = 1e-7,
      l2 = 1e-7,
      linear_decay = 0.0,
      exponential_decay = 0.0,
      reg_factor = 1.0,
      training_steps = 10000,
  ):
    """Store hyperparameters.

    Args:
      learning_rate: base step size before decay factors.
      beta1: first-moment (momentum) decay rate.
      beta2: second-moment decay rate.
      epsilon: denominator stabilizer.
      l1: L1 regularization strength (0 disables).
      l2: L2 regularization strength (0 disables).
      linear_decay: per-(rescaled)-step linear LR decay coefficient.
      exponential_decay: per-(rescaled)-step exponential LR decay rate.
      reg_factor: extra multiplier on each variable's L1/L2 term.
      training_steps: horizon the (10k-step-relative) schedules are
        rescaled to.
    """
    self._learning_rate = learning_rate
    self._beta1 = beta1
    self._beta2 = beta2
    self._epsilon = epsilon
    self._l1 = l1
    self._l2 = l2
    self._linear_decay = linear_decay
    self._exponential_decay = exponential_decay
    self._reg_factor = reg_factor
    self._training_steps = training_steps

  def _get_variable_name(self, param_name):
    """Strip the trailing ':<output index>' from a TF variable name."""
    m = re.match("^(.*):\\d+$", param_name)
    if m is not None:
      param_name = m.group(1)
    return param_name

  def minimize(self, loss, global_step,
               var_list):
    """Compute and apply gradients; var_list/global_step must be explicit."""
    if not var_list:
      raise ValueError("Explicitly pass var_list!")
    if not global_step:
      raise ValueError("Explicitly pass global_step!")
    grads_and_vars = self.compute_gradients(loss, var_list=var_list)
    return self.apply_gradients(grads_and_vars, global_step=global_step)

  def apply_gradients(self, grads_and_vars, global_step, name=None):
    """Perform an update with the parameters; returns a grouped update op."""
    # Schedules are defined w.r.t. a 10k-step run, so rescale the step count
    # to this run's actual horizon.
    rescale_global_step = float(10000) / self._training_steps * tf.to_float(
        global_step)
    # Running powers of beta1/beta2, used for Adam's bias correction below.
    beta1_power = tf.get_variable(
        dtype=tf.float32, name="beta1_power", initializer=self._beta1)
    beta2_power = tf.get_variable(
        dtype=tf.float32, name="beta2_power", initializer=self._beta2)
    exp_factor = tf.exp(-self._exponential_decay *
                        tf.to_float(rescale_global_step))
    # Linear LR reduction per step, clamped at zero.
    linear_factor = tf.maximum(
        1 - self._linear_decay * tf.to_float(rescale_global_step), 0.0)
    lr = exp_factor * linear_factor * self._learning_rate
    assignments = []
    for (grad, param) in grads_and_vars:
      if grad is None or param is None:
        continue
      # Sparse-to-dense conversion.
      grad = tf.convert_to_tensor(grad)
      param_name = self._get_variable_name(param.name)
      # First/second moment accumulators, one pair per parameter.
      m = tf.get_variable(
          name=param_name + "/adam_m",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      v = tf.get_variable(
          name=param_name + "/adam_v",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      next_m = (self._beta1 * m + (1.0 - self._beta1) * grad)
      next_v = (self._beta2 * v + (1.0 - self._beta2) * tf.square(grad))
      # Bias-corrected moment estimates.
      next_m_hat = next_m / (1 - beta1_power)
      next_v_hat = next_v / (1 - beta2_power)
      update = next_m_hat / (tf.sqrt(next_v_hat + 1e-10) + self._epsilon)
      next_param = param - lr * update
      assignments.extend(
          [param.assign(next_param),
           m.assign(next_m),
           v.assign(next_v)])
    # Run only after all other assignments to avoid a race condition.
    with tf.control_dependencies(assignments):
      assignments.extend([
          beta1_power.assign(beta1_power * self._beta1),
          beta2_power.assign(beta2_power * self._beta2),
          global_step.assign_add(1),
      ])
    return tf.group(*assignments, name=name)

  def compute_gradients(self, loss, var_list=None, **kwargs):
    """Compute gradients of `loss` with optional L1/L2 regularization added.

    Returns a `zip` of (gradient, variable) pairs (single-use iterator).
    """
    if not var_list:
      var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    if self._l1:
      l1 = tf.add_n(
          [tf.reduce_sum(tf.abs(p)) * self._reg_factor for p in var_list])
      loss = loss + l1 * self._l1
    if self._l2:
      l2 = tf.add_n(
          [tf.reduce_sum(tf.square(p)) * self._reg_factor for p in var_list])
      loss = loss + l2 * self._l2
    grads_and_vars = zip(
        tf.gradients(loss, var_list, colocate_gradients_with_ops=True),
        var_list)
    return grads_and_vars
# Sampled hyperparameter configuration for the Adam8P optimizer family.
Adam8PConfig = Dict[Text, Any]


@registry.optimizers_registry.register_sampler("adam8p_wide_grid")
def sample_adam8p_wide_grid(seed):
  """Sample a random configuration from a wide grid for adam8p."""
  rng = np.random.RandomState(seed)
  cfg = {
      "learning_rate": utils.sample_log_float(rng, 1e-8, 1e1),
      "beta1": 1 - utils.sample_log_float(rng, 1e-4, 1e0),
      "beta2": 1 - utils.sample_log_float(rng, 1e-6, 1e0),
      "epsilon": utils.sample_log_float(rng, 1e-10, 1e3),
      "l1": utils.sample_log_float(rng, 1e-8, 1e1),
      "l2": utils.sample_log_float(rng, 1e-8, 1e1),
      "linear_decay": utils.sample_log_float(rng, 1e-7, 1e-4),
      # NOTE(review): bounds are given high-to-low (1e-3, 1e-6), unlike every
      # other range here -- confirm utils.sample_log_float accepts that order.
      "exponential_decay": utils.sample_log_float(rng, 1e-3, 1e-6),
  }
  return cfg


@registry.optimizers_registry.register_getter("adam8p_wide_grid")
def get_adam8p(
    cfg,
    training_steps = 10000  # unused; kept for getter API compatibility
):
  """Build an Adam8POptimizer from a sampled config."""
  return Adam8POptimizer(**cfg)


@registry.optimizers_registry.register_sampler("adam6p_wide_grid")
def sample_adam6p_wide_grid(seed):
  """Sample a random configuration from a wide grid for adam6p."""
  # Offset keeps adam6p draws decorrelated from adam8p draws for equal seeds.
  rng = np.random.RandomState(seed + 123455)
  cfg = {
      "learning_rate": utils.sample_log_float(rng, 1e-8, 1e1),
      "beta1": 1 - utils.sample_log_float(rng, 1e-4, 1e0),
      "beta2": 1 - utils.sample_log_float(rng, 1e-6, 1e0),
      "epsilon": utils.sample_log_float(rng, 1e-10, 1e3),
      "linear_decay": utils.sample_log_float(rng, 1e-7, 1e-4),
      "exponential_decay": utils.sample_log_float(rng, 1e-3, 1e-6),
  }
  return cfg


@registry.optimizers_registry.register_getter("adam6p_wide_grid")
def get_adam6p(cfg, training_steps = 10000):
  """Build the 6-parameter variant: Adam8P with L1/L2 pinned to zero."""
  return Adam8POptimizer(l1=0.0, l2=0.0, training_steps=training_steps, **cfg)
| true | true |
1c3c35218d2c88b973ab6e73fd319baf9b30cd97 | 5,599 | py | Python | domainbed/lib/misc.py | abhimanyudubey/DomainBed | 5c8373e40a04035081937b0fa3eb4fa5339dae32 | [
"MIT"
] | 6 | 2021-08-05T11:50:34.000Z | 2022-02-28T15:29:09.000Z | domainbed/lib/misc.py | abhimanyudubey/DomainBed | 5c8373e40a04035081937b0fa3eb4fa5339dae32 | [
"MIT"
] | null | null | null | domainbed/lib/misc.py | abhimanyudubey/DomainBed | 5c8373e40a04035081937b0fa3eb4fa5339dae32 | [
"MIT"
] | 1 | 2021-09-30T12:25:07.000Z | 2021-09-30T12:25:07.000Z | """
Things that don't belong anywhere else
"""
import hashlib
import json
import os
import sys
from shutil import copyfile
from collections import OrderedDict
from numbers import Number
import operator
import numpy as np
import torch
import tqdm
from collections import Counter
import torch.nn.functional as F
def cross_entropy(x, y):
    """ Wrapper around cross-entropy to allow for both one-hot and many-hot
    combinations (many hot version is scaled accordingly).

    Args:
        x: logits of shape (N, C).
        y: either integer class indices of shape (N,) or (N, 1), or a
            many-hot float matrix of shape (N, C).

    Returns:
        Scalar mean loss. In the many-hot case each sample's per-class BCE
        row is normalized by that sample's number of active labels.
    """
    if len(y.shape) == 1:
        return F.cross_entropy(x, y)
    if y.shape[1] == 1:
        y = y.squeeze(1)
        return F.cross_entropy(x, y)
    # Many-hot case. keepdim=True keeps the divisor shaped (N, 1) so the
    # division broadcasts row-wise; the previous (N,) divisor broadcast
    # against the class dimension instead (wrong scaling when C == N and a
    # shape error otherwise).
    return torch.mean(
        torch.div(
            F.binary_cross_entropy_with_logits(x, y, reduction="none"),
            torch.sum(y, dim=1, keepdim=True),
        )
    )
def make_weights_for_balanced_classes(dataset):
    """Return per-example sampling weights that balance class frequencies.

    Every example of class ``y`` gets weight ``1 / (count(y) * n_classes)``,
    so each class contributes equal total mass.
    """
    labels = [int(y) for _, y in dataset]
    counts = Counter(labels)
    n_classes = len(counts)
    class_weight = {y: 1 / (c * n_classes) for y, c in counts.items()}
    weights = torch.zeros(len(dataset))
    for idx, label in enumerate(labels):
        weights[idx] = class_weight[label]
    return weights
def pdb():
    """Drop into the debugger, restoring the real stdout first.

    Restoring `sys.__stdout__` matters when stdout has been replaced (e.g.
    by the `Tee` class below), since pdb is unusable through a redirect.
    """
    sys.stdout = sys.__stdout__
    import pdb
    print("Launching PDB, enter 'n' to step to parent function.")
    pdb.set_trace()
def seed_hash(*args):
    """
    Derive an integer hash from all args, for use as a random seed.

    Deterministic across runs; the result lies in [0, 2**31).
    """
    encoded = str(args).encode("utf-8")
    digest = hashlib.md5(encoded).hexdigest()
    return int(digest, 16) % (2**31)
def print_separator():
    """Print a full-width horizontal rule."""
    print("="*80)

def print_row(row, colwidth=10, latex=False):
    """Print `row` as fixed-width columns, optionally LaTeX-formatted.

    Floats are rendered with 10 decimal places; every cell is
    left-justified and truncated to `colwidth` characters.
    """
    if latex:
        sep = " & "
        end_ = "\\\\"
    else:
        sep = " "
        end_ = ""
    def format_val(x):
        if np.issubdtype(type(x), np.floating):
            x = "{:.10f}".format(x)
        return str(x).ljust(colwidth)[:colwidth]
    # NOTE(review): `end_` is passed positionally (printed as a second,
    # space-separated value), not as `end=` -- this keeps the trailing
    # newline; confirm the resulting "\\" placement is intended for LaTeX.
    print(sep.join([format_val(x) for x in row]), end_)
class _SplitDataset(torch.utils.data.Dataset):
"""Used by split_dataset"""
def __init__(self, underlying_dataset, keys):
super(_SplitDataset, self).__init__()
self.underlying_dataset = underlying_dataset
self.keys = keys
def __getitem__(self, key):
return self.underlying_dataset[self.keys[key]]
def __len__(self):
return len(self.keys)
def split_dataset(dataset, n, seed=0):
    """
    Return a pair of datasets corresponding to a random split of the given
    dataset, with n datapoints in the first dataset and the rest in the last,
    using the given random seed
    """
    assert n <= len(dataset)
    indices = list(range(len(dataset)))
    np.random.RandomState(seed).shuffle(indices)
    first, second = indices[:n], indices[n:]
    return _SplitDataset(dataset, first), _SplitDataset(dataset, second)
def random_pairs_of_minibatches(minibatches):
    """Pair each minibatch (in a random order) with the next one, cyclically.

    Each pair is truncated to the smaller of the two batch sizes so the two
    halves line up elementwise.
    """
    order = torch.randperm(len(minibatches)).tolist()
    pairs = []
    for idx, src in enumerate(order):
        dst = order[(idx + 1) % len(order)]
        xi, yi = minibatches[src][0], minibatches[src][1]
        xj, yj = minibatches[dst][0], minibatches[dst][1]
        n = min(len(xi), len(xj))
        pairs.append(((xi[:n], yi[:n]), (xj[:n], yj[:n])))
    return pairs
def accuracy(network, loader, device, proto=0):
    """Compute classification accuracy of `network` over `loader`.

    Args:
        network: model exposing `predict`; switched to eval mode during the
            loop and back to train mode afterwards.
        loader: iterable of (x, y) batches.
        device: device the batches are moved to.
        proto: if >= 0 it is forwarded as `network.predict(x, proto, device)`;
            pass a negative value for the plain `network.predict(x)` call.

    Returns:
        Fraction of correctly classified examples (float).
    """
    correct = 0
    total = 0
    network.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            if proto >= 0:
                p = network.predict(x, proto, device)
            else:
                p = network.predict(x)
            # Uniform per-example weights.
            batch_weights = torch.ones(len(x))
            # NOTE(review): hard-coded .cuda() assumes a GPU even when
            # `device` is CPU -- confirm, or move to `device` instead.
            batch_weights = batch_weights.cuda()
            # Single-logit output: threshold at 0; otherwise argmax.
            if p.size(1) == 1:
                correct += (p.gt(0).eq(y).float() * batch_weights).sum().item()
            else:
                correct += (p.argmax(1).eq(y).float() * batch_weights).sum().item()
            total += batch_weights.sum().item()
    network.train()
    return correct / total

class Tee:
    """File-like object duplicating writes to the current stdout and a file.

    Typically installed as `sys.stdout` so console output is also logged.
    Flushes both targets after every write.
    """
    def __init__(self, fname, mode="a"):
        self.stdout = sys.stdout
        self.file = open(fname, mode)

    def write(self, message):
        self.stdout.write(message)
        self.file.write(message)
        self.flush()

    def flush(self):
        self.stdout.flush()
        self.file.flush()
class ParamDict(OrderedDict):
    """Code adapted from https://github.com/Alok/rl_implementations/tree/master/reptile.
    A dictionary where the values are Tensors, meant to represent weights of
    a model. This subclass lets you perform arithmetic on weights directly.

    Scalars (`numbers.Number`) are applied elementwise; dicts are combined
    key-by-key (keys are taken from `self`)."""

    def __init__(self, *args, **kwargs):
        # Fixed: was `super().__init__(*args, *kwargs)`, which unpacked the
        # keyword dict's *keys* positionally and crashed on keyword use.
        super().__init__(*args, **kwargs)

    def _prototype(self, other, op):
        """Apply `op` elementwise against a scalar or another dict."""
        if isinstance(other, Number):
            return ParamDict({k: op(v, other) for k, v in self.items()})
        elif isinstance(other, dict):
            return ParamDict({k: op(self[k], other[k]) for k in self})
        else:
            raise NotImplementedError

    def __add__(self, other):
        return self._prototype(other, operator.add)

    def __rmul__(self, other):
        return self._prototype(other, operator.mul)

    __mul__ = __rmul__

    def __neg__(self):
        return ParamDict({k: -v for k, v in self.items()})

    def __sub__(self, other):
        # a - b := a + (-b)
        return self.__add__(other.__neg__())

    def __rsub__(self, other):
        # Fixed: was aliased to __sub__, which returned `self - other` for
        # `other - self` expressions. other - self := (-self) + other.
        return self.__neg__().__add__(other)

    def __truediv__(self, other):
        return self._prototype(other, operator.truediv)
| 27.312195 | 88 | 0.607609 |
import hashlib
import json
import os
import sys
from shutil import copyfile
from collections import OrderedDict
from numbers import Number
import operator
import numpy as np
import torch
import tqdm
from collections import Counter
import torch.nn.functional as F
def cross_entropy(x, y):
    """Cross-entropy supporting index, one-hot-column, or many-hot targets.

    1-D integer targets (or an (N, 1) column) use plain F.cross_entropy;
    an (N, C) many-hot matrix uses per-element BCE normalized per sample.
    """
    if len(y.shape) == 1:
        return F.cross_entropy(x, y)
    if y.shape[1] == 1:
        y = y.squeeze(1)
        return F.cross_entropy(x, y)
    # NOTE(review): torch.sum(y, dim=1) has shape (N,), which broadcasts
    # against the class dimension of the (N, C) BCE matrix -- this looks
    # like it needs keepdim=True for row-wise normalization; confirm.
    return torch.mean(
        torch.div(
            F.binary_cross_entropy_with_logits(x, y, reduction="none"),
            torch.sum(y, dim=1),
        )
    )

def make_weights_for_balanced_classes(dataset):
    """Per-example sampling weights: 1 / (class count * number of classes)."""
    counts = Counter()
    classes = []
    for _, y in dataset:
        y = int(y)
        counts[y] += 1
        classes.append(y)
    n_classes = len(counts)
    weight_per_class = {}
    for y in counts:
        weight_per_class[y] = 1 / (counts[y] * n_classes)
    weights = torch.zeros(len(dataset))
    for i, y in enumerate(classes):
        weights[i] = weight_per_class[int(y)]
    return weights

def pdb():
    """Drop into the debugger, restoring the real stdout first."""
    sys.stdout = sys.__stdout__
    import pdb
    print("Launching PDB, enter 'n' to step to parent function.")
    pdb.set_trace()

def seed_hash(*args):
    """Derive a deterministic integer in [0, 2**31) from all args."""
    args_str = str(args)
    return int(hashlib.md5(args_str.encode("utf-8")).hexdigest(), 16) % (2**31)
def print_separator():
    """Print a full-width horizontal rule."""
    print("="*80)

def print_row(row, colwidth=10, latex=False):
    """Print `row` as fixed-width (optionally LaTeX-separated) columns."""
    if latex:
        sep = " & "
        end_ = "\\\\"
    else:
        sep = " "
        end_ = ""
    def format_val(x):
        # Floats get 10 decimal places; everything is truncated to colwidth.
        if np.issubdtype(type(x), np.floating):
            x = "{:.10f}".format(x)
        return str(x).ljust(colwidth)[:colwidth]
    # NOTE(review): `end_` is positional (a second printed value), not
    # `end=` -- confirm this is intended.
    print(sep.join([format_val(x) for x in row]), end_)

class _SplitDataset(torch.utils.data.Dataset):
    """View onto a dataset exposing only the examples at `keys` indices."""
    def __init__(self, underlying_dataset, keys):
        super(_SplitDataset, self).__init__()
        self.underlying_dataset = underlying_dataset
        self.keys = keys

    def __getitem__(self, key):
        return self.underlying_dataset[self.keys[key]]

    def __len__(self):
        return len(self.keys)

def split_dataset(dataset, n, seed=0):
    """Randomly split `dataset` into (first n examples, the rest)."""
    assert(n <= len(dataset))
    keys = list(range(len(dataset)))
    np.random.RandomState(seed).shuffle(keys)
    keys_1 = keys[:n]
    keys_2 = keys[n:]
    return _SplitDataset(dataset, keys_1), _SplitDataset(dataset, keys_2)

def random_pairs_of_minibatches(minibatches):
    """Pair each minibatch (in random order) with the next one, cyclically,
    truncating each pair to the smaller batch size."""
    perm = torch.randperm(len(minibatches)).tolist()
    pairs = []
    for i in range(len(minibatches)):
        j = i + 1 if i < (len(minibatches) - 1) else 0
        xi, yi = minibatches[perm[i]][0], minibatches[perm[i]][1]
        xj, yj = minibatches[perm[j]][0], minibatches[perm[j]][1]
        min_n = min(len(xi), len(xj))
        pairs.append(((xi[:min_n], yi[:min_n]), (xj[:min_n], yj[:min_n])))
    return pairs
def accuracy(network, loader, device, proto=0):
    """Classification accuracy of `network` over `loader`.

    `proto >= 0` is forwarded to `network.predict(x, proto, device)`;
    negative values use the plain `network.predict(x)`.
    """
    correct = 0
    total = 0
    network.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            if proto >= 0:
                p = network.predict(x, proto, device)
            else:
                p = network.predict(x)
            batch_weights = torch.ones(len(x))
            # NOTE(review): hard-coded .cuda() assumes a GPU even when
            # `device` is CPU -- confirm, or move to `device` instead.
            batch_weights = batch_weights.cuda()
            # Single-logit output: threshold at 0; otherwise argmax.
            if p.size(1) == 1:
                correct += (p.gt(0).eq(y).float() * batch_weights).sum().item()
            else:
                correct += (p.argmax(1).eq(y).float() * batch_weights).sum().item()
            total += batch_weights.sum().item()
    network.train()
    return correct / total

class Tee:
    """File-like object duplicating writes to stdout and a file,
    flushing both after every write."""
    def __init__(self, fname, mode="a"):
        self.stdout = sys.stdout
        self.file = open(fname, mode)

    def write(self, message):
        self.stdout.write(message)
        self.file.write(message)
        self.flush()

    def flush(self):
        self.stdout.flush()
        self.file.flush()
class ParamDict(OrderedDict):
    """Dictionary of model weights supporting elementwise arithmetic.

    Scalars (`numbers.Number`) are applied elementwise; dicts are combined
    key-by-key (keys taken from `self`)."""

    def __init__(self, *args, **kwargs):
        # Fixed: was `super().__init__(*args, *kwargs)`, which unpacked the
        # keyword dict's *keys* positionally and crashed on keyword use.
        super().__init__(*args, **kwargs)

    def _prototype(self, other, op):
        """Apply `op` elementwise against a scalar or another dict."""
        if isinstance(other, Number):
            return ParamDict({k: op(v, other) for k, v in self.items()})
        elif isinstance(other, dict):
            return ParamDict({k: op(self[k], other[k]) for k in self})
        else:
            raise NotImplementedError

    def __add__(self, other):
        return self._prototype(other, operator.add)

    def __rmul__(self, other):
        return self._prototype(other, operator.mul)

    __mul__ = __rmul__

    def __neg__(self):
        return ParamDict({k: -v for k, v in self.items()})

    def __sub__(self, other):
        # a - b := a + (-b)
        return self.__add__(other.__neg__())

    def __rsub__(self, other):
        # Fixed: was aliased to __sub__, which returned `self - other` for
        # `other - self` expressions. other - self := (-self) + other.
        return self.__neg__().__add__(other)

    def __truediv__(self, other):
        return self._prototype(other, operator.truediv)
| true | true |
1c3c359251e5128a884e99005c1729a6573be909 | 288 | py | Python | django_project/sunlumo_project/forms.py | icrni/sunlumo | 69f730d5adc42b71a24bcf27f1b8494b8d5dcec0 | [
"MIT"
] | null | null | null | django_project/sunlumo_project/forms.py | icrni/sunlumo | 69f730d5adc42b71a24bcf27f1b8494b8d5dcec0 | [
"MIT"
] | null | null | null | django_project/sunlumo_project/forms.py | icrni/sunlumo | 69f730d5adc42b71a24bcf27f1b8494b8d5dcec0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
LOG = logging.getLogger(__name__)
import django.forms as forms
# class aModelForm(forms.ModelForm):
# class Meta:
# model = aModel
# def __init__(self, *args, **kwargs):
# super(aModelForm, self).__init__(*args, **kwargs)
| 22.153846 | 59 | 0.642361 |
import logging
LOG = logging.getLogger(__name__)
import django.forms as forms
| true | true |
1c3c360f4d51e9c6d43ade8a7cacfd7f42b5ec98 | 7,730 | py | Python | facebook_sdk/facebook.py | zetahernandez/facebook-py-sdk | ab41452b884592f3330336d173fe99491979e1b8 | [
"MIT"
] | 17 | 2017-09-29T20:14:36.000Z | 2018-12-19T19:57:01.000Z | facebook_sdk/facebook.py | zetahernandez/facebook-py-sdk | ab41452b884592f3330336d173fe99491979e1b8 | [
"MIT"
] | 18 | 2017-10-10T14:20:31.000Z | 2020-10-15T17:06:50.000Z | facebook_sdk/facebook.py | zetahernandez/facebook-py-sdk | ab41452b884592f3330336d173fe99491979e1b8 | [
"MIT"
] | 10 | 2017-10-31T22:59:49.000Z | 2021-09-08T11:22:16.000Z | import os
from typing import ( # noqa: F401
TYPE_CHECKING,
Any,
Dict,
Iterable,
Mapping,
Optional,
Text,
Tuple,
Type,
Union,
cast,
)
from facebook_sdk.authentication import (
AccessToken,
OAuth2Client,
)
from facebook_sdk.client import FacebookClient
from facebook_sdk.constants import (
DEFAULT_GRAPH_VERSION,
METHOD_DELETE,
METHOD_GET,
METHOD_POST,
)
from facebook_sdk.exceptions import FacebookSDKException
from facebook_sdk.facebook_file import FacebookFile
from facebook_sdk.request import (
FacebookBatchRequest,
FacebookRequest,
)
if TYPE_CHECKING:
from facebook_sdk.response import FacebookResponse, FacebookBatchResponse # noqa: F401
APP_ID_ENV_NAME = 'FACEBOOK_APP_ID'
APP_SECRET_ENV_NAME = 'FACEBOOK_APP_SECRET'
class FacebookApp(object):
    """Value object holding a Facebook app's id and secret."""

    def __init__(self, app_id, app_secret):
        # type: (Text, Text) -> None
        super(FacebookApp, self).__init__()
        self.app_id = app_id
        self.secret = app_secret

    def access_token(self):
        # type: () -> AccessToken
        """Build the app access token, formatted as "<app_id>|<secret>"."""
        from facebook_sdk.authentication import AccessToken

        token_string = '{app_id}|{secret}'.format(
            app_id=self.app_id,
            secret=self.secret,
        )
        return AccessToken(access_token=token_string)
class Facebook(object):
def __init__(self, **kwargs):
# type: (Any) -> None
super(Facebook, self).__init__()
self.config = {
'app_id': os.getenv(APP_ID_ENV_NAME, kwargs.get('app_id')),
'app_secret': os.getenv(APP_ID_ENV_NAME, kwargs.get('app_secret')),
'default_graph_version': kwargs.get('default_graph_version', DEFAULT_GRAPH_VERSION),
'default_access_token': kwargs.get('default_access_token'),
}
if not self.config['app_id']:
raise FacebookSDKException(
'Required "app_id" key not supplied in config and could not find '
'fallback environment variable "{app_id_env_name}"'.format(
app_id_env_name=APP_ID_ENV_NAME,
)
)
if not self.config['app_secret']:
raise FacebookSDKException(
'Required "app_secret" key not supplied in config '
'and could not find fallback environment variable "{app_secret_env_name}"'.format(
app_secret_env_name=APP_SECRET_ENV_NAME,
)
)
self.default_graph_version = self.config.get('default_graph_version')
if self.config.get('default_access_token'):
self.set_default_access_token(cast(Text, self.config.get('default_access_token')))
self.app = FacebookApp(
app_id=cast(Text, self.config['app_id']),
app_secret=cast(Text, self.config['app_secret']),
)
self.client = FacebookClient(request_timeout=kwargs.get('default_request_timeout'))
self.oauth_client = OAuth2Client(
app=self.app,
client=self.client,
graph_version=self.default_graph_version,
)
    def request(
        self,
        method,  # type: Text
        endpoint,  # type: Text
        access_token=None,  # type: Optional[Text]
        params=None,  # type: Optional[Dict]
        headers=None,  # type: Optional[Dict]
        graph_version=None,  # type: Optional[Text]
        timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> FacebookRequest
        """Build (but do not send) a Graph API request.

        Falls back to the instance's default access token and default graph
        version when none are supplied.
        """
        access_token = access_token or getattr(self, 'default_access_token', None)
        graph_version = graph_version or self.default_graph_version
        return FacebookRequest(
            app=self.app,
            method=method,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
            timeout=timeout,
        )

    def send_request(
        self,
        method,  # type: Text
        endpoint,  # type: Text
        access_token=None,  # type: Optional[Text]
        params=None,  # type: Optional[Dict]
        headers=None,  # type: Optional[Dict]
        graph_version=None,  # type: Optional[Text]
        timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> FacebookResponse
        """Build a request (see `request`) and send it immediately."""
        request = self.request(
            method=method,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
            timeout=timeout,
        )
        response = self.send_facebook_request(request=request)
        return response

    def send_facebook_request(self, request):
        # type: (FacebookRequest) -> FacebookResponse
        """Send an already-built FacebookRequest through the HTTP client."""
        return self.client.send_request(request=request)

    def send_batch_request(
        self,
        requests,  # type: Union[Iterable[FacebookRequest], Mapping[Text, FacebookRequest]]
        access_token=None,  # type: Optional[Text]
        graph_version=None,  # type: Optional[Text]
        timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> FacebookBatchResponse
        """Send several requests in one Graph API batch call.

        `requests` may be a plain iterable or a mapping of names to
        requests; defaults for token and graph version are filled in.
        """
        access_token = access_token or getattr(self, 'default_access_token', None)
        graph_version = graph_version or self.default_graph_version
        batch_request = FacebookBatchRequest(
            app=self.app,
            requests=requests,
            access_token=access_token,
            graph_version=graph_version,
            timeout=timeout,
        )
        response = self.client.send_batch_request(batch_request=batch_request)
        return response
def set_default_access_token(self, access_token):
# type: (Union[Text, AccessToken]) -> None
if isinstance(access_token, str):
self.default_access_token = AccessToken(access_token=access_token)
elif isinstance(access_token, AccessToken):
self.default_access_token = access_token
else:
raise ValueError('The default access token must be of type "str" or AccessToken')
    def file_to_upload(self, path):
        # type: (Text) -> FacebookFile
        """Wrap a local file path for use as an upload parameter."""
        return FacebookFile(path=path)

    def post(
        self,
        endpoint,  # type: Text
        access_token=None,  # type: Optional[Text]
        params=None,  # type: Optional[Dict]
        headers=None,  # type: Optional[Dict]
        graph_version=None,  # type: Optional[Text]
    ):
        """Send a POST request to the given Graph API endpoint."""
        return self.send_request(
            method=METHOD_POST,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
        )

    def get(
        self,
        endpoint,  # type: Text
        access_token=None,  # type: Optional[Text]
        params=None,  # type: Optional[Dict]
        headers=None,  # type: Optional[Dict]
        graph_version=None,  # type: Optional[Text]
    ):
        """Send a GET request to the given Graph API endpoint."""
        return self.send_request(
            method=METHOD_GET,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
        )

    def delete(
        self,
        endpoint,  # type: Text
        access_token=None,  # type: Optional[Text]
        params=None,  # type: Optional[Dict]
        headers=None,  # type: Optional[Dict]
        graph_version=None,  # type: Optional[Text]
    ):
        """Send a DELETE request to the given Graph API endpoint."""
        return self.send_request(
            method=METHOD_DELETE,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
        )
| 31.942149 | 98 | 0.607245 | import os
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Mapping,
Optional,
Text,
Tuple,
Type,
Union,
cast,
)
from facebook_sdk.authentication import (
AccessToken,
OAuth2Client,
)
from facebook_sdk.client import FacebookClient
from facebook_sdk.constants import (
DEFAULT_GRAPH_VERSION,
METHOD_DELETE,
METHOD_GET,
METHOD_POST,
)
from facebook_sdk.exceptions import FacebookSDKException
from facebook_sdk.facebook_file import FacebookFile
from facebook_sdk.request import (
FacebookBatchRequest,
FacebookRequest,
)
if TYPE_CHECKING:
from facebook_sdk.response import FacebookResponse, FacebookBatchResponse
APP_ID_ENV_NAME = 'FACEBOOK_APP_ID'
APP_SECRET_ENV_NAME = 'FACEBOOK_APP_SECRET'
class FacebookApp(object):
    """Value object holding a Facebook app's id and secret."""
    def __init__(self, app_id, app_secret):
        super(FacebookApp, self).__init__()
        self.app_id = app_id
        self.secret = app_secret

    def access_token(self):
        """Build the app access token, formatted as "<app_id>|<secret>"."""
        from facebook_sdk.authentication import AccessToken
        return AccessToken(
            access_token='{app_id}|{secret}'.format(
                app_id=self.app_id,
                secret=self.secret,
            ),
        )
class Facebook(object):
def __init__(self, **kwargs):
super(Facebook, self).__init__()
self.config = {
'app_id': os.getenv(APP_ID_ENV_NAME, kwargs.get('app_id')),
'app_secret': os.getenv(APP_ID_ENV_NAME, kwargs.get('app_secret')),
'default_graph_version': kwargs.get('default_graph_version', DEFAULT_GRAPH_VERSION),
'default_access_token': kwargs.get('default_access_token'),
}
if not self.config['app_id']:
raise FacebookSDKException(
'Required "app_id" key not supplied in config and could not find '
'fallback environment variable "{app_id_env_name}"'.format(
app_id_env_name=APP_ID_ENV_NAME,
)
)
if not self.config['app_secret']:
raise FacebookSDKException(
'Required "app_secret" key not supplied in config '
'and could not find fallback environment variable "{app_secret_env_name}"'.format(
app_secret_env_name=APP_SECRET_ENV_NAME,
)
)
self.default_graph_version = self.config.get('default_graph_version')
if self.config.get('default_access_token'):
self.set_default_access_token(cast(Text, self.config.get('default_access_token')))
self.app = FacebookApp(
app_id=cast(Text, self.config['app_id']),
app_secret=cast(Text, self.config['app_secret']),
)
self.client = FacebookClient(request_timeout=kwargs.get('default_request_timeout'))
self.oauth_client = OAuth2Client(
app=self.app,
client=self.client,
graph_version=self.default_graph_version,
)
    def request(
        self,
        method,
        endpoint,
        access_token=None,
        params=None,
        headers=None,
        graph_version=None,
        timeout=None,
    ):
        """Build (not send) a FacebookRequest, filling in the default
        access token and graph version when not supplied."""
        access_token = access_token or getattr(self, 'default_access_token', None)
        graph_version = graph_version or self.default_graph_version
        return FacebookRequest(
            app=self.app,
            method=method,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
            timeout=timeout,
        )

    def send_request(
        self,
        method,
        endpoint,
        access_token=None,
        params=None,
        headers=None,
        graph_version=None,
        timeout=None,
    ):
        """Build a request (see `request`) and send it immediately."""
        request = self.request(
            method=method,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
            timeout=timeout,
        )
        response = self.send_facebook_request(request=request)
        return response

    def send_facebook_request(self, request):
        """Send an already-built FacebookRequest through the HTTP client."""
        return self.client.send_request(request=request)

    def send_batch_request(
        self,
        requests,
        access_token=None,
        graph_version=None,
        timeout=None,
    ):
        """Send several requests in one Graph API batch call."""
        access_token = access_token or getattr(self, 'default_access_token', None)
        graph_version = graph_version or self.default_graph_version
        batch_request = FacebookBatchRequest(
            app=self.app,
            requests=requests,
            access_token=access_token,
            graph_version=graph_version,
            timeout=timeout,
        )
        response = self.client.send_batch_request(batch_request=batch_request)
        return response

    def set_default_access_token(self, access_token):
        """Store the fallback token; plain strings are wrapped in AccessToken."""
        if isinstance(access_token, str):
            self.default_access_token = AccessToken(access_token=access_token)
        elif isinstance(access_token, AccessToken):
            self.default_access_token = access_token
        else:
            raise ValueError('The default access token must be of type "str" or AccessToken')

    def file_to_upload(self, path):
        """Wrap a local file path for use as an upload parameter."""
        return FacebookFile(path=path)

    def post(
        self,
        endpoint,
        access_token=None,
        params=None,
        headers=None,
        graph_version=None,
    ):
        """Send a POST request to the given Graph API endpoint."""
        return self.send_request(
            method=METHOD_POST,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
        )

    def get(
        self,
        endpoint,
        access_token=None,
        params=None,
        headers=None,
        graph_version=None,
    ):
        """Send a GET request to the given Graph API endpoint."""
        return self.send_request(
            method=METHOD_GET,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
        )

    def delete(
        self,
        endpoint,
        access_token=None,
        params=None,
        headers=None,
        graph_version=None,
    ):
        """Send a DELETE request to the given Graph API endpoint."""
        return self.send_request(
            method=METHOD_DELETE,
            access_token=access_token,
            endpoint=endpoint,
            params=params,
            headers=headers,
            graph_version=graph_version,
        )
| true | true |
1c3c36f7f5a69f28c0e93864a87e86921ab7416d | 837 | py | Python | example/test/test_usestep.py | steven004/pytest_oot | d8267e08191632ba32f0ce84ff0abad8fd842a3d | [
"MIT"
] | 2 | 2015-04-23T07:23:15.000Z | 2020-02-26T16:54:25.000Z | example/test/test_usestep.py | steven004/pytest_oot | d8267e08191632ba32f0ce84ff0abad8fd842a3d | [
"MIT"
] | 3 | 2015-01-20T02:04:13.000Z | 2016-09-19T03:14:26.000Z | example/test/test_usestep.py | steven004/pytest_oot | d8267e08191632ba32f0ce84ff0abad8fd842a3d | [
"MIT"
] | 1 | 2018-03-05T17:30:16.000Z | 2018-03-05T17:30:16.000Z | __author__ = 'Steven LI'
from .testbed import *
from test_steps import *
def test_NumBase_add_mulple():
    """Run add/multiple checks one `step()` expression at a time.

    NOTE(review): "mulple" looks like a typo for "multiple", but renaming
    would change the collected test id -- left as is.
    """
    step("num1.add(3,4,5,6) == 23")
    step("num1.multiple(2,4,5) == 200")
    step("num4.add(3,4,5,6) == 23")

def test_NumBase_add_multiple2():
    """Same style of checks expressed as a multi-line `steps()` script."""
    steps('''
        num1.add(3,4,5,6) == 23
        num1.multiple(2,4,5) == 200
        num4.add(3,4,5,6) == 41
    ''')

def test_NumBase_add_multiple3():
    """As above, using the `s()` shorthand for `steps()`."""
    s('''
        num1.add(3,4,5,6) == 23
        num1.multiple(2,4,5) == 200
        num4.add(3,4,5,6) == 55
    ''')

def test_async1():
    """Async example: -t gives data_sync() up to 18s to complete."""
    s('''
        num_async.addw(var100, var100) == 100
        num_async.data_sync() -t 18
        num_async.get_value() == 300
    ''')
def test_async2():
s('''
num_async.addw(var100, var100) >= 300
num_async.get_value() == 500 --repeat 20
''') | 23.25 | 48 | 0.532855 | __author__ = 'Steven LI'
from .testbed import *
from test_steps import *
def test_NumBase_add_mulple():
step("num1.add(3,4,5,6) == 23")
step("num1.multiple(2,4,5) == 200")
step("num4.add(3,4,5,6) == 23")
def test_NumBase_add_multiple2():
steps('''
num1.add(3,4,5,6) == 23
num1.multiple(2,4,5) == 200
num4.add(3,4,5,6) == 41
''')
def test_NumBase_add_multiple3():
s('''
num1.add(3,4,5,6) == 23
num1.multiple(2,4,5) == 200
num4.add(3,4,5,6) == 55
''')
def test_async1():
s('''
num_async.addw(var100, var100) == 100
num_async.data_sync() -t 18
num_async.get_value() == 300
''')
def test_async2():
s('''
num_async.addw(var100, var100) >= 300
num_async.get_value() == 500 --repeat 20
''') | true | true |
1c3c36f80c202066ee6e4861d511ce5139939c29 | 7,701 | py | Python | dahua.py | d34db33f-1007/asleep_scanner | 7fe03adad8335ce306ae2b7b933d8142205bd8c3 | [
"Apache-2.0"
] | 46 | 2020-04-15T12:03:24.000Z | 2022-03-22T04:39:16.000Z | dahua.py | S0Ulle33/asleep_scanner | 7fe03adad8335ce306ae2b7b933d8142205bd8c3 | [
"Apache-2.0"
] | 9 | 2020-06-14T19:51:04.000Z | 2022-03-08T12:25:04.000Z | dahua.py | S0Ulle33/asleep_scanner | 7fe03adad8335ce306ae2b7b933d8142205bd8c3 | [
"Apache-2.0"
] | 31 | 2020-06-15T09:35:17.000Z | 2022-03-28T22:54:02.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import logging
import re
import socket
import struct
import time
from enum import Enum
#import telegram
#from wrapt_timeout_decorator import *
LOGIN_TEMPLATE = b'\xa0\x00\x00\x60%b\x00\x00\x00%b%b%b%b\x04\x01\x00\x00\x00\x00\xa1\xaa%b&&%b\x00Random:%b\r\n\r\n'
GET_SERIAL = b'\xa4\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00'
GET_CHANNELS = b'\xa8\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00'
GET_PTZ = b'\xa4\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00'
GET_SOUND = b'\xa4\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\x00\x00\x00\x00'
GET_SNAPSHOT = b'\x11\x00\x00\x00(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\n\x00\x00\x00%b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00%b\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
JPEG_GARBAGE1 = b'\x0a%b\x00\x00\x0a\x00\x00\x00'
JPEG_GARBAGE2 = b'\xbc\x00\x00\x00\x00\x80\x00\x00%b'
# Speed-up / performance: socket timeout (seconds) used for device connections
TIMEOUT = 11
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')
class Status(Enum):
    """Authentication outcome reported by DahuaController."""
    SUCCESS = 0   # credentials accepted
    BLOCKED = 2   # device reported the login as blocked (reply bytes 8/9 == 1/4)
    NONE = -1     # not attempted, or reply too short / unrecognised
class DahuaController:
    """Minimal client for the Dahua DVR/IP-camera binary protocol.

    Connects over raw TCP, authenticates, and exposes queries for the
    device model, audio capability, channel count and per-channel JPEG
    snapshots.
    """

    __slots__ = ('model', 'ip', 'port', 'login', 'password', 'channels_count', 'status', 'sound', '_socket')

    def __init__(self, ip=None, port=None, login=None, password=None):
        self.model = ''
        self.ip = ip
        self.port = port
        self.login = login
        self.password = password
        self.channels_count = -1
        self.status = Status.NONE
        self.sound = None
        self._socket = None
        # Authenticate immediately when a complete endpoint and credentials
        # are supplied.
        if (ip and port) and (login and password):
            self.auth(login, password)

    def auth(self, login, password):
        """Connect and send the login packet; set self.status accordingly.

        On success the device model, sound capability and channel count are
        fetched right away.
        """
        self._socket = socket.create_connection((self.ip, self.port), TIMEOUT)
        self._socket.send(LOGIN_TEMPLATE % (struct.pack('b', 24 + len(login) + len(password)), login.encode('ascii'),
                                            (8 - len(login)) * b'\x00', password.encode('ascii'),
                                            (8 - len(password)) * b'\x00', login.encode('ascii'),
                                            password.encode('ascii'), str(int(time.time())).encode('ascii')))
        data = self._socket.recv(128)
        if len(data) >= 10:
            # Bytes 8 and 9 of the reply carry the authentication verdict.
            if data[8] == 1 and data[9] == 4:
                self.status = Status.BLOCKED
            elif data[8] == 0:
                self.login = login
                self.password = password
                self.status = Status.SUCCESS
        else:
            self.status = Status.NONE
        if self.status is Status.SUCCESS:
            self._socket.send(GET_PTZ)
            self.model = self.receive_msg().split(b'\x00')[0].decode('ascii')
            self.get_sound_info()
            self.get_ptz_info()
            self.get_channels_count()

    def get_sound_info(self):
        """Query and store the device's audio/record configuration string."""
        self._socket.send(GET_SOUND)
        get_soundInfo = self.receive_msg()
        self.sound = get_soundInfo.split(b'\x00')[0].decode('ascii')
        return self.sound

    def get_ptz_info(self):
        """Classify the device model, appending PTZ/sound capability tags.

        Known PTZ-capable models get '-PTZ-Sound-Mic'; otherwise a model
        whose config mentions 'Dahua.Device.Record.General' gets
        '-Sound-Mic'; anything else becomes 'unknown'.
        """
        succ = '-PTZ-Sound-Mic'
        ptz_data = {
            'DH-SD42212T-HN', 'CCTV-Camera-DH-SD50230U-HN', 'CCTV-Camera-DH-SD59220T-HN', 'DH-SD59220T-HN', 'CP-UNC-CS10L1W', 'DHI-HCVR4104C-S3', 'DHI-HCVR4104HS-S2', 'DHI-HCVR4108HS-S2', 'DHI-iDVR5116H-F', 'DHI-NVR4104H', 'DHI-NVR4104HS-P-4KS2', 'DHI-NVR4104-P', 'DHI-NVR4104_P', 'DHI-NVR4104-P-4KS2', 'DHI-NVR4104_W', 'DH-IPC-A35N', 'DH-IPC-A46P', 'DH-IPC-AW12W', 'DH-IPC-AW12WN', 'DH-IPC-AW12WP', 'DH-IPC-K15', 'DH-IPC-K15P', 'DH-IPC-KW12WP', 'DH-SD22204T-GN', 'DH-SD22204T-GN-W', 'DH-SD22204TN-GN', 'DH-SD29204T-GN-W', 'DH-SD-32D203S-HN', 'DH-SD42212T-HN', 'DH-SD42212TN-HN', 'DH-SD50120S-HN', 'DH-SD50220T-HN', 'DH-SD59120T-HN', 'DH-SD59120TN-HN', 'DH-SD59131UN-HNI', 'DH-SD59220SN-HN', 'DH-SD59220T-HN', 'DH-SD59220TN-HN', 'DH-SD59225U-HNI', 'DH-SD59230S-HN', 'DH-SD59230T-HN', 'DH-SD59230U-HNI', 'DH-SD59430U-HN', 'DH-SD59430U-HNI', 'DH-SD6582A-HN', 'DH-SD6C120T-HN', 'DH-SD-6C1220S-HN', 'DH-SD6C220S-HN', 'DH-SD6C220T-HN', 'DH-SD6C230S-HN', 'DVR-HF-A', 'IP2M-841B', 'IP2M-841B-UK', 'IP2M-841W-UK', 'IPC-A15', 'IPC-A35', 'IPC-A7', 'IP Camera', 'IPC-AW12W', 'IPC-HDBW1000E-W', 'IP', 'PTZ', 'IPC', 'IPC-HDBW1320E-W', 'IPC-HDPW4200F-WPT', 'IPC-HDPW4221F-W', 'IPC-HFW1000S-W', 'IPC-HFW1320S-W', 'IPC-HFW1435S-W', 'IPC-HFW2325S-W', 'IPC-HFW4431E-S', 'IPC-HFW5200E-Z12', 'IPC-K100W', 'IPC-K15', 'IPC-K200W', 'IPC-KW100W', 'IPC-KW10W', 'IPC-KW12W', 'IPD-IZ22204T-GN', 'IPM-721S', 'IP PTZ Dome', 'PTZ Dome', 'IPPTZ-EL2L12X-MINI-I', 'LTV-ISDNI3-SDM2', 'MDVR_MEUED', 'RVi-IPC11W', 'SD59120T-HN', 'SD59220TN-HN', 'SD6982A-HN', 'SDQCN8029Z', 'ST-712-IP-PRO-D', 'VTO2111D', 'XS-IPCV026-3W'}
        test_sound = re.findall('Dahua.Device.Record.General', self.sound)
        if self.model in ptz_data:
            self.model = self.model + succ
            return self.model
        elif test_sound:
            self.model = self.model + "-Sound-Mic"
            return self.model
        else:
            self.model = "unknown"
            return self.model

    def get_channels_count(self):
        """Query the number of video channels the device reports."""
        self._socket.send(GET_CHANNELS)
        channels = self.receive_msg()
        self.channels_count = channels.count(b'&&') + 1
        return self.channels_count

    def receive_msg(self):
        """Read one protocol message and return its payload.

        The 32-byte header stores the payload length as a little-endian
        uint16 at offset 4.  A short/invalid header lets struct.error
        propagate unchanged (previously it was caught only to be re-raised
        as a bare class, losing the original message).
        """
        header = self._socket.recv(32)
        length = struct.unpack('<H', header[4:6])[0]
        data = self._socket.recv(length)
        return data

    def get_snapshot(self, channel_id):
        """Request and return one JPEG snapshot for the given channel."""
        channel_id = struct.pack('B', channel_id)
        self._socket.send(GET_SNAPSHOT % (channel_id, channel_id))
        # Use a short timeout while streaming the image, then restore it.
        self._socket.settimeout(4)
        data = self.receive_msg_2(channel_id)
        self._socket.settimeout(TIMEOUT)
        return data

    def receive_msg_2(self, c_id):
        """Receive a JPEG stream for channel `c_id`, stripping protocol framing.

        Reads until the JPEG end-of-image marker appears, drops the 32-byte
        leading header, then removes the two per-channel framing patterns
        interleaved with the image data.
        """
        garbage = JPEG_GARBAGE1 % c_id
        garbage2 = JPEG_GARBAGE2 % c_id
        data = b''
        i = 0
        while True:
            buf = self._socket.recv(1460)
            if not buf:
                break
            if i == 0:
                # The first packet starts with a 32-byte protocol header.
                buf = buf[32:]
            data += buf
            if b'\xff\xd9' in data:  # JPEG end-of-image marker
                break
            i += 1
        while garbage in data:
            t_start = data.find(garbage)
            t_end = t_start + len(garbage)
            t_start -= 24
            trash = data[t_start:t_end]
            data = data.replace(trash, b'')
        while garbage2 in data:
            t_start = data.find(garbage2)
            t_end = t_start + 32
            trash = data[t_start:t_end]
            data = data.replace(trash, b'')
        return data
| 48.13125 | 1,572 | 0.595507 |
import logging
import re
import socket
import struct
import time
from enum import Enum
# Raw request packets of the Dahua binary protocol; %b placeholders are
# substituted with struct-packed values before sending.
LOGIN_TEMPLATE = b'\xa0\x00\x00\x60%b\x00\x00\x00%b%b%b%b\x04\x01\x00\x00\x00\x00\xa1\xaa%b&&%b\x00Random:%b\r\n\r\n'
GET_SERIAL = b'\xa4\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
             b'\x00\x00\x00\x00\x00\x00\x00'
GET_CHANNELS = b'\xa8\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
               b'\x00\x00\x00\x00\x00\x00\x00\x00'
GET_PTZ = b'\xa4\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
          b'\x00\x00\x00\x00\x00\x00\x00\x00'
GET_SOUND = b'\xa4\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00'
# Snapshot request: both %b placeholders receive the packed channel id.
GET_SNAPSHOT = b'\x11\x00\x00\x00(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
               b'\x00\x00\x00\n\x00\x00\x00%b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
               b'\x00\x00%b\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
# Per-channel framing patterns stripped out of the JPEG snapshot stream
# (see DahuaController.receive_msg_2).
JPEG_GARBAGE1 = b'\x0a%b\x00\x00\x0a\x00\x00\x00'
JPEG_GARBAGE2 = b'\xbc\x00\x00\x00\x00\x80\x00\x00%b'
# Socket timeout in seconds.
TIMEOUT = 11
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')
class Status(Enum):
    """Authentication outcome reported by DahuaController."""
    SUCCESS = 0   # credentials accepted
    BLOCKED = 2   # device reported the login as blocked (reply bytes 8/9 == 1/4)
    NONE = -1     # not attempted, or reply too short / unrecognised
class DahuaController:
    """Minimal client for the Dahua DVR/IP-camera binary protocol.

    Connects over raw TCP, authenticates, and exposes queries for the
    device model, audio capability, channel count and per-channel JPEG
    snapshots.

    Bug fix: the `def get_snapshot(self, channel_id):` line had been lost,
    which left the snapshot-request statements stranded as dead code after
    `receive_msg`'s return; the method is restored here.
    """

    __slots__ = ('model', 'ip', 'port', 'login', 'password', 'channels_count', 'status', 'sound', '_socket')

    def __init__(self, ip=None, port=None, login=None, password=None):
        self.model = ''
        self.ip = ip
        self.port = port
        self.login = login
        self.password = password
        self.channels_count = -1
        self.status = Status.NONE
        self.sound = None
        self._socket = None
        # Authenticate immediately when a complete endpoint and credentials
        # are supplied.
        if (ip and port) and (login and password):
            self.auth(login, password)

    def auth(self, login, password):
        """Connect and send the login packet; set self.status accordingly.

        On success the device model, sound capability and channel count are
        fetched right away.
        """
        self._socket = socket.create_connection((self.ip, self.port), TIMEOUT)
        self._socket.send(LOGIN_TEMPLATE % (struct.pack('b', 24 + len(login) + len(password)), login.encode('ascii'),
                                            (8 - len(login)) * b'\x00', password.encode('ascii'),
                                            (8 - len(password)) * b'\x00', login.encode('ascii'),
                                            password.encode('ascii'), str(int(time.time())).encode('ascii')))
        data = self._socket.recv(128)
        if len(data) >= 10:
            # Bytes 8 and 9 of the reply carry the authentication verdict.
            if data[8] == 1 and data[9] == 4:
                self.status = Status.BLOCKED
            elif data[8] == 0:
                self.login = login
                self.password = password
                self.status = Status.SUCCESS
        else:
            self.status = Status.NONE
        if self.status is Status.SUCCESS:
            self._socket.send(GET_PTZ)
            self.model = self.receive_msg().split(b'\x00')[0].decode('ascii')
            self.get_sound_info()
            self.get_ptz_info()
            self.get_channels_count()

    def get_sound_info(self):
        """Query and store the device's audio/record configuration string."""
        self._socket.send(GET_SOUND)
        get_soundInfo = self.receive_msg()
        self.sound = get_soundInfo.split(b'\x00')[0].decode('ascii')
        return self.sound

    def get_ptz_info(self):
        """Classify the device model, appending PTZ/sound capability tags.

        Known PTZ-capable models get '-PTZ-Sound-Mic'; otherwise a model
        whose config mentions 'Dahua.Device.Record.General' gets
        '-Sound-Mic'; anything else becomes 'unknown'.
        """
        succ = '-PTZ-Sound-Mic'
        ptz_data = {
            'DH-SD42212T-HN', 'CCTV-Camera-DH-SD50230U-HN', 'CCTV-Camera-DH-SD59220T-HN', 'DH-SD59220T-HN', 'CP-UNC-CS10L1W', 'DHI-HCVR4104C-S3', 'DHI-HCVR4104HS-S2', 'DHI-HCVR4108HS-S2', 'DHI-iDVR5116H-F', 'DHI-NVR4104H', 'DHI-NVR4104HS-P-4KS2', 'DHI-NVR4104-P', 'DHI-NVR4104_P', 'DHI-NVR4104-P-4KS2', 'DHI-NVR4104_W', 'DH-IPC-A35N', 'DH-IPC-A46P', 'DH-IPC-AW12W', 'DH-IPC-AW12WN', 'DH-IPC-AW12WP', 'DH-IPC-K15', 'DH-IPC-K15P', 'DH-IPC-KW12WP', 'DH-SD22204T-GN', 'DH-SD22204T-GN-W', 'DH-SD22204TN-GN', 'DH-SD29204T-GN-W', 'DH-SD-32D203S-HN', 'DH-SD42212T-HN', 'DH-SD42212TN-HN', 'DH-SD50120S-HN', 'DH-SD50220T-HN', 'DH-SD59120T-HN', 'DH-SD59120TN-HN', 'DH-SD59131UN-HNI', 'DH-SD59220SN-HN', 'DH-SD59220T-HN', 'DH-SD59220TN-HN', 'DH-SD59225U-HNI', 'DH-SD59230S-HN', 'DH-SD59230T-HN', 'DH-SD59230U-HNI', 'DH-SD59430U-HN', 'DH-SD59430U-HNI', 'DH-SD6582A-HN', 'DH-SD6C120T-HN', 'DH-SD-6C1220S-HN', 'DH-SD6C220S-HN', 'DH-SD6C220T-HN', 'DH-SD6C230S-HN', 'DVR-HF-A', 'IP2M-841B', 'IP2M-841B-UK', 'IP2M-841W-UK', 'IPC-A15', 'IPC-A35', 'IPC-A7', 'IP Camera', 'IPC-AW12W', 'IPC-HDBW1000E-W', 'IP', 'PTZ', 'IPC', 'IPC-HDBW1320E-W', 'IPC-HDPW4200F-WPT', 'IPC-HDPW4221F-W', 'IPC-HFW1000S-W', 'IPC-HFW1320S-W', 'IPC-HFW1435S-W', 'IPC-HFW2325S-W', 'IPC-HFW4431E-S', 'IPC-HFW5200E-Z12', 'IPC-K100W', 'IPC-K15', 'IPC-K200W', 'IPC-KW100W', 'IPC-KW10W', 'IPC-KW12W', 'IPD-IZ22204T-GN', 'IPM-721S', 'IP PTZ Dome', 'PTZ Dome', 'IPPTZ-EL2L12X-MINI-I', 'LTV-ISDNI3-SDM2', 'MDVR_MEUED', 'RVi-IPC11W', 'SD59120T-HN', 'SD59220TN-HN', 'SD6982A-HN', 'SDQCN8029Z', 'ST-712-IP-PRO-D', 'VTO2111D', 'XS-IPCV026-3W'}
        test_sound = re.findall('Dahua.Device.Record.General', self.sound)
        if self.model in ptz_data:
            self.model = self.model + succ
            return self.model
        elif test_sound:
            self.model = self.model + "-Sound-Mic"
            return self.model
        else:
            self.model = "unknown"
            return self.model

    def get_channels_count(self):
        """Query the number of video channels the device reports."""
        self._socket.send(GET_CHANNELS)
        channels = self.receive_msg()
        self.channels_count = channels.count(b'&&') + 1
        return self.channels_count

    def receive_msg(self):
        """Read one protocol message and return its payload.

        The 32-byte header stores the payload length as a little-endian
        uint16 at offset 4.  A short/invalid header lets struct.error
        propagate unchanged (previously it was caught only to be re-raised
        as a bare class, losing the original message).
        """
        header = self._socket.recv(32)
        length = struct.unpack('<H', header[4:6])[0]
        data = self._socket.recv(length)
        return data

    def get_snapshot(self, channel_id):
        """Request and return one JPEG snapshot for the given channel."""
        channel_id = struct.pack('B', channel_id)
        self._socket.send(GET_SNAPSHOT % (channel_id, channel_id))
        # Use a short timeout while streaming the image, then restore it.
        self._socket.settimeout(4)
        data = self.receive_msg_2(channel_id)
        self._socket.settimeout(TIMEOUT)
        return data

    def receive_msg_2(self, c_id):
        """Receive a JPEG stream for channel `c_id`, stripping protocol framing.

        Reads until the JPEG end-of-image marker appears, drops the 32-byte
        leading header, then removes the two per-channel framing patterns
        interleaved with the image data.
        """
        garbage = JPEG_GARBAGE1 % c_id
        garbage2 = JPEG_GARBAGE2 % c_id
        data = b''
        i = 0
        while True:
            buf = self._socket.recv(1460)
            if not buf:
                break
            if i == 0:
                # The first packet starts with a 32-byte protocol header.
                buf = buf[32:]
            data += buf
            if b'\xff\xd9' in data:  # JPEG end-of-image marker
                break
            i += 1
        while garbage in data:
            t_start = data.find(garbage)
            t_end = t_start + len(garbage)
            t_start -= 24
            trash = data[t_start:t_end]
            data = data.replace(trash, b'')
        while garbage2 in data:
            t_start = data.find(garbage2)
            t_end = t_start + 32
            trash = data[t_start:t_end]
            data = data.replace(trash, b'')
        return data
| true | true |
1c3c393ffe7073b18db0519cff362aef565702eb | 8,981 | py | Python | rssynergia/base_diagnostics/step_diagnostic.py | radiasoft/rs_synergia | b43509de7f4a938354dc127762d8e723463e0e95 | [
"Apache-2.0"
] | null | null | null | rssynergia/base_diagnostics/step_diagnostic.py | radiasoft/rs_synergia | b43509de7f4a938354dc127762d8e723463e0e95 | [
"Apache-2.0"
] | null | null | null | rssynergia/base_diagnostics/step_diagnostic.py | radiasoft/rs_synergia | b43509de7f4a938354dc127762d8e723463e0e95 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""?
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import synergia
import os
import numpy as np
import h5py as h5
from mpi4py import MPI
# Communicator spanning all MPI processes; used to gather bunch data.
comm_world = MPI.COMM_WORLD
try:
    import __builtin__  # Python 2 builtins module
except ImportError:
    # Python 3
    import builtins as __builtin__
my_rank = comm_world.rank  # this process's MPI rank (0 == root)
def print(*args, **kwargs):
    """Rank-0-only print: non-root MPI ranks stay silent."""
    if my_rank != 0:
        return None
    return __builtin__.print(*args, **kwargs)
class CustomDiagnostic(synergia.simulation.Propagate_actions):
    """Step-end action that records bunch diagnostics at chosen lattice points.

    Diagnostic points can be chosen by element name, by position in the
    lattice (meters), or by direct step number.  Each point is paired with a
    callable that receives the bunch object and returns the datum to store.
    Data accumulates in Datum objects tagged with where it was collected.
    """

    # Defaults are immutable tuples (avoids the shared-mutable-default pitfall).
    def __init__(self, stepper, element_names=(), step_numbers=(), positions=()):
        """
        :param stepper: Synergia stepper object being used by the propagator.
        :param element_names: iterable of (element_name, diagnostic_callable)
            tuples; output is taken in the middle of the element.
        :param step_numbers: iterable of (step_number, diagnostic_callable).
        :param positions: iterable of (position_m, diagnostic_callable); the
            closest step to each position is used.
        """
        synergia.simulation.Propagate_actions.__init__(self)
        self.stepper = stepper
        self.steps = []
        self.diagnostics = []
        self.data = []
        for elem_name, diagnostic in element_names:
            elem_steps = self.find_element_steps(self.stepper, elem_name)
            # Bug fix: the former `elif len(elem_steps) == 1` branch was
            # unreachable (`len > 0` already covers it); for a single-step
            # list the middle index is that step anyway.
            if elem_steps:
                self.steps.append(elem_steps[len(elem_steps) // 2])  # middle of element
                self.diagnostics.append(diagnostic)
            else:
                print("Could not find element: {}".format(elem_name))
        for step_number, diagnostic in step_numbers:
            if step_number not in self.steps:
                self.steps.append(step_number)
                self.diagnostics.append(diagnostic)
        for target, diagnostic in positions:
            pos_step = self.find_step_position(self.stepper, target)
            print("For position: {}, the closest step is {} m away".format(target, pos_step[1]))
            if pos_step[0] not in self.steps:
                self.steps.append(pos_step[0])
                self.diagnostics.append(diagnostic)
        for step, diag in zip(self.steps, self.diagnostics):
            assert callable(diag), "Diagnostic {} is not callable".format(diag)
            x = Datum(*self.find_step_information(self.stepper, step))
            x.diagnostic_name = diag.__name__
            self.data.append(x)

    @staticmethod
    def find_step_information(stepper, step_number):
        """Return (step_number, element_name, arc_length) for the given step.

        NOTE(review): returns None implicitly when step_number is out of
        range, which makes `Datum(*...)` in __init__ raise -- confirm step
        numbers are always valid before relying on this.
        """
        for i, step in enumerate(stepper.get_steps()):
            if i == step_number:
                oper = step.get_operators()[-1]
                slc = oper.get_slices()[-1]
                slice_element = slc.get_lattice_element()
                position = stepper.get_lattice_simulator().get_lattice_functions(slc).arc_length
                return i, slice_element.get_name(), position

    @staticmethod
    def find_element_steps(stepper, element_name):
        """Return step numbers whose last slice belongs to the named element."""
        # TODO: Need to check element memory address to split out multiple element instances
        steps = []
        for step_number, step in enumerate(stepper.get_steps()):
            oper = step.get_operators()[-1]
            slc = oper.get_slices()[-1]
            slice_element = slc.get_lattice_element()
            if slice_element.get_name() == element_name:
                position = stepper.get_lattice_simulator().get_lattice_functions(slc).arc_length
                steps.append(step_number)
                print("Step: {}: Slice {}, Element {}, position {}".format(step_number, slc,
                                                                           slice_element.get_name(),
                                                                           position))
        return steps

    @staticmethod
    def find_step_position(stepper, target_position):
        """Return (step_number, distance_m) of the step closest to target_position."""
        closest_step = (0, 1e130)
        for step_number, step in enumerate(stepper.get_steps()):
            oper = step.get_operators()[-1]
            slc = oper.get_slices()[-1]
            position = stepper.get_lattice_simulator().get_lattice_functions(slc).arc_length
            if abs(target_position - position) < closest_step[1]:
                closest_step = (step_number, abs(target_position - position))
        return closest_step

    def write_datafiles(self, directory, filesnames=None):
        """Write each accumulated Datum to its own HDF5 file in `directory`."""
        if filesnames:
            assert len(filesnames) == len(self.data), 'Number of supplied filenames not equal to number of datasets'
        else:
            filesnames = [None] * len(self.data)
        for data, name in zip(self.data, filesnames):
            data.write_data(directory, filename=name)

    def step_end_action(self, stepper, step, bunch, turn_num, step_num):
        """Called by Synergia after every step; records data at chosen steps.

        Parameter order (stepper, step, bunch, turn_num, step_num) must be
        preserved: it overloads Propagate_actions.step_end_action.
        """
        # TODO: Add option for particular turn intervals
        if step_num in self.steps:
            indices = np.where(np.array(self.steps) == step_num)[0]
            for index in indices:
                datum = self.diagnostics[index](bunch)
                self.data[index].turn_num.append(turn_num)
                self.data[index].data.append(datum)
class Datum:
    """Accumulates diagnostic values collected at one lattice step."""

    def __init__(self, step_num, element_name, position):
        self.data = []          # one recorded value per visit
        self.turn_num = []      # turn numbers aligned with self.data
        self.step_num = step_num
        self.element_name = element_name
        self.position = position
        self.diagnostic_name = None  # set by the caller after creation

    def write_data(self, directory, filename=None):
        """Write the accumulated data to an HDF5 file (MPI rank 0 only).

        :param directory: output directory, created if missing.
        :param filename: optional file name; defaults to
            'data_<diagnostic>_step_<step>.h5'.
        """
        # TODO: allow for data caching on an interval
        if my_rank == 0:
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(directory, exist_ok=True)
            if not filename:
                filename = 'data_{}_step_{}.h5'.format(self.diagnostic_name, self.step_num)
            filepath = os.path.join(directory, filename)
            # Context manager guarantees the file is closed even on error.
            with h5.File(filepath, 'w') as datafile:
                for name, attr in [('step_num', self.step_num), ('element_name', self.element_name),
                                   ('position', self.position)]:
                    datafile.attrs[name] = attr
                datafile.create_dataset('turns', data=self.turn_num)
                # isinstance instead of `type(...) ==` also accepts ndarray
                # subclasses.
                if isinstance(self.data[0], np.ndarray):
                    for i, d in enumerate(self.data):
                        datafile.create_dataset('data{}'.format(i), data=d)
                else:
                    datafile.create_dataset('data', data=np.array(self.data))
def xdistribution(bunch):
    """Gather all particles onto rank 0 and histogram the x coordinate.

    Returns np.array([counts, bin_centers]) on rank 0; None on other ranks.
    """
    gathered = comm_world.gather(bunch.get_local_particles(), root=0)
    if comm_world.rank != 0:
        return None
    xs = np.vstack(gathered)[:, 0]
    counts, edges = np.histogram(xs, range=(xs.min(), xs.max()), bins='fd')
    centers = [0.5 * (edges[k - 1] + edges[k]) for k in range(1, edges.size)]
    return np.array([counts, centers])
def xydistribution(bunch):
    """Gather all particles onto rank 0 and histogram the (x, y) plane.

    Returns, on rank 0 only, np.array([flat_counts, x_edges, y_edges],
    dtype=object), where flat_counts is the flattened 64x64 histogram with a
    trailing -1 sentinel.  Non-root ranks return None, matching
    xdistribution.
    """
    all_particles = comm_world.gather(bunch.get_local_particles(), root=0)
    if comm_world.rank == 0:
        all_particles = np.vstack(all_particles)
        minx, maxx = all_particles[:, 0].min(), all_particles[:, 0].max()
        miny, maxy = all_particles[:, 1].min(), all_particles[:, 1].max()
        hist, binsx, binsy = np.histogram2d(all_particles[:, 0],
                                            all_particles[:, 1],
                                            range=[[minx, maxx], [miny, maxy]], bins=64)
        # np.append flattens the 2-D histogram; -1 marks the end sentinel.
        hist = np.append(hist, [-1])
        # Bug fix: the return was previously outside this branch, so non-root
        # ranks hit a NameError on the undefined locals.  dtype=object is
        # needed because the three rows have different lengths.
        return np.array([hist, binsx, binsy], dtype=object)
| 42.563981 | 119 | 0.613183 |
from __future__ import absolute_import, division, print_function
import synergia
import os
import numpy as np
import h5py as h5
from mpi4py import MPI
# Communicator spanning all MPI processes; used to gather bunch data.
comm_world = MPI.COMM_WORLD
try:
    import __builtin__  # Python 2 builtins module
except ImportError:
    import builtins as __builtin__  # Python 3
my_rank = comm_world.rank  # this process's MPI rank (0 == root)
def print(*args, **kwargs):
    """Rank-0-only print: non-root MPI ranks stay silent."""
    if my_rank != 0:
        return None
    return __builtin__.print(*args, **kwargs)
class CustomDiagnostic(synergia.simulation.Propagate_actions):
    """Step-end action that records bunch diagnostics at chosen lattice points.

    Diagnostic points can be chosen by element name, by position in the
    lattice (meters), or by direct step number.  Each point is paired with a
    callable that receives the bunch object and returns the datum to store.
    Data accumulates in Datum objects tagged with where it was collected.
    """

    # Defaults are immutable tuples (avoids the shared-mutable-default pitfall).
    def __init__(self, stepper, element_names=(), step_numbers=(), positions=()):
        """
        :param stepper: Synergia stepper object being used by the propagator.
        :param element_names: iterable of (element_name, diagnostic_callable)
            tuples; output is taken in the middle of the element.
        :param step_numbers: iterable of (step_number, diagnostic_callable).
        :param positions: iterable of (position_m, diagnostic_callable); the
            closest step to each position is used.
        """
        synergia.simulation.Propagate_actions.__init__(self)
        self.stepper = stepper
        self.steps = []
        self.diagnostics = []
        self.data = []
        for elem_name, diagnostic in element_names:
            elem_steps = self.find_element_steps(self.stepper, elem_name)
            # Bug fix: the former `elif len(elem_steps) == 1` branch was
            # unreachable (`len > 0` already covers it); for a single-step
            # list the middle index is that step anyway.
            if elem_steps:
                self.steps.append(elem_steps[len(elem_steps) // 2])  # middle of element
                self.diagnostics.append(diagnostic)
            else:
                print("Could not find element: {}".format(elem_name))
        for step_number, diagnostic in step_numbers:
            if step_number not in self.steps:
                self.steps.append(step_number)
                self.diagnostics.append(diagnostic)
        for target, diagnostic in positions:
            pos_step = self.find_step_position(self.stepper, target)
            print("For position: {}, the closest step is {} m away".format(target, pos_step[1]))
            if pos_step[0] not in self.steps:
                self.steps.append(pos_step[0])
                self.diagnostics.append(diagnostic)
        for step, diag in zip(self.steps, self.diagnostics):
            assert callable(diag), "Diagnostic {} is not callable".format(diag)
            x = Datum(*self.find_step_information(self.stepper, step))
            x.diagnostic_name = diag.__name__
            self.data.append(x)

    @staticmethod
    def find_step_information(stepper, step_number):
        """Return (step_number, element_name, arc_length) for the given step.

        NOTE(review): returns None implicitly when step_number is out of
        range, which makes `Datum(*...)` in __init__ raise -- confirm step
        numbers are always valid before relying on this.
        """
        for i, step in enumerate(stepper.get_steps()):
            if i == step_number:
                oper = step.get_operators()[-1]
                slc = oper.get_slices()[-1]
                slice_element = slc.get_lattice_element()
                position = stepper.get_lattice_simulator().get_lattice_functions(slc).arc_length
                return i, slice_element.get_name(), position

    @staticmethod
    def find_element_steps(stepper, element_name):
        """Return step numbers whose last slice belongs to the named element."""
        steps = []
        for step_number, step in enumerate(stepper.get_steps()):
            oper = step.get_operators()[-1]
            slc = oper.get_slices()[-1]
            slice_element = slc.get_lattice_element()
            if slice_element.get_name() == element_name:
                position = stepper.get_lattice_simulator().get_lattice_functions(slc).arc_length
                steps.append(step_number)
                print("Step: {}: Slice {}, Element {}, position {}".format(step_number, slc,
                                                                           slice_element.get_name(),
                                                                           position))
        return steps

    @staticmethod
    def find_step_position(stepper, target_position):
        """Return (step_number, distance_m) of the step closest to target_position."""
        closest_step = (0, 1e130)
        for step_number, step in enumerate(stepper.get_steps()):
            oper = step.get_operators()[-1]
            slc = oper.get_slices()[-1]
            position = stepper.get_lattice_simulator().get_lattice_functions(slc).arc_length
            if abs(target_position - position) < closest_step[1]:
                closest_step = (step_number, abs(target_position - position))
        return closest_step

    def write_datafiles(self, directory, filesnames=None):
        """Write each accumulated Datum to its own HDF5 file in `directory`."""
        if filesnames:
            assert len(filesnames) == len(self.data), 'Number of supplied filenames not equal to number of datasets'
        else:
            filesnames = [None] * len(self.data)
        for data, name in zip(self.data, filesnames):
            data.write_data(directory, filename=name)

    def step_end_action(self, stepper, step, bunch, turn_num, step_num):
        """Called by Synergia after every step; records data at chosen steps.

        Parameter order (stepper, step, bunch, turn_num, step_num) must be
        preserved: it overloads Propagate_actions.step_end_action.
        """
        if step_num in self.steps:
            indices = np.where(np.array(self.steps) == step_num)[0]
            for index in indices:
                datum = self.diagnostics[index](bunch)
                self.data[index].turn_num.append(turn_num)
                self.data[index].data.append(datum)
class Datum:
    """Accumulates diagnostic values collected at one lattice step."""

    def __init__(self, step_num, element_name, position):
        self.data = []          # one recorded value per visit
        self.turn_num = []      # turn numbers aligned with self.data
        self.step_num = step_num
        self.element_name = element_name
        self.position = position
        self.diagnostic_name = None  # set by the caller after creation

    def write_data(self, directory, filename=None):
        """Write the accumulated data to an HDF5 file (MPI rank 0 only).

        :param directory: output directory, created if missing.
        :param filename: optional file name; defaults to
            'data_<diagnostic>_step_<step>.h5'.
        """
        if my_rank == 0:
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(directory, exist_ok=True)
            if not filename:
                filename = 'data_{}_step_{}.h5'.format(self.diagnostic_name, self.step_num)
            filepath = os.path.join(directory, filename)
            # Context manager guarantees the file is closed even on error.
            with h5.File(filepath, 'w') as datafile:
                for name, attr in [('step_num', self.step_num), ('element_name', self.element_name),
                                   ('position', self.position)]:
                    datafile.attrs[name] = attr
                datafile.create_dataset('turns', data=self.turn_num)
                # isinstance instead of `type(...) ==` also accepts ndarray
                # subclasses.
                if isinstance(self.data[0], np.ndarray):
                    for i, d in enumerate(self.data):
                        datafile.create_dataset('data{}'.format(i), data=d)
                else:
                    datafile.create_dataset('data', data=np.array(self.data))
def xdistribution(bunch):
    """Gather all particles onto rank 0 and histogram the x coordinate.

    Returns np.array([counts, bin_centers]) on rank 0; None on other ranks.
    """
    gathered = comm_world.gather(bunch.get_local_particles(), root=0)
    if comm_world.rank != 0:
        return None
    xs = np.vstack(gathered)[:, 0]
    counts, edges = np.histogram(xs, range=(xs.min(), xs.max()), bins='fd')
    centers = [0.5 * (edges[k - 1] + edges[k]) for k in range(1, edges.size)]
    return np.array([counts, centers])
def xydistribution(bunch):
    """Gather all particles onto rank 0 and histogram the (x, y) plane.

    Returns, on rank 0 only, np.array([flat_counts, x_edges, y_edges],
    dtype=object), where flat_counts is the flattened 64x64 histogram with a
    trailing -1 sentinel.  Non-root ranks return None, matching
    xdistribution.
    """
    all_particles = comm_world.gather(bunch.get_local_particles(), root=0)
    if comm_world.rank == 0:
        all_particles = np.vstack(all_particles)
        minx, maxx = all_particles[:, 0].min(), all_particles[:, 0].max()
        miny, maxy = all_particles[:, 1].min(), all_particles[:, 1].max()
        hist, binsx, binsy = np.histogram2d(all_particles[:, 0],
                                            all_particles[:, 1],
                                            range=[[minx, maxx], [miny, maxy]], bins=64)
        # np.append flattens the 2-D histogram; -1 marks the end sentinel.
        hist = np.append(hist, [-1])
        # Bug fix: the return was previously outside this branch, so non-root
        # ranks hit a NameError on the undefined locals.  dtype=object is
        # needed because the three rows have different lengths.
        return np.array([hist, binsx, binsy], dtype=object)
| true | true |
1c3c3a246b808ce6cfc008ff2deb745737b043b8 | 13,005 | py | Python | src/DistPCAE.py | spaghettix/DissP_RL_OCTSC | e03df1ebc5c3ccef66ddf7cd2b05d0106855215f | [
"MIT"
] | null | null | null | src/DistPCAE.py | spaghettix/DissP_RL_OCTSC | e03df1ebc5c3ccef66ddf7cd2b05d0106855215f | [
"MIT"
] | null | null | null | src/DistPCAE.py | spaghettix/DissP_RL_OCTSC | e03df1ebc5c3ccef66ddf7cd2b05d0106855215f | [
"MIT"
] | null | null | null |
"""
ABOUT: DISTANCE PRESERVING CONVOLUTIONAL AUTO-ENCODER
for UNIVARIATE TIME SERIES.
"""
__author__ = 'Stefano Mauceri'
__email__ = 'mauceri.stefano@gmail.com'
# =============================================================================
# IMPORT
# =============================================================================
import numpy as np
import tensorflow as tf
# All Keras tensors/layers in this module use single precision.
tf.keras.backend.set_floatx('float32')
try:
    # Package-relative import (normal installed/package usage).
    from .nn_utils import AEBase, LossHistory
except ImportError:
    # Narrowed from a bare `except:` (which also swallowed SystemExit and
    # KeyboardInterrupt); fall back to a top-level import for script usage.
    from nn_utils import AEBase, LossHistory
from tensorflow.keras import Model, Sequential
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.layers import (Conv1D,
Dense,
Input,
Flatten,
Lambda,
MaxPooling1D,
Reshape,
UpSampling1D)
from tensorflow_addons.losses.metric_learning import pairwise_distance as PD
# =============================================================================
# CLASS
# =============================================================================
class DistPCAE(AEBase):
    """Distance-preserving convolutional auto-encoder for univariate time series.

    Trains with a joint loss: MSE reconstruction of the input series plus
    MSE between the pairwise distances of the latent codes and a
    user-supplied distance matrix, so the latent space mirrors the chosen
    dissimilarity measure.
    """

    def __init__(self,
                 input_size,
                 n_layers,
                 latent_dim,
                 n_filters=16,
                 kernel_size=0.03,
                 activation='tanh',
                 optimizer='Adam',
                 lr=0.001,
                 seed=None,
                 loss_weights=[1., 1.],
                 **kwargs):
        """Build and compile the encoder/decoder pair.

        :param input_size: length of each (univariate) input series.
        :param n_layers: number of convolutional stages requested.
        :param latent_dim: size of the latent (bottleneck) vector.
        :param n_filters: base number of convolution filters.
        :param kernel_size: kernel-size parameter passed through to the
            AEBase helpers (presumably a fraction of input length -- TODO
            confirm against AEBase.get_kernels).
        :param activation: Keras activation name used throughout.
        :param optimizer: name of a tf.keras.optimizers class.
        :param lr: learning rate.
        :param seed: seed for the glorot_uniform weight initializers.
        :param loss_weights: [reconstruction, distance-preservation] weights.
        """
        self.input_size = int(input_size)
        self.n_layers = int(n_layers)
        self.latent_dim = int(latent_dim)
        self.n_filters = int(n_filters)
        self.kernel_size = kernel_size
        # Architecture descriptors provided by the AEBase parent class.
        self.pooling = self.get_pooling()
        self.filters = self.get_filters()
        self.kernels = self.get_kernels()
        self.seed = seed
        self.activation = tf.keras.activations.get(activation)
        self.optimizer = getattr(tf.keras.optimizers, optimizer)(learning_rate=lr)
        self.encoder_input = Input((self.input_size,1), name='layer_0.in')
        self.decoder_input = Input((self.latent_dim,), name='layer_0.out')
        self.distance_matrix_input = Input((None,), name='distance_matrix')
        self.encoder = self.build_encoder()
        self.decoder = self.build_decoder()
        self.encoded_input = self.encoder(self.encoder_input)
        # Pairwise distances between latent codes; trained (via the second
        # 'mse' loss head) to match the supplied distance matrix.
        self.dp_loss = Lambda(PD, name='dp_loss')(self.encoded_input)
        self.model = Model(inputs=[self.encoder_input, self.distance_matrix_input],
                           outputs=[self.decoder(self.encoded_input), self.dp_loss])
        self.model.compile(loss=['mse', 'mse'],
                           loss_weights=loss_weights,
                           optimizer=self.optimizer)
        self.loss_tracker = LossHistory()
        self.lr_tracker = ReduceLROnPlateau(monitor='loss',
                                            factor=.5,
                                            patience=100,
                                            min_delta=0.0001,
                                            min_lr=0.0001,
                                            verbose=False)
        self.call_backs = [self.loss_tracker, self.lr_tracker]

    def build_encoder(self):
        """Conv/pool stack ending in a 1x1 conv, flatten and dense bottleneck."""
        model = Sequential(name='Encoder')
        ix = 0
        for i, p in enumerate(self.pooling):
            ix += 1
            model.add(Conv1D(filters=self.filters[i],
                             kernel_size=(self.kernels[i],),
                             strides=1,
                             padding='same',
                             data_format='channels_last',
                             activation=self.activation,
                             use_bias=True,
                             kernel_initializer={'class_name':'glorot_uniform',
                                                 'config':{'seed':self.seed}},
                             bias_initializer='zeros',
                             name=f'layer_{ix}.conv.in'))
            if p:
                ix += 1
                model.add(MaxPooling1D(pool_size=2,
                                       strides=None,
                                       padding='same',
                                       data_format='channels_last',
                                       name=f'layer_{ix}.pool.in'))
        # 1x1 convolution collapses the filter maps to a single channel.
        model.add(Conv1D(filters=1,
                         kernel_size=1,
                         strides=1,
                         padding='same',
                         data_format='channels_last',
                         activation=self.activation,
                         use_bias=True,
                         kernel_initializer={'class_name':'glorot_uniform',
                                             'config':{'seed':self.seed}},
                         bias_initializer='zeros',
                         name=f'layer_{ix+1}.1x1conv.in'))
        model.add(Flatten(data_format='channels_last'))
        # NOTE(review): assumes the flattened length equals
        # input_size / (2 * number_of_pooling_layers); confirm against
        # AEBase.get_pooling (halving per pooling layer would give 2**n).
        self.flat_dim = int(self.input_size / (sum(self.pooling)*2))
        model.add(Dense(self.latent_dim,
                        activation=None,
                        use_bias=True,
                        kernel_initializer={'class_name':'glorot_uniform',
                                            'config':{'seed':self.seed}},
                        bias_initializer='zeros',
                        name=f'layer_{ix+3}.dense.in'))
        return model

    def build_decoder(self):
        """Mirror of the encoder: dense, reshape, upsample/conv stack."""
        model = Sequential(name='Decoder')
        model.add(Dense(self.flat_dim,
                        activation=self.activation,
                        use_bias=True,
                        kernel_initializer={'class_name':'glorot_uniform',
                                            'config':{'seed':self.seed}},
                        bias_initializer='zeros',
                        name='layer_1.dense.out'))
        model.add(Reshape((self.flat_dim,1)))
        # Walk the encoder layout in reverse order.
        pooling = np.flip(self.pooling)
        filters = np.flip(self.filters)
        kernels = np.flip(self.kernels)
        ix = 2
        for i, p in enumerate(pooling):
            ix += 1
            if p:
                model.add(UpSampling1D(size=2, name=f'layer_{ix}.unpool.out'))
                ix += 1
            model.add(Conv1D(filters=filters[i],
                             kernel_size=(kernels[i],),
                             strides=1,
                             padding='same',
                             data_format='channels_last',
                             activation=self.activation,
                             use_bias=True,
                             kernel_initializer={'class_name':'glorot_uniform',
                                                 'config':{'seed':self.seed}},
                             bias_initializer='zeros',
                             name=f'layer_{ix}.conv.out'))
        # Final 1x1 convolution (linear) maps back to one output channel.
        model.add(Conv1D(filters=1,
                         kernel_size=1,
                         strides=1,
                         padding='same',
                         data_format='channels_last',
                         activation=None,
                         use_bias=True,
                         kernel_initializer={'class_name':'glorot_uniform',
                                             'config':{'seed':self.seed}},
                         bias_initializer='zeros',
                         name=f'layer_{ix+1}.1x1conv.out'))
        return model

    def fit(self, X, distance_matrix, epochs, batch_size=None):
        """Train on random batches sampled (with replacement) from X.

        :param X: float32 array of shape (n_samples, input_size, 1).
        :param distance_matrix: (n_samples, n_samples) target distances.
        :param epochs: number of training epochs.
        :param batch_size: defaults to the full data set.
        """
        if batch_size is None:
            batch_size = X.shape[0]
        def generator(X, dist_matrix, batch_size):
            # Endless stream of (inputs, targets); each batch draws random
            # sample indices and the matching sub-block of the distance
            # matrix.
            nsamples = X.shape[0]
            while True:
                ix = tf.random.uniform(shape=(batch_size,),
                                       minval=0,
                                       maxval=nsamples,
                                       dtype=tf.int32)
                x = tf.gather(X, indices=ix, axis=0)
                dm = tf.gather_nd(dist_matrix, indices=tf.stack(tf.meshgrid(ix, ix), axis=-1))
                yield (x, dm), (x, dm)
        Data = tf.data.Dataset.from_generator(generator,
                                              ((tf.float32, tf.float32), (tf.float32, tf.float32)),
                                              ((tf.TensorShape([None,self.input_size,1]), tf.TensorShape([batch_size,batch_size])), (tf.TensorShape([None,self.input_size,1]), tf.TensorShape([batch_size,batch_size]))),
                                              args=[X, distance_matrix, batch_size])
        steps = int(tf.math.ceil(X.shape[0]/batch_size))
        self.model.fit(Data,
                       epochs=epochs,
                       shuffle=False,
                       steps_per_epoch=steps,
                       callbacks=self.call_backs,
                       validation_data=None,
                       verbose=False,
                       use_multiprocessing=False)

    def loss_history(self):
        """Return per-epoch totals: overall, reconstruction and distance losses."""
        history = self.loss_tracker.get_history()
        return {'total_loss': history.loss.values,
                'r_loss': history.Decoder_loss.values,
                'dp_loss': history.dp_loss_loss.values}
# =============================================================================
# MAIN
# =============================================================================
if __name__ == '__main__':
    # Demo / smoke test: train on one class of a UCR dataset and plot results.

    import os
    import matplotlib.pyplot as plt
    from dissimilarity import dissimilarity
    from scipy.spatial.distance import cdist

    # IMPORT DATA
    p = os.path.abspath(os.path.join('..', 'data'))
    dts = 'Plane'
    class_ = 1
    X_train = np.load(f'{p}/{dts}/{dts}_X_TRAIN.npy').astype(np.float32)
    X_train = AEBase().check_input_conv(X_train)
    Y_train = np.load(f'{p}/{dts}/{dts}_Y_TRAIN.npy')
    X_train = tf.linalg.normalize(X_train, axis=1, ord='euclidean')[0]
    X_train_pos = X_train[(Y_train == class_)]
    X_train_neg = X_train[(Y_train != class_)]
    X_test = np.load(f'{p}/{dts}/{dts}_X_TEST.npy').astype(np.float32)
    X_test = AEBase().check_input_conv(X_test)
    Y_test = np.load(f'{p}/{dts}/{dts}_Y_TEST.npy')
    X_test = tf.linalg.normalize(X_test, axis=1, ord='euclidean')[0]
    X_test_pos = X_test[(Y_test == class_)]
    X_test_neg = X_test[(Y_test != class_)]
    # Target dissimilarity: pairwise DTW between positive training series,
    # normalised to unit Euclidean norm.
    diss = 'DTW'
    D = dissimilarity()
    D = getattr(D, diss)
    _X = tf.squeeze(X_train_pos, axis=-1)
    DM = cdist(_X, _X, metric=D)
    DM = tf.linalg.normalize(DM, ord='euclidean')[0]
    # MODEL
    model = DistPCAE(input_size=X_train_pos.shape[1],
                     n_layers=5,
                     latent_dim=2,
                     optimizer='Adam',
                     activation='tanh',
                     lr=0.001)
    print('ENC structure: ', model.pooling)
    print('Training Samples: ', X_train_pos.shape[0])
    model.show_config(model.encoder)
    model.show_config(model.decoder)
    # FIT
    model.fit(X_train_pos,
              distance_matrix=DM,
              epochs=1000,
              batch_size=16)
    # PLOT LOSS
    loss = model.loss_history()
    plt.plot(loss['total_loss'], '-k', label='total')
    plt.plot(loss['r_loss'], '-r', label='rec')
    plt.plot(loss['dp_loss'], '-b', label='dist')
    plt.title('Training Loss')
    plt.legend()
    plt.show()
    plt.close()
    # PLOT LATENT REPRESENTATION - TRAINING DATA
    X_train_pos_enc = model.encode(X_train_pos)
    X_train_neg_enc = model.encode(X_train_neg)
    plt.scatter(X_train_neg_enc[:, 0],
                X_train_neg_enc[:, 1],
                c='k', marker='o', alpha=0.3)
    plt.scatter(X_train_pos_enc[:, 0],
                X_train_pos_enc[:, 1],
                c='r', marker='o', alpha=0.3)
    plt.title('Training Data - Latent Space (red=positive - black=negative)')
    plt.show()
    plt.close()
    # PLOT LATENT REPRESENTATION - TEST DATA
    X_test_pos_enc = model.encode(X_test_pos)
    X_test_neg_enc = model.encode(X_test_neg)
    plt.scatter(X_test_neg_enc[:, 0],
                X_test_neg_enc[:, 1],
                c='k', marker='o', alpha=0.3)
    plt.scatter(X_test_pos_enc[:, 0],
                X_test_pos_enc[:, 1],
                c='b', marker='o', alpha=0.3)
    plt.title('Test Data - Latent Space (blue=positive - black=negative)')
    plt.show()
    plt.close()
    # PLOT RECONSTRUCTION
    X_original = X_train_pos[1].numpy().reshape(1,-1,1)
    X_reconstructed = model.encode_decode(X_original)
    plt.plot(X_original.ravel(), '-k')
    plt.plot(X_reconstructed.ravel(), '-b')
    plt.title('Reconstruction (black=original - blue=reconstred)')
    plt.show()
    plt.close()
# =============================================================================
# THE END
# ============================================================================= | 34.313984 | 217 | 0.476125 |
__author__ = 'Stefano Mauceri'
__email__ = 'mauceri.stefano@gmail.com'
import numpy as np
import tensorflow as tf
tf.keras.backend.set_floatx('float32')
try:
from .nn_utils import AEBase, LossHistory
except:
from nn_utils import AEBase, LossHistory
from tensorflow.keras import Model, Sequential
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.layers import (Conv1D,
Dense,
Input,
Flatten,
Lambda,
MaxPooling1D,
Reshape,
UpSampling1D)
from tensorflow_addons.losses.metric_learning import pairwise_distance as PD
class DistPCAE(AEBase):
    """Distance-preserving 1-D convolutional autoencoder (Keras).

    The model is trained against two targets at once:
      - reconstruction of the input series (mse on the decoder output), and
      - preservation of pairwise distances: the pairwise-distance matrix of
        the latent codes (via ``tensorflow_addons`` ``pairwise_distance``)
        is regressed onto a precomputed distance matrix (mse).
    """
    def __init__(self,
                 input_size,
                 n_layers,
                 latent_dim,
                 n_filters=16,
                 kernel_size=0.03,
                 activation='tanh',
                 optimizer='Adam',
                 lr=0.001,
                 seed=None,
                 loss_weights=[1., 1.],
                 **kwargs):
        """
        :param input_size: length (timesteps) of each input series.
        :param n_layers: number of encoder stages; consumed by the AEBase
            helpers (``get_pooling``/``get_filters``/``get_kernels``) that
            derive the architecture -- see AEBase for the exact meaning.
        :param latent_dim: dimensionality of the latent space.
        :param n_filters: base number of convolution filters.
        :param kernel_size: kernel-size parameter forwarded to AEBase
            (presumably a fraction of ``input_size`` -- TODO confirm).
        :param activation: Keras activation name for the hidden layers.
        :param optimizer: name of a ``tf.keras.optimizers`` class.
        :param lr: learning rate.
        :param seed: seed for the glorot_uniform weight initializers.
        :param loss_weights: weights of [reconstruction, distance] losses.
            NOTE(review): mutable default argument; harmless here because it
            is never mutated, but ``None`` + in-body default would be safer.
        """
        self.input_size = int(input_size)
        self.n_layers = int(n_layers)
        self.latent_dim = int(latent_dim)
        self.n_filters = int(n_filters)
        self.kernel_size = kernel_size
        # Architecture descriptors computed by AEBase from the sizes above.
        self.pooling = self.get_pooling()
        self.filters = self.get_filters()
        self.kernels = self.get_kernels()
        self.seed = seed
        self.activation = tf.keras.activations.get(activation)
        self.optimizer = getattr(tf.keras.optimizers, optimizer)(learning_rate=lr)
        self.encoder_input = Input((self.input_size,1), name='layer_0.in')
        self.decoder_input = Input((self.latent_dim,), name='layer_0.out')
        self.distance_matrix_input = Input((None,), name='distance_matrix')
        # build_encoder() must run first: it also sets self.flat_dim,
        # which build_decoder() reads.
        self.encoder = self.build_encoder()
        self.decoder = self.build_decoder()
        self.encoded_input = self.encoder(self.encoder_input)
        # Pairwise distances between latent codes; the second model output,
        # trained to match the distance matrix supplied to fit().
        self.dp_loss = Lambda(PD, name='dp_loss')(self.encoded_input)
        self.model = Model(inputs=[self.encoder_input, self.distance_matrix_input],
                           outputs=[self.decoder(self.encoded_input), self.dp_loss])
        self.model.compile(loss=['mse', 'mse'],
                           loss_weights=loss_weights,
                           optimizer=self.optimizer)
        self.loss_tracker = LossHistory()
        self.lr_tracker = ReduceLROnPlateau(monitor='loss',
                                            factor=.5,
                                            patience=100,
                                            min_delta=0.0001,
                                            min_lr=0.0001,
                                            verbose=False)
        self.call_backs = [self.loss_tracker, self.lr_tracker]
    def build_encoder(self):
        """Build the Sequential encoder.

        Conv1D stages (each optionally followed by 2x max-pooling, as
        dictated by ``self.pooling``), then a 1x1 Conv1D, Flatten, and a
        linear Dense projection to ``latent_dim``.  Side effect: sets
        ``self.flat_dim`` (length of the flattened feature map), which
        ``build_decoder`` uses.
        """
        model = Sequential(name='Encoder')
        ix = 0
        for i, p in enumerate(self.pooling):
            ix += 1
            model.add(Conv1D(filters=self.filters[i],
                             kernel_size=(self.kernels[i],),
                             strides=1,
                             padding='same',
                             data_format='channels_last',
                             activation=self.activation,
                             use_bias=True,
                             kernel_initializer={'class_name':'glorot_uniform',
                                                 'config':{'seed':self.seed}},
                             bias_initializer='zeros',
                             name=f'layer_{ix}.conv.in'))
            if p:
                ix += 1
                model.add(MaxPooling1D(pool_size=2,
                                       strides=None,
                                       padding='same',
                                       data_format='channels_last',
                                       name=f'layer_{ix}.pool.in'))
        model.add(Conv1D(filters=1,
                         kernel_size=1,
                         strides=1,
                         padding='same',
                         data_format='channels_last',
                         activation=self.activation,
                         use_bias=True,
                         kernel_initializer={'class_name':'glorot_uniform',
                                             'config':{'seed':self.seed}},
                         bias_initializer='zeros',
                         name=f'layer_{ix+1}.1x1conv.in'))
        model.add(Flatten(data_format='channels_last'))
        # NOTE(review): with k = sum(self.pooling) halving pool layers the
        # flattened length should be input_size / 2**k, not input_size / (k*2);
        # the two only coincide for k in {1, 2}.  Confirm against
        # AEBase.get_pooling before relying on other configurations.
        self.flat_dim = int(self.input_size / (sum(self.pooling)*2))
        model.add(Dense(self.latent_dim,
                        activation=None,
                        use_bias=True,
                        kernel_initializer={'class_name':'glorot_uniform',
                                            'config':{'seed':self.seed}},
                        bias_initializer='zeros',
                        name=f'layer_{ix+3}.dense.in'))
        return model
    def build_decoder(self):
        """Build the Sequential decoder: mirror of the encoder.

        Dense up to ``self.flat_dim``, reshape to (flat_dim, 1), then the
        reversed Conv1D (+ upsampling) stack, and a final linear 1x1 Conv1D
        producing the reconstructed series.
        """
        model = Sequential(name='Decoder')
        model.add(Dense(self.flat_dim,
                        activation=self.activation,
                        use_bias=True,
                        kernel_initializer={'class_name':'glorot_uniform',
                                            'config':{'seed':self.seed}},
                        bias_initializer='zeros',
                        name='layer_1.dense.out'))
        model.add(Reshape((self.flat_dim,1)))
        # Walk the encoder configuration in reverse order.
        pooling = np.flip(self.pooling)
        filters = np.flip(self.filters)
        kernels = np.flip(self.kernels)
        ix = 2
        for i, p in enumerate(pooling):
            ix += 1
            if p:
                model.add(UpSampling1D(size=2, name=f'layer_{ix}.unpool.out'))
                ix += 1
            model.add(Conv1D(filters=filters[i],
                             kernel_size=(kernels[i],),
                             strides=1,
                             padding='same',
                             data_format='channels_last',
                             activation=self.activation,
                             use_bias=True,
                             kernel_initializer={'class_name':'glorot_uniform',
                                                 'config':{'seed':self.seed}},
                             bias_initializer='zeros',
                             name=f'layer_{ix}.conv.out'))
        model.add(Conv1D(filters=1,
                         kernel_size=1,
                         strides=1,
                         padding='same',
                         data_format='channels_last',
                         activation=None,
                         use_bias=True,
                         kernel_initializer={'class_name':'glorot_uniform',
                                             'config':{'seed':self.seed}},
                         bias_initializer='zeros',
                         name=f'layer_{ix+1}.1x1conv.out'))
        return model
    def fit(self, X, distance_matrix, epochs, batch_size=None):
        """Train on ``X`` with a precomputed pairwise ``distance_matrix``.

        Each generator step draws ``batch_size`` random indices (with
        replacement), gathers the corresponding samples, and gathers the
        matching (batch_size x batch_size) sub-matrix of the distance
        matrix so the dp_loss output can be supervised.

        :param X: training samples, shape (n, input_size, 1).
        :param distance_matrix: (n, n) pairwise distances between rows of X.
        :param epochs: number of training epochs.
        :param batch_size: batch size; defaults to the full data set.
        """
        if batch_size is None:
            batch_size = X.shape[0]
        def generator(X, dist_matrix, batch_size):
            # Infinite generator of ((x, dm), (x, dm)) pairs: the same
            # tensors serve as inputs and as the two training targets.
            nsamples = X.shape[0]
            while True:
                ix = tf.random.uniform(shape=(batch_size,),
                                       minval=0,
                                       maxval=nsamples,
                                       dtype=tf.int32)
                x = tf.gather(X, indices=ix, axis=0)
                dm = tf.gather_nd(dist_matrix, indices=tf.stack(tf.meshgrid(ix, ix), axis=-1))
                yield (x, dm), (x, dm)
        Data = tf.data.Dataset.from_generator(generator,
                                              ((tf.float32, tf.float32), (tf.float32, tf.float32)),
                                              ((tf.TensorShape([None,self.input_size,1]), tf.TensorShape([batch_size,batch_size])), (tf.TensorShape([None,self.input_size,1]), tf.TensorShape([batch_size,batch_size]))),
                                              args=[X, distance_matrix, batch_size])
        steps = int(tf.math.ceil(X.shape[0]/batch_size))
        self.model.fit(Data,
                       epochs=epochs,
                       shuffle=False,
                       steps_per_epoch=steps,
                       callbacks=self.call_backs,
                       validation_data=None,
                       verbose=False,
                       use_multiprocessing=False)
    def loss_history(self):
        """Return per-epoch losses recorded by the LossHistory callback.

        :return: dict with 'total_loss', 'r_loss' (reconstruction) and
            'dp_loss' (distance-preservation) series.
        """
        history = self.loss_tracker.get_history()
        return {'total_loss': history.loss.values,
                'r_loss': history.Decoder_loss.values,
                'dp_loss': history.dp_loss_loss.values}
if __name__ == '__main__':
    # Demo: train a distance-preserving autoencoder on one UCR-style dataset
    # ('Plane'), treating one class as positive, and visualize the result.
    import os
    import matplotlib.pyplot as plt
    from dissimilarity import dissimilarity
    from scipy.spatial.distance import cdist
    # DATA: load train/test splits, L2-normalize each series, and split
    # positive (target class) from negative samples.
    p = os.path.abspath(os.path.join('..', 'data'))
    dts = 'Plane'
    class_ = 1
    X_train = np.load(f'{p}/{dts}/{dts}_X_TRAIN.npy').astype(np.float32)
    X_train = AEBase().check_input_conv(X_train)
    Y_train = np.load(f'{p}/{dts}/{dts}_Y_TRAIN.npy')
    X_train = tf.linalg.normalize(X_train, axis=1, ord='euclidean')[0]
    X_train_pos = X_train[(Y_train == class_)]
    X_train_neg = X_train[(Y_train != class_)]
    X_test = np.load(f'{p}/{dts}/{dts}_X_TEST.npy').astype(np.float32)
    X_test = AEBase().check_input_conv(X_test)
    Y_test = np.load(f'{p}/{dts}/{dts}_Y_TEST.npy')
    X_test = tf.linalg.normalize(X_test, axis=1, ord='euclidean')[0]
    X_test_pos = X_test[(Y_test == class_)]
    X_test_neg = X_test[(Y_test != class_)]
    # DISTANCE MATRIX: pairwise DTW distances between positive training
    # samples, normalized to unit Frobenius norm.
    diss = 'DTW'
    D = dissimilarity()
    D = getattr(D, diss)
    _X = tf.squeeze(X_train_pos, axis=-1)
    DM = cdist(_X, _X, metric=D)
    DM = tf.linalg.normalize(DM, ord='euclidean')[0]
    # MODEL
    model = DistPCAE(input_size=X_train_pos.shape[1],
                     n_layers=5,
                     latent_dim=2,
                     optimizer='Adam',
                     activation='tanh',
                     lr=0.001)
    print('ENC structure: ', model.pooling)
    print('Training Samples: ', X_train_pos.shape[0])
    model.show_config(model.encoder)
    model.show_config(model.decoder)
    # FIT
    model.fit(X_train_pos,
              distance_matrix=DM,
              epochs=1000,
              batch_size=16)
    # PLOT LOSS
    loss = model.loss_history()
    plt.plot(loss['total_loss'], '-k', label='total')
    plt.plot(loss['r_loss'], '-r', label='rec')
    plt.plot(loss['dp_loss'], '-b', label='dist')
    plt.title('Training Loss')
    plt.legend()
    plt.show()
    plt.close()
    # PLOT LATENT REPRESENTATION - TRAINING DATA
    X_train_pos_enc = model.encode(X_train_pos)
    X_train_neg_enc = model.encode(X_train_neg)
    plt.scatter(X_train_neg_enc[:, 0],
                X_train_neg_enc[:, 1],
                c='k', marker='o', alpha=0.3)
    plt.scatter(X_train_pos_enc[:, 0],
                X_train_pos_enc[:, 1],
                c='r', marker='o', alpha=0.3)
    plt.title('Training Data - Latent Space (red=positive - black=negative)')
    plt.show()
    plt.close()
    # PLOT LATENT REPRESENTATION - TEST DATA
    X_test_pos_enc = model.encode(X_test_pos)
    X_test_neg_enc = model.encode(X_test_neg)
    plt.scatter(X_test_neg_enc[:, 0],
                X_test_neg_enc[:, 1],
                c='k', marker='o', alpha=0.3)
    plt.scatter(X_test_pos_enc[:, 0],
                X_test_pos_enc[:, 1],
                c='b', marker='o', alpha=0.3)
    plt.title('Test Data - Latent Space (blue=positive - black=negative)')
    plt.show()
    plt.close()
    # PLOT RECONSTRUCTION of a single positive training sample.
    X_original = X_train_pos[1].numpy().reshape(1,-1,1)
    X_reconstructed = model.encode_decode(X_original)
    plt.plot(X_original.ravel(), '-k')
    plt.plot(X_reconstructed.ravel(), '-b')
    plt.title('Reconstruction (black=original - blue=reconstred)')
    plt.show()
    plt.close()
| true | true |
1c3c3a7101cd5304cab7ddc2c15045fb5d973ec2 | 3,596 | py | Python | gui/accounts/utils.py | klebed/esdc-ce | 2c9e4591f344247d345a83880ba86777bb794460 | [
"Apache-2.0"
] | 97 | 2016-11-15T14:44:23.000Z | 2022-03-13T18:09:15.000Z | gui/accounts/utils.py | klebed/esdc-ce | 2c9e4591f344247d345a83880ba86777bb794460 | [
"Apache-2.0"
] | 334 | 2016-11-17T19:56:57.000Z | 2022-03-18T10:45:53.000Z | gui/accounts/utils.py | klebed/esdc-ce | 2c9e4591f344247d345a83880ba86777bb794460 | [
"Apache-2.0"
] | 33 | 2017-01-02T16:04:13.000Z | 2022-02-07T19:20:24.000Z | from django.contrib.gis.geoip import GeoIP
from django.utils.translation import get_language
from django.core.cache import cache
from logging import getLogger
# noinspection PyProtectedMember
from phonenumbers.data import _COUNTRY_CODE_TO_REGION_CODE
from pytz import country_timezones
logger = getLogger(__name__)
def get_client_ip(request):
    """
    Return the client's IP address for this request.

    Honors the X-Forwarded-For header (first hop) when present, otherwise
    falls back to REMOTE_ADDR.
    http://stackoverflow.com/questions/4581789/how-do-i-get-user-ip-address-in-django
    """
    forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded_for:
        # First entry in the comma-separated chain is the originating client.
        return forwarded_for.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def get_geoip(request):
    """
    Return GeoIP.country dictionary, queried by request IP address.
    """
    ip = get_client_ip(request)
    geoip = GeoIP()
    if not ip:
        # No usable client IP -> empty result shape.
        return {'country_code': None, 'country_name': None}
    result = geoip.country(ip)
    logger.debug('GeoIP detection on IP: %s with result: %s', ip, result)
    return result
def get_time_zone(country_code):
    """
    Return the first time zone registered for the country, or None when the
    country code is unknown.
    """
    try:
        zones = country_timezones[country_code]
    except KeyError:
        return None
    return zones[0]
def get_phone_prefix(country_code):
    """
    Return international phone country prefix (e.g. '+420'), or '' when the
    country code is not found.
    """
    # .items() works on both Python 2 and 3; the previous .iteritems() is
    # Python-2-only and raises AttributeError on Python 3.
    for prefix, countries in _COUNTRY_CODE_TO_REGION_CODE.items():
        if country_code in countries:
            return '+%d' % prefix
    return ''
def get_initial_data(request):
    """Initial data for registration page: language plus GeoIP-detected
    country, phone prefix and time zone, falling back to DC defaults."""
    dc_settings = request.dc.settings
    # Start from the datacenter-level defaults.
    initial = {
        'language': get_language(),
        'country': dc_settings.PROFILE_COUNTRY_CODE_DEFAULT,
        'phone': dc_settings.PROFILE_PHONE_PREFIX_DEFAULT,
        'time_zone': dc_settings.PROFILE_TIME_ZONE_DEFAULT,
    }
    # This code should be bullet proof. We don't want to fail a registration because of some geo detection.
    try:
        country = get_geoip(request)['country_code']
        if not country:
            country = dc_settings.PROFILE_COUNTRY_CODE_DEFAULT
        phone = get_phone_prefix(country)
        if not phone:
            phone = dc_settings.PROFILE_PHONE_PREFIX_DEFAULT
        time_zone = get_time_zone(country)
        if not time_zone:
            time_zone = dc_settings.PROFILE_TIME_ZONE_DEFAULT
    except Exception as ex:
        logger.error('Registration GEO detection problem')
        logger.exception(ex)
    else:
        # Only overwrite the defaults when the whole detection succeeded.
        initial['phone'] = phone
        initial['country'] = country
        # NOTE(review): the defaults above use the key 'time_zone' but the
        # detected value is stored under 'timezone' -- verify which key the
        # registration form reads; this looks like a mismatch.
        initial['timezone'] = time_zone
    return initial
def generate_key(request, key, view_type):
    """
    Generate a throttle cache key unique per client IP, username and view
    type; return it together with the default timeout in seconds.
    """
    # (prefix, timeout) per view type; anything unknown is "generic".
    known_types = {
        'login': ('delay_login__', 45),
        'forgot': ('delay_forgot_password__', 150),
    }
    prefix, timeout = known_types.get(view_type, ('delay_generic__', 60))
    return prefix + get_client_ip(request) + '_' + key, timeout
def get_attempts_from_cache(key):
    """Return the recorded attempt count for *key*, or 0 when absent."""
    stored = cache.get(key)
    return int(stored) if stored else 0
def set_attempts_to_cache(key, timeout=30):
    """Increment the attempt counter for *key*.

    The cache timeout grows linearly with the number of attempts; returns
    (attempts, effective_timeout).
    """
    count = get_attempts_from_cache(key) + 1
    scaled_timeout = timeout * count
    cache.set(key, count, scaled_timeout)
    return count, scaled_timeout
def clear_attempts_cache(request, key):
    """Drop the login and forgot-password throttle entries for this client."""
    for view_type in ('login', 'forgot'):
        cache_key, _timeout = generate_key(request, key, view_type)
        cache.delete(cache_key)
| 27.661538 | 107 | 0.675473 | from django.contrib.gis.geoip import GeoIP
from django.utils.translation import get_language
from django.core.cache import cache
from logging import getLogger
from phonenumbers.data import _COUNTRY_CODE_TO_REGION_CODE
from pytz import country_timezones
logger = getLogger(__name__)
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_geoip(request):
ip = get_client_ip(request)
geoip = GeoIP()
if ip:
ret = geoip.country(ip)
logger.debug('GeoIP detection on IP: %s with result: %s', ip, ret)
return ret
else:
return {'country_code': None, 'country_name': None}
def get_time_zone(country_code):
try:
return country_timezones[country_code][0]
except KeyError:
return None
def get_phone_prefix(country_code):
for prefix, countries in _COUNTRY_CODE_TO_REGION_CODE.iteritems():
if country_code in countries:
return '+%d' % prefix
return ''
def get_initial_data(request):
dc_settings = request.dc.settings
initial = {
'language': get_language(),
'country': dc_settings.PROFILE_COUNTRY_CODE_DEFAULT,
'phone': dc_settings.PROFILE_PHONE_PREFIX_DEFAULT,
'time_zone': dc_settings.PROFILE_TIME_ZONE_DEFAULT,
}
try:
country = get_geoip(request)['country_code']
if not country:
country = dc_settings.PROFILE_COUNTRY_CODE_DEFAULT
phone = get_phone_prefix(country)
if not phone:
phone = dc_settings.PROFILE_PHONE_PREFIX_DEFAULT
time_zone = get_time_zone(country)
if not time_zone:
time_zone = dc_settings.PROFILE_TIME_ZONE_DEFAULT
except Exception as ex:
logger.error('Registration GEO detection problem')
logger.exception(ex)
else:
initial['phone'] = phone
initial['country'] = country
initial['timezone'] = time_zone
return initial
def generate_key(request, key, view_type):
if view_type == 'login':
return 'delay_login__' + get_client_ip(request) + '_' + key, 45
elif view_type == 'forgot':
return 'delay_forgot_password__' + get_client_ip(request) + '_' + key, 150
else:
return 'delay_generic__' + get_client_ip(request) + '_' + key, 60
def get_attempts_from_cache(key):
attempts = cache.get(key)
if attempts:
return int(attempts)
return 0
def set_attempts_to_cache(key, timeout=30):
attempts = get_attempts_from_cache(key) + 1
timeout *= attempts
cache.set(key, attempts, timeout)
return attempts, timeout
def clear_attempts_cache(request, key):
gen_key, timeout = generate_key(request, key, 'login')
cache.delete(gen_key)
gen_key, timeout = generate_key(request, key, 'forgot')
cache.delete(gen_key)
| true | true |
1c3c3ae895ac2f9f08901e9629374a47b3329275 | 4,593 | py | Python | 1-Chainlet/rewrite_from_java/FeatureExtractor.py | wang-yuhao/On-the-topological-propertyof-dynamic-transaction-graph | 8dc8c3870befb82581099e3a6edc9f9734c23f31 | [
"MIT"
] | 1 | 2021-01-13T20:54:18.000Z | 2021-01-13T20:54:18.000Z | 1-Chainlet/rewrite_from_java/FeatureExtractor.py | wang-yuhao/On-the-topological-propertyof-dynamic-transaction-graph | 8dc8c3870befb82581099e3a6edc9f9734c23f31 | [
"MIT"
] | null | null | null | 1-Chainlet/rewrite_from_java/FeatureExtractor.py | wang-yuhao/On-the-topological-propertyof-dynamic-transaction-graph | 8dc8c3870befb82581099e3a6edc9f9734c23f31 | [
"MIT"
] | 1 | 2020-12-03T10:30:53.000Z | 2020-12-03T10:30:53.000Z | class FeatureExtractor:
def __init__(self):
preprocess = False
data_dir = "H:/data/createddata/feature/"
os.mkdirs(data_dir)
years = {2016, 2017}
for year in years:
splitFiles(year)
print("year\tday\tmeanValue\tmedianValue\thoMedian\tmeanDegree\tmedianDegree\taddCount\ttxCount")
for year in years:
for day in range(366):
br = open(data_dir + year + "_" + day + ".txt", "r")
# DescriptiveStatistics amounts = new DescriptiveStatistics();
amounts = []
hourlyTx = {}
inDegrees = {}
outDegrees = {}
addresses = []
for line in br.readline():
arr = line.split("\t")
prefix = arr[0]
tx = arr[2]
time = int(arr[1])
blockDate = datetime.datetime.fromtimestamp(time)
thishour = blockDate.hour
if(thishour not in hourlyTx):
hourlyTx[thishour] = 0
hourlyTx[thishour] = hourlyTx[thishour] + 1
if(prefix.lower() == "i"):
inDegrees[tx] = (len(arr) - 3) / 2
elif(prefix.lower() == "o"):
outDegrees[tx] = (len(arr) - 3) / 2
amount = 0
for i in range(3, len(arr) - 1, 2):
amount = amount + int(arr[i+1])
# hashset?
addresses.append(arr[i])
amounts.append(amount)
statistics_amount = np.array(amounts)
meanValue = np.mean(statistics_amount)
medianValue = np.percentile(statistics_amount, 50)
hotx = []
for v in hourlyTx.values():
hoTx.append(v)
statistics_hoTx = np.array(hoTx)
hoMedian = np.percentile(statistics_hoTx, 50)
degrees = []
for tx in inDegrees.keys():
if (tx in outDegrees):
degree = inDegrees[tx]
for f in range(1,outDegrees[tx]):
degrees.append(degree)
meanDegree = np.mean(degrees)
medianDegree = np.percentile(degrees, 50)
addCount = len(addresses)
txCount = len(inDegrees)
print(str(year) + "\t" + str(day) + "\t" + str(meanValue) + "\t" + str(medianValue) + "\t" + str(hoMedian) + "\t" + str(meanDegree) + "\t" + str(medianDegree) + "\t" + str(addCount) + "\t" + str(txCount))
def splitFiles(refYear):
content = {}
# read input and output data from these files
f = ["H:/data/createddata/txInputs.txt", "H:/data/createddata/txOutputs.txt"]
for fileName in f:
substring = fileName.substring(26,27)
inBr = open(fileName, "r")
txIds = {}
line = ""
l = 0
for line in inBr.readline():
l = l + 1
if(l % 100000 == 0):
print("l: ", l)
if(len(line) < 10):
continue
arr = line.split("\t")
time = int(arr[0])
blockDate = datetime.datetime.fromtimestamp(time)
year = blockDate.year
if(year == refYear):
tx = arr[1]
day = blockDate.getDayOfYear()
if(day not in content):
content[day] = ""
content[day] = content[day] + substring + "\t" + line + "\r\n"
if(len(content[day]) > 100000):
write(year, day, content[day])
content.pop(day, None)
for c in content.keys():
write(refYear, c, content[c])
def write(year, day, stringBuffer):
wr = open("H:/data/createddata/feature/" + year + "_" + day + ".txt", "a")
wr.write(stringBuffer)
| 41.754545 | 221 | 0.406924 | class FeatureExtractor:
def __init__(self):
preprocess = False
data_dir = "H:/data/createddata/feature/"
os.mkdirs(data_dir)
years = {2016, 2017}
for year in years:
splitFiles(year)
print("year\tday\tmeanValue\tmedianValue\thoMedian\tmeanDegree\tmedianDegree\taddCount\ttxCount")
for year in years:
for day in range(366):
br = open(data_dir + year + "_" + day + ".txt", "r")
amounts = []
hourlyTx = {}
inDegrees = {}
outDegrees = {}
addresses = []
for line in br.readline():
arr = line.split("\t")
prefix = arr[0]
tx = arr[2]
time = int(arr[1])
blockDate = datetime.datetime.fromtimestamp(time)
thishour = blockDate.hour
if(thishour not in hourlyTx):
hourlyTx[thishour] = 0
hourlyTx[thishour] = hourlyTx[thishour] + 1
if(prefix.lower() == "i"):
inDegrees[tx] = (len(arr) - 3) / 2
elif(prefix.lower() == "o"):
outDegrees[tx] = (len(arr) - 3) / 2
amount = 0
for i in range(3, len(arr) - 1, 2):
amount = amount + int(arr[i+1])
addresses.append(arr[i])
amounts.append(amount)
statistics_amount = np.array(amounts)
meanValue = np.mean(statistics_amount)
medianValue = np.percentile(statistics_amount, 50)
hotx = []
for v in hourlyTx.values():
hoTx.append(v)
statistics_hoTx = np.array(hoTx)
hoMedian = np.percentile(statistics_hoTx, 50)
degrees = []
for tx in inDegrees.keys():
if (tx in outDegrees):
degree = inDegrees[tx]
for f in range(1,outDegrees[tx]):
degrees.append(degree)
meanDegree = np.mean(degrees)
medianDegree = np.percentile(degrees, 50)
addCount = len(addresses)
txCount = len(inDegrees)
print(str(year) + "\t" + str(day) + "\t" + str(meanValue) + "\t" + str(medianValue) + "\t" + str(hoMedian) + "\t" + str(meanDegree) + "\t" + str(medianDegree) + "\t" + str(addCount) + "\t" + str(txCount))
def splitFiles(refYear):
content = {}
f = ["H:/data/createddata/txInputs.txt", "H:/data/createddata/txOutputs.txt"]
for fileName in f:
substring = fileName.substring(26,27)
inBr = open(fileName, "r")
txIds = {}
line = ""
l = 0
for line in inBr.readline():
l = l + 1
if(l % 100000 == 0):
print("l: ", l)
if(len(line) < 10):
continue
arr = line.split("\t")
time = int(arr[0])
blockDate = datetime.datetime.fromtimestamp(time)
year = blockDate.year
if(year == refYear):
tx = arr[1]
day = blockDate.getDayOfYear()
if(day not in content):
content[day] = ""
content[day] = content[day] + substring + "\t" + line + "\r\n"
if(len(content[day]) > 100000):
write(year, day, content[day])
content.pop(day, None)
for c in content.keys():
write(refYear, c, content[c])
def write(year, day, stringBuffer):
wr = open("H:/data/createddata/feature/" + year + "_" + day + ".txt", "a")
wr.write(stringBuffer)
| true | true |
1c3c3b9273adc651b96a12e16847078408e659b1 | 799 | py | Python | tests/conftest.py | owlint/Portainer2Git | fa5592ce75a9e312b6b4763b18b2d8b3b419333f | [
"Apache-2.0"
] | 1 | 2020-05-03T14:00:46.000Z | 2020-05-03T14:00:46.000Z | tests/conftest.py | lauevrar77/Portainer2Git | fa5592ce75a9e312b6b4763b18b2d8b3b419333f | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | lauevrar77/Portainer2Git | fa5592ce75a9e312b6b4763b18b2d8b3b419333f | [
"Apache-2.0"
] | null | null | null | import mongomock
import pymongo
import sys
from unittest.mock import MagicMock
# Replace the real pymongo with a MagicMock that hands out a shared
# mongomock client, so every MongoClient() in the code under test talks to
# the same in-memory database.  This MUST happen before the infrastructure
# modules below are imported, because they import pymongo at module level.
client = mongomock.MongoClient()
mock = MagicMock()
sys.modules["pymongo"] = mock
mock.MongoClient = MagicMock(return_value=client)
# Preserve the real sort-direction constants on the mock.
mock.ASCENDING = pymongo.ASCENDING
mock.DESCENDING = pymongo.DESCENDING
# Imported only after pymongo has been mocked (hence the mid-file imports).
from infrastructure.persistance.persistance import Persistance
from infrastructure.domain_event_listeners.domain_event_listeners import (
    DomainEventListeners,
)
from infrastructure.services.services import Services
from pygrate import migrate, seed
# Instantiate every projection and register every domain-event listener so
# the test database reflects published events.
projections = []
for projection in Persistance.projections:
    projections.append(projection())
for listener in DomainEventListeners.domain_event_listeners:
    Services.domain_event_publisher().register_listener(listener())
# Apply schema migrations and seed data against the mocked database.
migrate.apply()
seed.apply()
| 25.774194 | 74 | 0.822278 | import mongomock
import pymongo
import sys
from unittest.mock import MagicMock
client = mongomock.MongoClient()
mock = MagicMock()
sys.modules["pymongo"] = mock
mock.MongoClient = MagicMock(return_value=client)
mock.ASCENDING = pymongo.ASCENDING
mock.DESCENDING = pymongo.DESCENDING
from infrastructure.persistance.persistance import Persistance
from infrastructure.domain_event_listeners.domain_event_listeners import (
DomainEventListeners,
)
from infrastructure.services.services import Services
from pygrate import migrate, seed
projections = []
for projection in Persistance.projections:
projections.append(projection())
for listener in DomainEventListeners.domain_event_listeners:
Services.domain_event_publisher().register_listener(listener())
migrate.apply()
seed.apply()
| true | true |
1c3c3c3f5de7ac97e3100fb5bc9489bb547d89ef | 8,851 | py | Python | TrainingExtensions/common/src/python/aimet_common/cache.py | lipovsek/aimet | 236fb02cc6c45e65c067030416c49a09ace82045 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/common/src/python/aimet_common/cache.py | lipovsek/aimet | 236fb02cc6c45e65c067030416c49a09ace82045 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/common/src/python/aimet_common/cache.py | lipovsek/aimet | 236fb02cc6c45e65c067030416c49a09ace82045 | [
"BSD-3-Clause"
] | null | null | null | # /usr/bin/env python3.6
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
"""Cache Implementation"""
import abc
import contextlib
import functools
import os
import pickle
from typing import Any, Callable, Optional, Generic, TypeVar
from aimet_common.utils import AimetLogger
_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
class CacheMiss(FileNotFoundError):
    """Raised when a cache lookup finds no previously saved entry."""


# Type of the objects a serialization protocol handles.
T = TypeVar("T")
class SerializationProtocolBase(abc.ABC, Generic[T]): # pylint: disable
    """Serialization protocol for objects of type T.
    Invariants:
      - if type(obj) == T: # NOTE: The types should be EXACTLY the same.
          self.save(obj, working_dir, filename_prefix);
          assert obj == self.load(working_dir, filename_prefix)
      - otherwise:
          self.save(obj, ...) should raise TypeError
    """
    @abc.abstractmethod
    def save(self, obj: T, working_dir: str, filename_prefix: str) -> None:
        """
        Save an object of type T.
        :param obj: Object to save.
        :param working_dir: Directory to save the file.
        :param filename_prefix: File name prefix.
        :raises: TypeError if obj is not of type T.
        """
    @abc.abstractmethod
    def load(self, working_dir: str, filename_prefix: str) -> T:
        """
        Load the saved object.
        :param working_dir: Directory to save the file.
        :param filename_prefix: File name prefix.
        :return: Loaded object.
        :raises: CacheMiss if the combination of working_dir and
            filename_prefix fails to find a previously saved cache entry.
        """
    @classmethod
    def _type_error(cls, obj, expected_type):
        """Helper function for creating a commonly used type error."""
        obj_type = type(obj)
        msg = f"{cls.__name__} cannot serialize an object of type {obj_type} "\
              f"(expected type: {expected_type})."
        return TypeError(msg)
class _PickleSerializationProtocol(SerializationProtocolBase):
    """Serialization protocol backed by Python's :mod:`pickle`."""

    @classmethod
    def _get_filename(cls, working_dir, filename_prefix):
        """Return the path of the pickle file backing this cache entry."""
        return os.path.join(working_dir, filename_prefix + ".pkl")

    def save(self, obj: Any, working_dir: str, filename_prefix: str) -> None:
        """
        Pickle ``obj`` to ``<working_dir>/<filename_prefix>.pkl``.

        :param obj: Object to save.
        :param working_dir: Directory to save the file.
        :param filename_prefix: File name prefix.
        :raises: TypeError if obj cannot be pickled.
        """
        target = self._get_filename(working_dir, filename_prefix)
        with open(target, "wb") as stream:
            try:
                pickle.dump(obj, stream)
            except pickle.PicklingError as error:
                raise TypeError from error

    def load(self, working_dir: str, filename_prefix: str) -> Any:
        """
        Unpickle and return a previously saved object.

        :param working_dir: Directory to save the file.
        :param filename_prefix: File name prefix.
        :return: Loaded object.
        :raises: CacheMiss when no pickle file exists for this key.
        """
        source = self._get_filename(working_dir, filename_prefix)
        if not os.path.exists(source):
            raise CacheMiss
        with open(source, "rb") as stream:
            return pickle.load(stream)
class Cache:
    """
    Cache that performs return value caching.
    Being a return value cache, one should take extra care before applying it.
    Only to the functions that satisfy all of the following conditions can we
    safely apply return value caching:
      - The function should be STATELESS.
        The return value of the function should depend only on the inputs and
        not on any other external states, including the filesystem states.
      - The function should be IDEMPOTENT.
        Calling the function with the identical input should always return the same outputs.
        For example, `fold_all_batchnorms` doesn't satisfy this condition because
        calling `fold_all_batchnorms` with the identical model iteratively will
        return a list of folded pairs only at the first call, and never again.
    Additional pitfall:
      - The original object and the one loaded from the cache are EQUAL but NOT IDENTICAL.
        This is because the caching mechanism is fundamentally based on serialization.
    """
    def __init__(self):
        # Directory results are cached in; None means caching is disabled.
        self._cache_dir = None
    def mark(self, cache_key: str, protocol: SerializationProtocolBase = None):
        """
        Mark functions that are subject to caching.
        The functions decorated with this mark will save/load the outputs
        to/from the cache directory if caching is enabled.
        :param cache_key: Used as a prefix of the name of the file that
            caches the results of the decorated function.
        :param protocol: Serialization protocol for the return values of the function.
            By default, we use pickle serialization protocol.
        :return: A decorator that registers the decorated functions.
        """
        # Use pickle serialization by default.
        protocol = protocol or _PickleSerializationProtocol()
        def _wrap(fn: Callable, cache_key: str):
            # The wrapper reads self._cache_dir at call time, so caching can
            # be toggled (via enable()) after decoration.
            @functools.wraps(fn)
            def caching_helper(*args, **kwargs):
                # If caching is disabled, evaluate the result.
                if self._cache_dir is None:
                    return fn(*args, **kwargs)
                working_dir = self._cache_dir
                filename_prefix = cache_key
                try:
                    # Try loading the previously evaluated result from cache.
                    _logger.debug("Loading result of %s from %s.", cache_key, self._cache_dir)
                    return protocol.load(working_dir, filename_prefix)
                except CacheMiss:
                    # Evaluate and store the result for the next call.
                    _logger.debug("Cache miss.")
                    ret = fn(*args, **kwargs)
                    _logger.debug("Caching result of %s to %s.", cache_key, self._cache_dir)
                    protocol.save(ret, working_dir, filename_prefix)
                    return ret
            return caching_helper
        return lambda fn: _wrap(fn, cache_key)
    @contextlib.contextmanager
    def enable(self, cache_dir: Optional[str]):
        """
        Enable caching.
        :param cache_dir: Directory to read/save the cached results from/to.
            Passing None leaves caching disabled within the block.
        """
        self._cache_dir = cache_dir
        try:
            if self._cache_dir is not None:
                os.makedirs(self._cache_dir, exist_ok=True)
                _logger.info("AutoQuant caching is enabled. Cache directory: %s", self._cache_dir)
            yield
        finally:
            # Always disable caching again when leaving the context.
            self._cache_dir = None
| 40.231818 | 98 | 0.650322 |
import abc
import contextlib
import functools
import os
import pickle
from typing import Any, Callable, Optional, Generic, TypeVar
from aimet_common.utils import AimetLogger
_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
class CacheMiss(FileNotFoundError):
T = TypeVar("T")
class SerializationProtocolBase(abc.ABC, Generic[T]):
@abc.abstractmethod
def save(self, obj: T, working_dir: str, filename_prefix: str) -> None:
@abc.abstractmethod
def load(self, working_dir: str, filename_prefix: str) -> T:
@classmethod
def _type_error(cls, obj, expected_type):
obj_type = type(obj)
msg = f"{cls.__name__} cannot serialize an object of type {obj_type} "\
f"(expected type: {expected_type})."
return TypeError(msg)
class _PickleSerializationProtocol(SerializationProtocolBase):
@classmethod
def _get_filename(cls, working_dir, filename_prefix):
return os.path.join(working_dir, f"{filename_prefix}.pkl")
def save(self, obj: Any, working_dir: str, filename_prefix: str) -> None:
filename = self._get_filename(working_dir, filename_prefix)
with open(filename, "wb") as f:
try:
pickle.dump(obj, f)
except pickle.PicklingError as e:
raise TypeError from e
def load(self, working_dir: str, filename_prefix: str) -> Any:
filename = self._get_filename(working_dir, filename_prefix)
if os.path.exists(filename):
with open(filename, "rb") as f:
return pickle.load(f)
raise CacheMiss
class Cache:
    """Registry that can transparently cache function results on disk.

    Caching is a no-op until :meth:`enable` sets a cache directory.
    """
    def __init__(self):
        # None means caching is disabled.
        self._cache_dir = None
    def mark(self, cache_key: str, protocol: SerializationProtocolBase = None):
        """Decorator factory: cache the wrapped function's result under ``cache_key``.

        :param cache_key: Unique key; also the cache file name prefix.
        :param protocol: Serialization protocol; defaults to pickle.
        """
        protocol = protocol or _PickleSerializationProtocol()
        def _wrap(fn: Callable, cache_key: str):
            @functools.wraps(fn)
            def caching_helper(*args, **kwargs):
                # Caching disabled -> just call through.
                if self._cache_dir is None:
                    return fn(*args, **kwargs)
                working_dir = self._cache_dir
                filename_prefix = cache_key
                try:
                    _logger.debug("Loading result of %s from %s.", cache_key, self._cache_dir)
                    return protocol.load(working_dir, filename_prefix)
                except CacheMiss:
                    # Compute once, then persist for the next run.
                    _logger.debug("Cache miss.")
                    ret = fn(*args, **kwargs)
                    _logger.debug("Caching result of %s to %s.", cache_key, self._cache_dir)
                    protocol.save(ret, working_dir, filename_prefix)
                    return ret
            return caching_helper
        return lambda fn: _wrap(fn, cache_key)
    @contextlib.contextmanager
    def enable(self, cache_dir: Optional[str]):
        """Enable caching for the duration of the ``with`` block.

        :param cache_dir: Directory to read/save the cached results from/to.
        """
        self._cache_dir = cache_dir
        try:
            if self._cache_dir is not None:
                os.makedirs(self._cache_dir, exist_ok=True)
                _logger.info("AutoQuant caching is enabled. Cache directory: %s", self._cache_dir)
            yield
        finally:
            # Always return to the disabled state, even on error.
            self._cache_dir = None
| true | true |
1c3c3c9d896f02485afef951ea488800262d55bd | 126 | py | Python | seahub/related_files/apps.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 101 | 2021-05-16T06:00:03.000Z | 2021-12-01T02:02:29.000Z | seahub/related_files/apps.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | null | null | null | seahub/related_files/apps.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 2 | 2021-10-19T05:22:40.000Z | 2022-01-12T03:55:34.000Z | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class RelatedFilesConfig(AppConfig):
    """Django app configuration for the ``related_files`` application."""
    name = 'related_files'
| 14 | 36 | 0.698413 |
from django.apps import AppConfig
class RelatedFilesConfig(AppConfig):
    """Django app configuration for the ``related_files`` application."""
    name = 'related_files'
| true | true |
1c3c3e20835e09ab095ea418937d485023f797ef | 1,193 | py | Python | git_project_updater_cli/commands/command.py | Slidem/git-project-updater | 2d0e91fab26f6c5e7cf5c92c9f0ee78925fb33b5 | [
"MIT"
] | null | null | null | git_project_updater_cli/commands/command.py | Slidem/git-project-updater | 2d0e91fab26f6c5e7cf5c92c9f0ee78925fb33b5 | [
"MIT"
] | null | null | null | git_project_updater_cli/commands/command.py | Slidem/git-project-updater | 2d0e91fab26f6c5e7cf5c92c9f0ee78925fb33b5 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from git_project_updater_business.settings.settings_repository import SettingsRepository
from git_project_updater_business.repository.projects_repository import ProjectsRepository
from git_project_updater_business.service.projects_service import ProjectsService
from git_project_updater_business.service.git_service import GitService
from git_project_updater_business.scanners.projects_scanner_factory import ProjectScannerFactory
from git_project_updater_cli.commands import command_factory
class Command(ABC):
    """Base class for CLI commands, exposing lazily-constructed services."""

    @abstractmethod
    def execute(self):
        """Run the command."""

    @property
    @abstractmethod
    def code(self):
        """Identifier for this command."""

    @property
    def settings_repository(self):
        return SettingsRepository.instance()

    @property
    def projects_repository(self):
        return ProjectsRepository.instance(self.settings_repository, ProjectScannerFactory.instance())

    @property
    def projects_service(self):
        return ProjectsService.instance(self.projects_repository)

    @property
    def git_service(self):
        return GitService.get_instance(self.projects_repository)

    def __str__(self):
        # Bug fix: the original body was a bare string expression, so
        # __str__ implicitly returned None and str(cmd) raised
        # "TypeError: __str__ returned non-string".  Also fixes the
        # "Unkown" typo.
        return "Unknown command"
| 29.825 | 102 | 0.789606 | from abc import ABC, abstractmethod
from git_project_updater_business.settings.settings_repository import SettingsRepository
from git_project_updater_business.repository.projects_repository import ProjectsRepository
from git_project_updater_business.service.projects_service import ProjectsService
from git_project_updater_business.service.git_service import GitService
from git_project_updater_business.scanners.projects_scanner_factory import ProjectScannerFactory
from git_project_updater_cli.commands import command_factory
class Command(ABC):
    """Base class for CLI commands, exposing lazily-constructed services."""

    @abstractmethod
    def execute(self):
        """Run the command."""
        pass

    @property
    @abstractmethod
    def code(self):
        """Identifier for this command."""
        pass

    @property
    def settings_repository(self):
        return SettingsRepository.instance()

    @property
    def projects_repository(self):
        return ProjectsRepository.instance(self.settings_repository, ProjectScannerFactory.instance())

    @property
    def projects_service(self):
        return ProjectsService.instance(self.projects_repository)

    @property
    def git_service(self):
        return GitService.get_instance(self.projects_repository)

    def __str__(self):
        # Fix: the comment-stripped dump left __str__ with an empty body
        # (SyntaxError); the upstream body was a bare string expression
        # that returned None from __str__.  Return the string instead.
        return "Unknown command"
1c3c3e4d6c7215503da9378d3ae5a54d41ecbbdf | 14,540 | py | Python | pywgrib2_xr/template.py | yt87/pywgrib2_xr | 5c49eaaee12948ecc2f2aff526a9e51e6d4d98b5 | [
"0BSD"
] | 11 | 2021-01-05T03:26:51.000Z | 2022-02-15T02:44:39.000Z | pywgrib2_xr/template.py | yt87/pywgrib2_xr | 5c49eaaee12948ecc2f2aff526a9e51e6d4d98b5 | [
"0BSD"
] | 2 | 2020-12-18T02:35:08.000Z | 2021-07-11T13:01:53.000Z | pywgrib2_xr/template.py | yt87/pywgrib2_xr | 5c49eaaee12948ecc2f2aff526a9e51e6d4d98b5 | [
"0BSD"
] | null | null | null | from datetime import datetime, timedelta
from functools import partial
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
)
# For older Pythons
try:
from typing import TypedDict
except ImportError:
from mypy_extensions import TypedDict
try:
from numpy.typing import ArrayLike
except ImportError:
ArrayLike = Any
import numpy as np
from dask.base import tokenize
from . import __version__, _Variable
from .inventory import (
MetaData,
item_match,
load_or_make_inventory,
)
from .grids import grid_fromgds
# FIME: remove?
# wgrib2 returns C float arrays
# DTYPE = np.dtype("float32")
# From wgrib2 CodeTable_4.10.dat
# Spaces are intentional
TIME_MODS = [
" ave ",
" acc ",
" max ",
" min ",
" last-first ",
" RMS ",
" StdDev ",
" covar ",
" first-last ",
" ratio ",
" standardized anomaly ",
" summation ",
]
class VertLevel(NamedTuple):
    """Describes one kind of vertical level (see VERT_LEVELS below)."""
    type: str  # coordinate name, e.g. "isobaric"
    reverse: bool  # sort order: True = sort level values descending
    units: str  # physical units of level values ("" when dimensionless)
# Possible 3-D variables
VERT_LEVELS: Dict[int, VertLevel] = {
100: VertLevel("isobaric", True, "Pa"),
102: VertLevel("height_asl", False, "m"),
103: VertLevel("height_agl", False, "m"),
104: VertLevel("sigma", True, ""),
105: VertLevel("hybrid", False, ""),
}
# Used to set dataset attributes
class CommonInfo(NamedTuple):
    """Attributes every GRIB message in one dataset must share."""

    reftime: datetime
    centre: str
    subcentre: str
    gdtnum: int
    gdtmpl: List[int]

    def check_item(self, item: MetaData) -> None:
        """Raise ValueError when ``item`` disagrees on reference time or grid."""
        if self.reftime != item.reftime:
            msg = "Reference times differ: {!r} != {!r}".format(
                self.reftime, item.reftime
            )
            raise ValueError(msg)
        same_grid = item.gdtnum == self.gdtnum and item.gdtmpl == self.gdtmpl
        if not same_grid:
            msg = "Grids differ: {:d}: {!r} != {:d}: {!r}".format(
                self.gdtnum, self.gdtmpl, item.gdtnum, item.gdtmpl
            )
            raise ValueError(msg)
class VarSpecs(NamedTuple):
    """Layout of one dataset variable: coordinates, dims, shape, attributes."""
    time_coord: str  # forecast time coordinate
    level_coord: Optional[str]  # level (type from VertLevel) coordinate
    dims: Sequence[str]  # dimension names
    shape: Tuple[int, ...]  # array shape
    attrs: Dict[str, Any]  # attributes
# Containers used to construct VarSpecs
class VarInfo(TypedDict):
    """Accumulator for one variable while scanning GRIB inventories."""
    long_name: str
    units: str
    fcst_time: Set[timedelta]  # forecast offsets relative to reftime
    level: VertLevel  # None when the variable has no vertical dimension
    level_value: Set[float]
class TimeCoord(NamedTuple):
    """A named forecast-time coordinate and its sorted values."""
    name: str
    values: ArrayLike
class LevelCoord(NamedTuple):
    """A named vertical-level coordinate, its kind and its sorted values."""
    level: VertLevel
    name: str
    values: ArrayLike
def item_to_varname(item: MetaData, vert_levels: Dict[int, VertLevel]) -> str:
    """Derive a dataset variable name from one inventory item.

    Joins the GRIB variable name, a level descriptor, and (when present)
    a statistical-period descriptor with dots, replacing spaces with "_".
    """
    def _level() -> str:
        # return lvl["type"] if (lvl := vert_levels.get(item.level_code)) else ""
        # For Python < 3.8 and flake8
        lvl = vert_levels.get(item.bot_level_code)
        return lvl.type if lvl else item.level_str
    def _time() -> str:
        td = item.end_ft - item.start_ft
        if td <= timedelta(0):
            return ""
        # skip values like "102 hour fcst", consider only periods
        for tm in TIME_MODS:
            if tm in item.time_str:
                days, hours, minutes = (
                    td.days,
                    td.seconds // 3600,
                    (td.seconds // 60) % 60,
                )
                # Express the period in the smallest non-zero unit.
                if minutes:
                    minutes += 60 * hours
                    return "{:d}_min_{:s}".format(minutes, tm.strip())
                elif hours:
                    hours += 24 * days
                    return "{:d}_hour_{:s}".format(hours, tm.strip())
                elif days:
                    return "{:d}_day_{:s}".format(days, tm.strip())
        return ""
    parts = (item.varname, _level(), _time())
    return ".".join([x for x in parts if x]).replace(" ", "_")
class Template:
    """Defines dataset structure.
    This is an opaque class instantiated by :py:func:`make_template`.
    Its purpose is to define Dataset structure and avoid complex merges.
    """
    def __init__(
        self,
        commoninfo: CommonInfo,
        var_info_map: Dict[str, VarInfo],
        vert_level_map: Dict[int, VertLevel],
        predicates: Optional[Sequence[Callable[[MetaData], bool]]] = None,
    ):
        # Copy the caller's list: _same_grid is appended below.
        if predicates is None:
            predicates = []
        else:
            predicates = list(predicates)
        self.commoninfo = commoninfo
        self.grid = grid_fromgds(commoninfo.gdtnum, commoninfo.gdtmpl)
        self.coords = {k: _Variable(*v) for k, v in self.grid.coords.items()}
        level_dims, level_coords, level_var2coord = self._build_level_coords(
            var_info_map
        )
        self.coords.update(level_coords)
        time_dims, time_coords, time_var2coord = self._build_time_coords(var_info_map)
        self.coords.update(time_coords)
        self.var_specs = self._build_var_specs(
            var_info_map, time_dims, time_var2coord, level_dims, level_var2coord
        )
        self.attrs = self._build_attrs()
        self.item_to_varname = partial(item_to_varname, vert_levels=vert_level_map)
        predicates.append(self._same_grid)
        self.item_match = partial(item_match, predicates=predicates)
    def __repr__(self):
        summary = [
            "Coordinates:",
            repr(self.coords),
            # "Variable names:",
            # repr(self._var_spec.),
            "Variable specs",
            repr(self.var_specs),
            "Attributes:",
            repr(self.attrs),
        ]
        return "\n".join(summary)
    @property
    def var_names(self):
        """Sorted names of all data variables in this template."""
        return sorted(list(self.var_specs.keys()))
    @staticmethod
    def _build_level_coords(
        var_info_map: Dict[str, VarInfo]
    ) -> Tuple[Dict[str, int], Dict[str, _Variable], Dict[str, str]]:
        """Build vertical coordinates; returns (dim sizes, coords, var->coord map)."""
        def _name(v: Sequence[Any]) -> str:
            # Hash of the level values; identical level sets share a coordinate.
            return tokenize(*v)
        def _sort(v: VarInfo) -> LevelCoord:
            vert_level = v["level"]
            coords = sorted(v["level_value"], reverse=vert_level.reverse)
            dimname = _name(coords)
            return LevelCoord(vert_level, dimname, coords)
        def _levels() -> Dict[str, LevelCoord]:
            levels = {k: _sort(v) for k, v in var_info_map.items() if v["level"]}
            s = set([(v.level, v.name) for v in levels.values()])
            # Replace hash names with readable ones: <level type><N>.
            names = {
                name: "{:s}{:d}".format(level.type, i + 1)
                for (i, (level, name)) in enumerate(s)
            }
            return {
                k: LevelCoord(v.level, names[v.name], v.values)
                for (k, v) in levels.items()
            }
        levels = _levels()
        coords = {}
        dims = {}
        var2coord = {}
        for k, v in levels.items():
            var2coord[k] = v.name
            attrs = {
                "units": v.level.units,
                "axis": "Z",
                "positive": "down" if v.level.reverse else "up",
            }
            coords[v.name] = _Variable((v.name), np.array(v.values), attrs)
            dims[v.name] = len(v.values)
        return dims, coords, var2coord
    @staticmethod
    def _build_time_coords(
        var_info_map: Dict[str, VarInfo]
    ) -> Tuple[Dict[str, int], Dict[str, _Variable], Dict[str, str]]:
        """Build forecast-time coordinates; returns (dim sizes, coords, var->coord map)."""
        def _name(v: Sequence[Any]) -> str:
            return tokenize(*[t.seconds for t in v])
        def _sort(v: VarInfo) -> TimeCoord:
            coords = sorted(v["fcst_time"])
            dimname = _name(coords)
            return TimeCoord(dimname, coords)
        # varname -> TimeCoord
        def _times() -> Dict[str, TimeCoord]:
            times = {k: _sort(v) for k, v in var_info_map.items()}
            s = set([v.name for v in times.values()])
            # Convert hashes to integers. Sort set to ensure consistent mapping
            # Follow Metpy naming: time<N>
            names = {n: "time{:d}".format(i + 1) for (i, n) in enumerate(sorted(s))}
            return {k: TimeCoord(names[v.name], v.values) for (k, v) in times.items()}
        times = _times()
        # Squeeze only when all time dimensions are == 1.
        squeeze = max([len(v.values) for v in times.values()]) == 1
        coords = {}
        dims = {}
        var2coord = {}
        attrs = {"standard_name": "forecast_period"}
        for k, v in times.items():
            var2coord[k] = v.name
            if squeeze:
                coords[v.name] = _Variable((), np.array(v.values[0]), attrs)
            else:
                coords[v.name] = _Variable((v.name), np.array(v.values), attrs)
            dims[v.name] = len(v.values)
        return dims, coords, var2coord
    def _build_attrs(self) -> Dict[str, str]:
        """Global (dataset-level) attributes."""
        return {
            "Projection": self.grid.cfname,
            "Originating centre": self.commoninfo.centre,
            "Originating subcentre": self.commoninfo.subcentre,
            "History": "Created by pywgrib2_xr-{:s}".format(__version__),
        }
    def _build_var_specs(
        self,
        var_info_map: Dict[str, VarInfo],
        time_dims: Dict[str, int],
        time_var2coord: Dict[str, str],
        level_dims: Dict[str, int],
        level_var2coord: Dict[str, str],
    ) -> Dict[str, VarSpecs]:
        """Assemble per-variable VarSpecs: [time][, level] + spatial dims."""
        def _make_specs(k, v):
            time_coord = time_var2coord[k]
            # Absent from time_dims means the time dim was squeezed out.
            if time_coord in time_dims:
                dims = [time_coord]
                shape = [time_dims[time_coord]]
            else:
                dims = []
                shape = []
            if v["level"]:
                level_coord = level_var2coord[k]
                dims.append(level_coord)
                shape.append(level_dims[level_coord])
            else:
                level_coord = None
            dims.extend(list(self.grid.dims))
            shape.extend(self.grid.shape)
            attrs = dict(
                short_name=k.split(".")[0],
                long_name=v["long_name"],
                units=v["units"],
                grid_mapping=self.grid.cfname,
            )
            return VarSpecs(time_coord, level_coord, dims, shape, attrs)
        return {k: _make_specs(k, v) for k, v in var_info_map.items()}
    def _same_grid(self, i: MetaData) -> bool:
        """Predicate: does inventory item ``i`` lie on this template's grid?"""
        return i.gdtnum == self.commoninfo.gdtnum and i.gdtmpl == self.commoninfo.gdtmpl
def make_template(
    files,
    *predicates,
    vertlevels=None,
    reftime=None,
    save=False,
    invdir=None,
):
    """Creates template from GRIB2 files.

    Parameters
    ----------
    files : str of iterable of str.
        List of GRIB files containing messages with unique `reftime`.
        For example, files for all or a subset of forecast times.
    predicates : callable
        Zero or more boolean functions to select desired variables.
        A variable is selected if one of predicates returns True.
        The default is None, means matches everything.
    vertlevels : str or list of str, optional.
        One of {'isobaric', 'height_asl', 'height_agl', 'sigma', 'hybrid'}.
        Specifies vertical coordinates.
        If None (default), all data variables will be 2-D in space.
    reftime : str or datetime, optional
        Reference time. Default is None. A string must be in the ISO format:
        YYYY-mm-ddTHH:MM:SS.
        This argument must be specified for files with multiple reference times.
    save : bool, optional.
        If True, inventory will be saved to a file. File name and location depends on
        'invdir'. If 'invdir' is given, the inventory file will be a hashed path
        of the GRIB file written to 'invdir'. Otherwise file name will be that of
        the GRIB file, with appended extension ``pyinv``.
        The intention is to allow for temporary inventory when GRIB files are on
        a read-only medium. Default is False.
    invdir : str, optional
        Location of inventory files.

    Returns
    -------
    Template
        Instantiated class defining dataset structure.
        None is returned if no messages match the selection criteria.

    Examples
    --------
    The two equivalent functions select temperature at pressure level:

    >>> lambda x: x.varname == 'TMP' and x.bot_level_code == 100 and x.top_level_code = 255
    >>> lambda x: x.varname == 'TMP' and re.match(r'\\d+ mb', x.level_str)

    To select accumulated 3 hour precipitation, define function `match_pcp3`:

    >>> def match_pcp3(x):
    >>>     return x.varname == 'APCP' and x.end_ft - x.start_ft == timedelta(hours=3)
    """
    # Normalise scalar arguments to the forms used below.
    if isinstance(files, str):
        files = [files]
    if not vertlevels:
        vertlevels = []
    elif isinstance(vertlevels, str):
        vertlevels = [vertlevels]
    if isinstance(reftime, str):
        reftime = datetime.fromisoformat(reftime)
    vert_level_map = {c: v for c, v in VERT_LEVELS.items() if v.type in vertlevels}
    var_info_map: Dict[str, VarInfo] = {}
    commoninfo = None
    for file in files:
        inventory = load_or_make_inventory(file, save=save, directory=invdir)
        if not inventory:
            continue
        matched_items = (i for i in inventory if item_match(i, predicates))
        if reftime is not None:
            matched_items = (i for i in matched_items if i.reftime == reftime)
        for item in matched_items:
            if commoninfo:
                # All subsequent items must share reftime and grid.
                commoninfo.check_item(item)
            else:
                # Only regular grids are allowed
                if item.npts != item.nx * item.ny:
                    raise ValueError("Thinned grids are not supported")
                commoninfo = CommonInfo(
                    item.reftime, item.centre, item.subcentre, item.gdtnum, item.gdtmpl
                )
            varname = item_to_varname(item, vert_level_map)
            if varname not in var_info_map:
                var_info_map[varname] = {
                    "long_name": item.long_name,
                    "units": item.units,
                    "fcst_time": set(),
                    "level": vert_level_map.get(item.bot_level_code),
                    "level_value": set(),
                }
            # Add time and level values
            varinfo = var_info_map[varname]  # a reference
            varinfo["fcst_time"].add(item.end_ft - item.reftime)
            if varinfo["level"]:
                varinfo["level_value"].add(item.bot_level_value)
    if var_info_map:
        return Template(commoninfo, var_info_map, vert_level_map, predicates)
    return None
| 33.425287 | 91 | 0.574415 | from datetime import datetime, timedelta
from functools import partial
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
)
try:
from typing import TypedDict
except ImportError:
from mypy_extensions import TypedDict
try:
from numpy.typing import ArrayLike
except ImportError:
ArrayLike = Any
import numpy as np
from dask.base import tokenize
from . import __version__, _Variable
from .inventory import (
MetaData,
item_match,
load_or_make_inventory,
)
from .grids import grid_fromgds
TIME_MODS = [
" ave ",
" acc ",
" max ",
" min ",
" last-first ",
" RMS ",
" StdDev ",
" covar ",
" first-last ",
" ratio ",
" standardized anomaly ",
" summation ",
]
class VertLevel(NamedTuple):
    """Describes one kind of vertical level (see VERT_LEVELS below)."""
    type: str  # coordinate name, e.g. "isobaric"
    reverse: bool  # sort order: True = sort level values descending
    units: str  # physical units of level values ("" when dimensionless)
VERT_LEVELS: Dict[int, VertLevel] = {
100: VertLevel("isobaric", True, "Pa"),
102: VertLevel("height_asl", False, "m"),
103: VertLevel("height_agl", False, "m"),
104: VertLevel("sigma", True, ""),
105: VertLevel("hybrid", False, ""),
}
class CommonInfo(NamedTuple):
    """Attributes every GRIB message in one dataset must share."""
    reftime: datetime
    centre: str
    subcentre: str
    gdtnum: int
    gdtmpl: List[int]
    def check_item(self, item: MetaData) -> None:
        """Raise ValueError when ``item`` disagrees on reference time or grid."""
        if item.reftime != self.reftime:
            raise ValueError(
                "Reference times differ: {!r} != {!r}".format(
                    self.reftime, item.reftime
                )
            )
        if item.gdtnum != self.gdtnum or item.gdtmpl != self.gdtmpl:
            raise ValueError(
                "Grids differ: {:d}: {!r} != {:d}: {!r}".format(
                    self.gdtnum, self.gdtmpl, item.gdtnum, item.gdtmpl
                )
            )
class VarSpecs(NamedTuple):
    """Layout of one dataset variable: coordinates, dims, shape, attributes."""
    time_coord: str  # forecast time coordinate name
    level_coord: Optional[str]  # vertical coordinate name, None for 2-D vars
    dims: Sequence[str]  # dimension names
    shape: Tuple[int, ...]  # array shape
    attrs: Dict[str, Any]  # variable attributes
class VarInfo(TypedDict):
    """Accumulator for one variable while scanning GRIB inventories."""
    long_name: str
    units: str
    fcst_time: Set[timedelta]  # forecast offsets relative to reftime
    level: VertLevel  # None when the variable has no vertical dimension
    level_value: Set[float]
class TimeCoord(NamedTuple):
    """A named forecast-time coordinate and its sorted values."""
    name: str
    values: ArrayLike
class LevelCoord(NamedTuple):
    """A named vertical-level coordinate, its kind and its sorted values."""
    level: VertLevel
    name: str
    values: ArrayLike
def item_to_varname(item: MetaData, vert_levels: Dict[int, VertLevel]) -> str:
    """Derive a dataset variable name from one inventory item.

    Joins the GRIB variable name, a level descriptor, and (when present)
    a statistical-period descriptor with dots, replacing spaces with "_".
    """
    def _level() -> str:
        lvl = vert_levels.get(item.bot_level_code)
        return lvl.type if lvl else item.level_str
    def _time() -> str:
        td = item.end_ft - item.start_ft
        if td <= timedelta(0):
            return ""
        # Only statistical periods (TIME_MODS) contribute a time part.
        for tm in TIME_MODS:
            if tm in item.time_str:
                days, hours, minutes = (
                    td.days,
                    td.seconds // 3600,
                    (td.seconds // 60) % 60,
                )
                # Express the period in the smallest non-zero unit.
                if minutes:
                    minutes += 60 * hours
                    return "{:d}_min_{:s}".format(minutes, tm.strip())
                elif hours:
                    hours += 24 * days
                    return "{:d}_hour_{:s}".format(hours, tm.strip())
                elif days:
                    return "{:d}_day_{:s}".format(days, tm.strip())
        return ""
    parts = (item.varname, _level(), _time())
    return ".".join([x for x in parts if x]).replace(" ", "_")
class Template:
    """Defines dataset structure; instantiated by :py:func:`make_template`."""
    def __init__(
        self,
        commoninfo: CommonInfo,
        var_info_map: Dict[str, VarInfo],
        vert_level_map: Dict[int, VertLevel],
        predicates: Optional[Sequence[Callable[[MetaData], bool]]] = None,
    ):
        # Copy the caller's list: _same_grid is appended below.
        if predicates is None:
            predicates = []
        else:
            predicates = list(predicates)
        self.commoninfo = commoninfo
        self.grid = grid_fromgds(commoninfo.gdtnum, commoninfo.gdtmpl)
        self.coords = {k: _Variable(*v) for k, v in self.grid.coords.items()}
        level_dims, level_coords, level_var2coord = self._build_level_coords(
            var_info_map
        )
        self.coords.update(level_coords)
        time_dims, time_coords, time_var2coord = self._build_time_coords(var_info_map)
        self.coords.update(time_coords)
        self.var_specs = self._build_var_specs(
            var_info_map, time_dims, time_var2coord, level_dims, level_var2coord
        )
        self.attrs = self._build_attrs()
        self.item_to_varname = partial(item_to_varname, vert_levels=vert_level_map)
        predicates.append(self._same_grid)
        self.item_match = partial(item_match, predicates=predicates)
    def __repr__(self):
        summary = [
            "Coordinates:",
            repr(self.coords),
            "Variable specs",
            repr(self.var_specs),
            "Attributes:",
            repr(self.attrs),
        ]
        return "\n".join(summary)
    @property
    def var_names(self):
        """Sorted names of all data variables in this template."""
        return sorted(list(self.var_specs.keys()))
    @staticmethod
    def _build_level_coords(
        var_info_map: Dict[str, VarInfo]
    ) -> Tuple[Dict[str, int], Dict[str, _Variable], Dict[str, str]]:
        """Build vertical coordinates; returns (dim sizes, coords, var->coord map)."""
        def _name(v: Sequence[Any]) -> str:
            # Hash of the level values; identical level sets share a coordinate.
            return tokenize(*v)
        def _sort(v: VarInfo) -> LevelCoord:
            vert_level = v["level"]
            coords = sorted(v["level_value"], reverse=vert_level.reverse)
            dimname = _name(coords)
            return LevelCoord(vert_level, dimname, coords)
        def _levels() -> Dict[str, LevelCoord]:
            levels = {k: _sort(v) for k, v in var_info_map.items() if v["level"]}
            s = set([(v.level, v.name) for v in levels.values()])
            # Replace hash names with readable ones: <level type><N>.
            names = {
                name: "{:s}{:d}".format(level.type, i + 1)
                for (i, (level, name)) in enumerate(s)
            }
            return {
                k: LevelCoord(v.level, names[v.name], v.values)
                for (k, v) in levels.items()
            }
        levels = _levels()
        coords = {}
        dims = {}
        var2coord = {}
        for k, v in levels.items():
            var2coord[k] = v.name
            attrs = {
                "units": v.level.units,
                "axis": "Z",
                "positive": "down" if v.level.reverse else "up",
            }
            coords[v.name] = _Variable((v.name), np.array(v.values), attrs)
            dims[v.name] = len(v.values)
        return dims, coords, var2coord
    @staticmethod
    def _build_time_coords(
        var_info_map: Dict[str, VarInfo]
    ) -> Tuple[Dict[str, int], Dict[str, _Variable], Dict[str, str]]:
        """Build forecast-time coordinates; returns (dim sizes, coords, var->coord map)."""
        def _name(v: Sequence[Any]) -> str:
            return tokenize(*[t.seconds for t in v])
        def _sort(v: VarInfo) -> TimeCoord:
            coords = sorted(v["fcst_time"])
            dimname = _name(coords)
            return TimeCoord(dimname, coords)
        def _times() -> Dict[str, TimeCoord]:
            times = {k: _sort(v) for k, v in var_info_map.items()}
            s = set([v.name for v in times.values()])
            # Replace hash names with readable "time<N>" names.
            names = {n: "time{:d}".format(i + 1) for (i, n) in enumerate(sorted(s))}
            return {k: TimeCoord(names[v.name], v.values) for (k, v) in times.items()}
        times = _times()
        # Squeeze only when every variable has a single forecast time.
        squeeze = max([len(v.values) for v in times.values()]) == 1
        coords = {}
        dims = {}
        var2coord = {}
        attrs = {"standard_name": "forecast_period"}
        for k, v in times.items():
            var2coord[k] = v.name
            if squeeze:
                coords[v.name] = _Variable((), np.array(v.values[0]), attrs)
            else:
                coords[v.name] = _Variable((v.name), np.array(v.values), attrs)
            dims[v.name] = len(v.values)
        return dims, coords, var2coord
    def _build_attrs(self) -> Dict[str, str]:
        """Global (dataset-level) attributes."""
        return {
            "Projection": self.grid.cfname,
            "Originating centre": self.commoninfo.centre,
            "Originating subcentre": self.commoninfo.subcentre,
            "History": "Created by pywgrib2_xr-{:s}".format(__version__),
        }
    def _build_var_specs(
        self,
        var_info_map: Dict[str, VarInfo],
        time_dims: Dict[str, int],
        time_var2coord: Dict[str, str],
        level_dims: Dict[str, int],
        level_var2coord: Dict[str, str],
    ) -> Dict[str, VarSpecs]:
        """Assemble per-variable VarSpecs: [time][, level] + spatial dims."""
        def _make_specs(k, v):
            time_coord = time_var2coord[k]
            # Absent from time_dims means the time dim was squeezed out.
            if time_coord in time_dims:
                dims = [time_coord]
                shape = [time_dims[time_coord]]
            else:
                dims = []
                shape = []
            if v["level"]:
                level_coord = level_var2coord[k]
                dims.append(level_coord)
                shape.append(level_dims[level_coord])
            else:
                level_coord = None
            dims.extend(list(self.grid.dims))
            shape.extend(self.grid.shape)
            attrs = dict(
                short_name=k.split(".")[0],
                long_name=v["long_name"],
                units=v["units"],
                grid_mapping=self.grid.cfname,
            )
            return VarSpecs(time_coord, level_coord, dims, shape, attrs)
        return {k: _make_specs(k, v) for k, v in var_info_map.items()}
    def _same_grid(self, i: MetaData) -> bool:
        """Predicate: does inventory item ``i`` lie on this template's grid?"""
        return i.gdtnum == self.commoninfo.gdtnum and i.gdtmpl == self.commoninfo.gdtmpl
def make_template(
    files,
    *predicates,
    vertlevels=None,
    reftime=None,
    save=False,
    invdir=None,
):
    """Create a :class:`Template` from GRIB2 ``files``.

    ``predicates`` select variables (match-all when empty); ``vertlevels``
    names vertical coordinates to build; ``reftime`` (str or datetime)
    restricts messages to one reference time; ``save``/``invdir`` control
    inventory persistence.  Returns None when nothing matches.
    """
    # Normalise scalar arguments to the forms used below.
    if isinstance(files, str):
        files = [files]
    if not vertlevels:
        vertlevels = []
    elif isinstance(vertlevels, str):
        vertlevels = [vertlevels]
    if isinstance(reftime, str):
        reftime = datetime.fromisoformat(reftime)
    vert_level_map = {c: v for c, v in VERT_LEVELS.items() if v.type in vertlevels}
    var_info_map: Dict[str, VarInfo] = {}
    commoninfo = None
    for file in files:
        inventory = load_or_make_inventory(file, save=save, directory=invdir)
        if not inventory:
            continue
        matched_items = (i for i in inventory if item_match(i, predicates))
        if reftime is not None:
            matched_items = (i for i in matched_items if i.reftime == reftime)
        for item in matched_items:
            if commoninfo:
                # All subsequent items must share reftime and grid.
                commoninfo.check_item(item)
            else:
                # Only regular grids are allowed.
                if item.npts != item.nx * item.ny:
                    raise ValueError("Thinned grids are not supported")
                commoninfo = CommonInfo(
                    item.reftime, item.centre, item.subcentre, item.gdtnum, item.gdtmpl
                )
            varname = item_to_varname(item, vert_level_map)
            if varname not in var_info_map:
                var_info_map[varname] = {
                    "long_name": item.long_name,
                    "units": item.units,
                    "fcst_time": set(),
                    "level": vert_level_map.get(item.bot_level_code),
                    "level_value": set(),
                }
            # Accumulate this item's forecast time and level value.
            varinfo = var_info_map[varname]
            varinfo["fcst_time"].add(item.end_ft - item.reftime)
            if varinfo["level"]:
                varinfo["level_value"].add(item.bot_level_value)
    if var_info_map:
        return Template(commoninfo, var_info_map, vert_level_map, predicates)
    return None
| true | true |
1c3c3f457bc93c4d38f911ff7c9e924cbda143ee | 5,918 | py | Python | dvc/command/run.py | drorata/dvc | b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0 | [
"Apache-2.0"
] | 1 | 2019-09-02T00:28:11.000Z | 2019-09-02T00:28:11.000Z | dvc/command/run.py | drorata/dvc | b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0 | [
"Apache-2.0"
] | null | null | null | dvc/command/run.py | drorata/dvc | b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0 | [
"Apache-2.0"
] | 1 | 2019-09-02T00:29:40.000Z | 2019-09-02T00:29:40.000Z | from __future__ import unicode_literals
import argparse
import logging
from dvc.command.base import CmdBase, append_doc_link
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
class CmdRun(CmdBase):
    """CLI adapter for ``dvc run``: validates arguments and calls Repo.run."""

    def run(self):
        """Execute the command; return 0 on success, 1 on failure."""
        args = self.args
        overwrite = args.yes or args.overwrite_dvcfile
        anything_declared = (
            args.deps,
            args.outs,
            args.outs_no_cache,
            args.metrics,
            args.metrics_no_cache,
            args.outs_persist,
            args.outs_persist_no_cache,
            args.command,
        )
        if not any(anything_declared):  # pragma: no cover
            logger.error(
                "too few arguments. Specify at least one: '-d', '-o', '-O',"
                " '-m', '-M', '--outs-persist', '--outs-persist-no-cache',"
                " 'command'."
            )
            return 1

        try:
            self.repo.run(
                cmd=self._parsed_cmd(),
                outs=args.outs,
                outs_no_cache=args.outs_no_cache,
                metrics=args.metrics,
                metrics_no_cache=args.metrics_no_cache,
                deps=args.deps,
                fname=args.file,
                cwd=args.cwd,
                wdir=args.wdir,
                no_exec=args.no_exec,
                overwrite=overwrite,
                ignore_build_cache=args.ignore_build_cache,
                remove_outs=args.remove_outs,
                no_commit=args.no_commit,
                outs_persist=args.outs_persist,
                outs_persist_no_cache=args.outs_persist_no_cache,
            )
        except DvcException:
            logger.exception("failed to run command")
            return 1

        return 0

    def _parsed_cmd(self):
        """
        Join the argv tail into a single command string.

        Two cases must be handled:
        - ['python code.py foo bar']: single pre-built string (library use)
        - ['echo', 'foo bar']: argument list from the CLI, which needs
          re-quoting because the shell stripped the quotes.
        """
        command = self.args.command
        if len(command) < 2:
            return " ".join(command)
        return " ".join(map(self._quote_argument, command))

    def _quote_argument(self, argument):
        # Re-quote arguments containing spaces, unless they already
        # contain a double quote (then quoting would corrupt them).
        needs_quoting = " " in argument and '"' not in argument
        return '"{}"'.format(argument) if needs_quoting else argument
def add_parser(subparsers, parent_parser):
    """Register the ``run`` subcommand and all of its CLI options."""
    RUN_HELP = "Generate a stage file from a command and execute the command."
    run_parser = subparsers.add_parser(
        "run",
        parents=[parent_parser],
        description=append_doc_link(RUN_HELP, "run"),
        help=RUN_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Dependency and output declarations.
    run_parser.add_argument(
        "-d",
        "--deps",
        action="append",
        default=[],
        help="Declare dependencies for reproducible cmd.",
    )
    run_parser.add_argument(
        "-o",
        "--outs",
        action="append",
        default=[],
        help="Declare output file or directory.",
    )
    run_parser.add_argument(
        "-O",
        "--outs-no-cache",
        action="append",
        default=[],
        help="Declare output file or directory "
        "(do not put into DVC cache).",
    )
    run_parser.add_argument(
        "-m",
        "--metrics",
        action="append",
        default=[],
        help="Declare output metric file or directory.",
    )
    run_parser.add_argument(
        "-M",
        "--metrics-no-cache",
        action="append",
        default=[],
        help="Declare output metric file or directory "
        "(do not put into DVC cache).",
    )
    # Stage-file naming and working directory.
    run_parser.add_argument(
        "-f", "--file", help="Specify name of the DVC-file it generates."
    )
    run_parser.add_argument(
        "-c", "--cwd", help="Deprecated, use -w and -f instead."
    )
    run_parser.add_argument(
        "-w",
        "--wdir",
        help="Directory within your repo to run your command in.",
    )
    # Execution behavior flags.
    run_parser.add_argument(
        "--no-exec",
        action="store_true",
        default=False,
        help="Only create stage file without actually running it.",
    )
    run_parser.add_argument(
        "-y",
        "--yes",
        action="store_true",
        default=False,
        help="Deprecated, use --overwrite-dvcfile instead",
    )
    run_parser.add_argument(
        "--overwrite-dvcfile",
        action="store_true",
        default=False,
        help="Overwrite existing DVC-file without asking for confirmation.",
    )
    run_parser.add_argument(
        "--ignore-build-cache",
        action="store_true",
        default=False,
        help="Run this stage even if it has been already ran with the same "
        "command/dependencies/outputs/etc before.",
    )
    run_parser.add_argument(
        "--remove-outs",
        action="store_true",
        default=False,
        help="Deprecated, this is now the default behavior",
    )
    run_parser.add_argument(
        "--no-commit",
        action="store_true",
        default=False,
        help="Don't put files/directories into cache.",
    )
    # Outputs that survive `dvc repro`.
    run_parser.add_argument(
        "--outs-persist",
        action="append",
        default=[],
        help="Declare output file or directory that will not be "
        "removed upon repro.",
    )
    run_parser.add_argument(
        "--outs-persist-no-cache",
        action="append",
        default=[],
        help="Declare output file or directory that will not be "
        "removed upon repro (do not put into DVC cache).",
    )
    # Everything after the options is the command to run.
    run_parser.add_argument(
        "command", nargs=argparse.REMAINDER, help="Command to execute."
    )
    run_parser.set_defaults(func=CmdRun)
| 30.193878 | 79 | 0.562521 | from __future__ import unicode_literals
import argparse
import logging
from dvc.command.base import CmdBase, append_doc_link
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
class CmdRun(CmdBase):
    """CLI adapter for ``dvc run``: validates arguments and calls Repo.run."""
    def run(self):
        """Execute the command; return 0 on success, 1 on failure."""
        overwrite = self.args.yes or self.args.overwrite_dvcfile
        if not any(
            [
                self.args.deps,
                self.args.outs,
                self.args.outs_no_cache,
                self.args.metrics,
                self.args.metrics_no_cache,
                self.args.outs_persist,
                self.args.outs_persist_no_cache,
                self.args.command,
            ]
        ):
            logger.error(
                "too few arguments. Specify at least one: '-d', '-o', '-O',"
                " '-m', '-M', '--outs-persist', '--outs-persist-no-cache',"
                " 'command'."
            )
            return 1
        try:
            self.repo.run(
                cmd=self._parsed_cmd(),
                outs=self.args.outs,
                outs_no_cache=self.args.outs_no_cache,
                metrics=self.args.metrics,
                metrics_no_cache=self.args.metrics_no_cache,
                deps=self.args.deps,
                fname=self.args.file,
                cwd=self.args.cwd,
                wdir=self.args.wdir,
                no_exec=self.args.no_exec,
                overwrite=overwrite,
                ignore_build_cache=self.args.ignore_build_cache,
                remove_outs=self.args.remove_outs,
                no_commit=self.args.no_commit,
                outs_persist=self.args.outs_persist,
                outs_persist_no_cache=self.args.outs_persist_no_cache,
            )
        except DvcException:
            logger.exception("failed to run command")
            return 1
        return 0
    def _parsed_cmd(self):
        """Join the argv tail into one command string, re-quoting CLI args.

        Handles both a single pre-built string (library use) and a list of
        arguments from the CLI whose shell quoting was stripped.
        """
        if len(self.args.command) < 2:
            return " ".join(self.args.command)
        return " ".join(self._quote_argument(arg) for arg in self.args.command)
    def _quote_argument(self, argument):
        # Re-quote args with spaces unless they already contain a quote.
        if " " not in argument or '"' in argument:
            return argument
        return '"{}"'.format(argument)
def add_parser(subparsers, parent_parser):
    """Register the ``dvc run`` sub-command and all of its arguments."""
    RUN_HELP = "Generate a stage file from a command and execute the command."
    run_parser = subparsers.add_parser(
        "run",
        parents=[parent_parser],
        description=append_doc_link(RUN_HELP, "run"),
        help=RUN_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Dependency and output declarations (all repeatable).
    run_parser.add_argument(
        "-d",
        "--deps",
        action="append",
        default=[],
        help="Declare dependencies for reproducible cmd.",
    )
    run_parser.add_argument(
        "-o",
        "--outs",
        action="append",
        default=[],
        help="Declare output file or directory.",
    )
    run_parser.add_argument(
        "-O",
        "--outs-no-cache",
        action="append",
        default=[],
        help="Declare output file or directory "
        "(do not put into DVC cache).",
    )
    run_parser.add_argument(
        "-m",
        "--metrics",
        action="append",
        default=[],
        help="Declare output metric file or directory.",
    )
    run_parser.add_argument(
        "-M",
        "--metrics-no-cache",
        action="append",
        default=[],
        help="Declare output metric file or directory "
        "(do not put into DVC cache).",
    )
    run_parser.add_argument(
        "-f", "--file", help="Specify name of the DVC-file it generates."
    )
    # -c/--cwd and -y/--yes are kept only for backwards compatibility.
    run_parser.add_argument(
        "-c", "--cwd", help="Deprecated, use -w and -f instead."
    )
    run_parser.add_argument(
        "-w",
        "--wdir",
        help="Directory within your repo to run your command in.",
    )
    run_parser.add_argument(
        "--no-exec",
        action="store_true",
        default=False,
        help="Only create stage file without actually running it.",
    )
    run_parser.add_argument(
        "-y",
        "--yes",
        action="store_true",
        default=False,
        help="Deprecated, use --overwrite-dvcfile instead",
    )
    run_parser.add_argument(
        "--overwrite-dvcfile",
        action="store_true",
        default=False,
        help="Overwrite existing DVC-file without asking for confirmation.",
    )
    run_parser.add_argument(
        "--ignore-build-cache",
        action="store_true",
        default=False,
        help="Run this stage even if it has been already ran with the same "
        "command/dependencies/outputs/etc before.",
    )
    run_parser.add_argument(
        "--remove-outs",
        action="store_true",
        default=False,
        help="Deprecated, this is now the default behavior",
    )
    run_parser.add_argument(
        "--no-commit",
        action="store_true",
        default=False,
        help="Don't put files/directories into cache.",
    )
    run_parser.add_argument(
        "--outs-persist",
        action="append",
        default=[],
        help="Declare output file or directory that will not be "
        "removed upon repro.",
    )
    run_parser.add_argument(
        "--outs-persist-no-cache",
        action="append",
        default=[],
        help="Declare output file or directory that will not be "
        "removed upon repro (do not put into DVC cache).",
    )
    # Everything after the recognized options is the command to run.
    run_parser.add_argument(
        "command", nargs=argparse.REMAINDER, help="Command to execute."
    )
    run_parser.set_defaults(func=CmdRun)
1c3c3fcd52cb384d3372de0dcc9572583ec10286 | 326 | py | Python | tests/helpers/mock_logger.py | enamrik/krogon | a41a10ed346b7198509929ed9ba1e9fcf778dc78 | [
"MIT"
] | 1 | 2020-03-02T14:17:02.000Z | 2020-03-02T14:17:02.000Z | tests/helpers/mock_logger.py | enamrik/krogon | a41a10ed346b7198509929ed9ba1e9fcf778dc78 | [
"MIT"
] | null | null | null | tests/helpers/mock_logger.py | enamrik/krogon | a41a10ed346b7198509929ed9ba1e9fcf778dc78 | [
"MIT"
] | null | null | null | from unittest.mock import Mock, MagicMock
from krogon.logger import Logger
class MockLogger:
    """Builds a ``Logger`` test double whose ``add_prefix`` returns the mock
    itself, so chained ``logger.add_prefix(...)`` calls keep working."""

    def __init__(self):
        logger = Mock(spec=Logger)
        logger.add_prefix = MagicMock(name='add_prefix', return_value=logger)
        # BUGFIX: the configured mock used to be discarded here in favour of a
        # real Logger(name="test"), so get_mock() never returned a mock at all.
        self.logger = logger

    def get_mock(self):
        """Return the configured Logger mock."""
        return self.logger
| 25.076923 | 77 | 0.690184 | from unittest.mock import Mock, MagicMock
from krogon.logger import Logger
class MockLogger:
    """Builds a ``Logger`` test double whose ``add_prefix`` returns the mock
    itself, so chained ``logger.add_prefix(...)`` calls keep working."""

    def __init__(self):
        logger = Mock(spec=Logger)
        logger.add_prefix = MagicMock(name='add_prefix', return_value=logger)
        # BUGFIX: the configured mock used to be discarded here in favour of a
        # real Logger(name="test"), so get_mock() never returned a mock at all.
        self.logger = logger

    def get_mock(self):
        """Return the configured Logger mock."""
        return self.logger
| true | true |
1c3c416ed103d0d951f698f0dbd413fda6dbcb3a | 6,939 | py | Python | dataset_preprocessing/preprocess_h36m.py | akashsengupta1997/ProHMR | 7015a3d070c79b4571d43abdf5e522468091a94d | [
"BSD-3-Clause"
] | 120 | 2021-08-27T23:21:17.000Z | 2022-03-30T03:34:07.000Z | dataset_preprocessing/preprocess_h36m.py | akashsengupta1997/ProHMR | 7015a3d070c79b4571d43abdf5e522468091a94d | [
"BSD-3-Clause"
] | 17 | 2021-09-08T10:10:37.000Z | 2022-03-17T02:40:21.000Z | dataset_preprocessing/preprocess_h36m.py | akashsengupta1997/ProHMR | 7015a3d070c79b4571d43abdf5e522468091a94d | [
"BSD-3-Clause"
] | 10 | 2021-08-31T06:08:49.000Z | 2022-03-29T21:51:14.000Z | import os
import sys
import cv2
import glob
import h5py
import numpy as np
import argparse
from spacepy import pycdf
import pickle
from prohmr.configs import prohmr_config, dataset_config
parser = argparse.ArgumentParser(description='Generate H36M dataset files')
parser.add_argument('--split', type=str, required=True, choices=['VAL', 'VAL-P2', 'TRAIN', 'MULTIVIEW'], help='Dataset split to preprocess')
args = parser.parse_args()
def preprocess_h36m(dataset_path: str, out_file: str, split: str, extract_img: bool = False):
    '''
    Generate H36M training and validation npz files.

    Args:
        dataset_path (str): Path to H36M root
        out_file (str): Output npz filename
        split (str): Which split to generate: TRAIN / VAL / VAL-P2 (any case)
        extract_img (bool): Whether to extract the frames from the videos
    '''
    # convert joints to global order
    h36m_idx = [11, 6, 7, 8, 1, 2, 3, 12, 24, 14, 15, 17, 18, 19, 25, 26, 27]
    global_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]

    # per-frame accumulators
    imgnames_, scales_, centers_, extra_keypoints_2d_, extra_keypoints_3d_ = [], [], [], [], []

    # BUGFIX: split names arrive upper-case ('TRAIN', 'VAL', 'VAL-P2') but were
    # compared lower-case, leaving user_list undefined.  Normalize once.
    split = split.upper()
    if split == 'TRAIN':
        user_list = [1, 5, 6, 7, 8]
    elif split in ('VAL', 'VAL-P2'):
        user_list = [9, 11]
    else:
        raise ValueError('Unknown split: %s' % split)

    # go over each user
    for user_i in user_list:
        user_name = 'S%d' % user_i
        # path with GT bounding boxes
        bbox_path = os.path.join(dataset_path, user_name, 'MySegmentsMat', 'ground_truth_bb')
        # path with GT 3D pose
        pose_path = os.path.join(dataset_path, user_name, 'MyPoseFeatures', 'D3_Positions_mono')
        # path with GT 2D pose
        pose2d_path = os.path.join(dataset_path, user_name, 'MyPoseFeatures', 'D2_Positions')
        # path with videos
        vid_path = os.path.join(dataset_path, user_name, 'Videos')

        # go over all the sequences of each user
        seq_list = glob.glob(os.path.join(pose_path, '*.cdf'))
        seq_list.sort()
        for seq_i in seq_list:
            # sequence info
            seq_name = seq_i.split('/')[-1]
            action, camera, _ = seq_name.split('.')
            action = action.replace(' ', '_')
            # irrelevant sequences
            if action == '_ALL':
                continue

            # 3D pose file
            poses_3d = pycdf.CDF(seq_i)['Pose'][0]
            # 2D pose file
            pose2d_file = os.path.join(pose2d_path, seq_name)
            poses_2d = pycdf.CDF(pose2d_file)['Pose'][0]
            # bbox file
            bbox_file = os.path.join(bbox_path, seq_name.replace('cdf', 'mat'))
            bbox_h5py = h5py.File(bbox_file)

            # video file
            if extract_img:
                vid_file = os.path.join(vid_path, seq_name.replace('cdf', 'mp4'))
                imgs_path = os.path.join(dataset_path, 'images')
                vidcap = cv2.VideoCapture(vid_file)
                success, image = vidcap.read()

            # go over each frame of the sequence
            for frame_i in range(poses_3d.shape[0]):
                # read video frame
                if extract_img:
                    success, image = vidcap.read()
                    if not success:
                        break

                # keep every 5th frame; for VAL-P2 only camera 60457274
                if frame_i % 5 == 0 and (split == 'VAL' or split == 'TRAIN' or camera == '60457274'):
                    # image name
                    imgname = '%s_%s.%s_%06d.jpg' % (user_name, action, camera, frame_i+1)
                    # save image
                    if extract_img:
                        img_out = os.path.join(imgs_path, imgname)
                        cv2.imwrite(img_out, image)

                    # read GT bounding box from the segmentation mask
                    mask = bbox_h5py[bbox_h5py['Masks'][frame_i,0]].value.T
                    ys, xs = np.where(mask==1)
                    bbox = np.array([np.min(xs), np.min(ys), np.max(xs)+1, np.max(ys)+1])
                    center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
                    scale = 0.9*max(bbox[2]-bbox[0], bbox[3]-bbox[1])

                    # read GT 2D pose
                    partall = np.reshape(poses_2d[frame_i,:], [-1,2])
                    part17 = partall[h36m_idx]
                    extra_keypoints_2d = np.zeros([19,3])
                    extra_keypoints_2d[global_idx, :2] = part17
                    extra_keypoints_2d[global_idx, 2] = 1

                    # read GT 3D pose (meters, root-centered)
                    Sall = np.reshape(poses_3d[frame_i,:], [-1,3])/1000.
                    S17 = Sall[h36m_idx]
                    S17 -= S17[0] # root-centered
                    extra_keypoints_3d = np.zeros([19,4])
                    extra_keypoints_3d[global_idx, :3] = S17
                    extra_keypoints_3d[global_idx, 3] = 1

                    # store data
                    imgnames_.append(os.path.join('images', imgname))
                    centers_.append(center)
                    scales_.append(scale)
                    extra_keypoints_2d_.append(extra_keypoints_2d)
                    extra_keypoints_3d_.append(extra_keypoints_3d)

    # store the data struct
    # BUGFIX: create the *parent directory* of the output file; previously a
    # directory was created at the output path itself, which breaks np.savez.
    out_dir = os.path.dirname(out_file)
    if out_dir and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    # BUGFIX: write the accumulated lists; previously only the keypoints of
    # the last processed frame were saved (missing trailing underscores).
    np.savez(out_file, imgname=imgnames_,
             center=centers_,
             scale=scales_,
             extra_keypoints_2d=extra_keypoints_2d_,
             extra_keypoints_3d=extra_keypoints_3d_)
def preprocess_h36m_multiview(input_file: str, out_file: str):
    '''
    Generate H36M multiview evaluation file.

    Groups the frames of the validation npz by (action, camera) and emits, for
    every frame index, one record with the matching frame from each camera.

    Args:
        input_file (str): H36M validation npz filename
        out_file (str): Output pickle filename
    '''
    x = dict(np.load(input_file))
    imgname = x['imgname']
    # entries look like 'images/<subject>_<action>.<camera>_<frame>.jpg'
    actions = np.unique([img.split('/')[-1].split('.')[0] for img in imgname])
    frames = {action: {} for action in actions}
    for i, img in enumerate(imgname):
        action_with_cam = img.split('/')[-1]
        action = action_with_cam.split('.')[0]
        cam = action_with_cam.split('.')[1].split('_')[0]
        # BUGFIX: the first frame seen for each camera used to be dropped
        # (an empty list was created without appending the current index).
        frames[action].setdefault(cam, []).append(i)
    data_list = []
    for action in frames.keys():
        cams = list(frames[action].keys())
        for n in range(len(frames[action][cams[0]])):
            # one record per frame index, gathering that frame for every camera
            keep_frames = [frames[action][cam][n] for cam in cams]
            data_list.append({k: v[keep_frames] for k, v in x.items()})
    # context manager so the output handle is closed deterministically
    with open(out_file, 'wb') as f:
        pickle.dump(data_list, f)
if __name__ == '__main__':
    # Look up the requested split's config; MULTIVIEW post-processes the VAL
    # npz into a pickle, every other split regenerates the npz from raw data.
    dataset_cfg = dataset_config()[f'H36M-{args.split}']
    if args.split == 'MULTIVIEW':
        preprocess_h36m_multiview(dataset_config()['H36M-VAL'].DATASET_FILE, dataset_cfg.DATASET_FILE)
    else:
        preprocess_h36m(dataset_cfg.IMG_DIR, dataset_cfg.DATASET_FILE, args.split, extract_img=True)
| 39.87931 | 140 | 0.555556 | import os
import sys
import cv2
import glob
import h5py
import numpy as np
import argparse
from spacepy import pycdf
import pickle
from prohmr.configs import prohmr_config, dataset_config
parser = argparse.ArgumentParser(description='Generate H36M dataset files')
parser.add_argument('--split', type=str, required=True, choices=['VAL', 'VAL-P2', 'TRAIN', 'MULTIVIEW'], help='Dataset split to preprocess')
args = parser.parse_args()
def preprocess_h36m(dataset_path: str, out_file: str, split: str, extract_img: bool = False):
h36m_idx = [11, 6, 7, 8, 1, 2, 3, 12, 24, 14, 15, 17, 18, 19, 25, 26, 27]
global_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]
imgnames_, scales_, centers_, extra_keypoints_2d_, extra_keypoints_3d_ = [], [], [], [], []
if split == 'train':
user_list = [1, 5, 6, 7, 8]
elif split == 'val' or split == 'val-p2':
user_list = [9, 11]
for user_i in user_list:
user_name = 'S%d' % user_i
bbox_path = os.path.join(dataset_path, user_name, 'MySegmentsMat', 'ground_truth_bb')
pose_path = os.path.join(dataset_path, user_name, 'MyPoseFeatures', 'D3_Positions_mono')
pose2d_path = os.path.join(dataset_path, user_name, 'MyPoseFeatures', 'D2_Positions')
vid_path = os.path.join(dataset_path, user_name, 'Videos')
seq_list = glob.glob(os.path.join(pose_path, '*.cdf'))
seq_list.sort()
for seq_i in seq_list:
seq_name = seq_i.split('/')[-1]
action, camera, _ = seq_name.split('.')
action = action.replace(' ', '_')
if action == '_ALL':
continue
poses_3d = pycdf.CDF(seq_i)['Pose'][0]
pose2d_file = os.path.join(pose2d_path, seq_name)
poses_2d = pycdf.CDF(pose2d_file)['Pose'][0]
bbox_file = os.path.join(bbox_path, seq_name.replace('cdf', 'mat'))
bbox_h5py = h5py.File(bbox_file)
if extract_img:
vid_file = os.path.join(vid_path, seq_name.replace('cdf', 'mp4'))
imgs_path = os.path.join(dataset_path, 'images')
vidcap = cv2.VideoCapture(vid_file)
success, image = vidcap.read()
for frame_i in range(poses_3d.shape[0]):
if extract_img:
success, image = vidcap.read()
if not success:
break
if frame_i % 5 == 0 and (split == 'VAL' or split == 'TRAIN' or camera == '60457274'):
imgname = '%s_%s.%s_%06d.jpg' % (user_name, action, camera, frame_i+1)
if extract_img:
img_out = os.path.join(imgs_path, imgname)
cv2.imwrite(img_out, image)
mask = bbox_h5py[bbox_h5py['Masks'][frame_i,0]].value.T
ys, xs = np.where(mask==1)
bbox = np.array([np.min(xs), np.min(ys), np.max(xs)+1, np.max(ys)+1])
center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
scale = 0.9*max(bbox[2]-bbox[0], bbox[3]-bbox[1])
partall = np.reshape(poses_2d[frame_i,:], [-1,2])
part17 = partall[h36m_idx]
extra_keypoints_2d = np.zeros([19,3])
extra_keypoints_2d[global_idx, :2] = part17
extra_keypoints_2d[global_idx, 2] = 1
Sall = np.reshape(poses_3d[frame_i,:], [-1,3])/1000.
S17 = Sall[h36m_idx]
S17 -= S17[0]
extra_keypoints_3d = np.zeros([19,4])
extra_keypoints_3d[global_idx, :3] = S17
extra_keypoints_3d[global_idx, 3] = 1
imgnames_.append(os.path.join('images', imgname))
centers_.append(center)
scales_.append(scale)
extra_keypoints_2d_.append(extra_keypoints_2d)
extra_keypoints_3d_.append(extra_keypoints_3d)
if not os.path.isdir(out_file):
os.makedirs(out_file)
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
extra_keypoints_2d=extra_keypoints_2d,
extra_keypoints_3d=extra_keypoints_3d)
def preprocess_h36m_multiview(input_file: str, out_file: str):
    '''
    Generate H36M multiview evaluation file.

    Groups the frames of the validation npz by (action, camera) and emits, for
    every frame index, one record with the matching frame from each camera.

    Args:
        input_file (str): H36M validation npz filename
        out_file (str): Output pickle filename
    '''
    x = dict(np.load(input_file))
    imgname = x['imgname']
    # entries look like 'images/<subject>_<action>.<camera>_<frame>.jpg'
    actions = np.unique([img.split('/')[-1].split('.')[0] for img in imgname])
    frames = {action: {} for action in actions}
    for i, img in enumerate(imgname):
        action_with_cam = img.split('/')[-1]
        action = action_with_cam.split('.')[0]
        cam = action_with_cam.split('.')[1].split('_')[0]
        # BUGFIX: the first frame seen for each camera used to be dropped
        # (an empty list was created without appending the current index).
        frames[action].setdefault(cam, []).append(i)
    data_list = []
    for action in frames.keys():
        cams = list(frames[action].keys())
        for n in range(len(frames[action][cams[0]])):
            # one record per frame index, gathering that frame for every camera
            keep_frames = [frames[action][cam][n] for cam in cams]
            data_list.append({k: v[keep_frames] for k, v in x.items()})
    # context manager so the output handle is closed deterministically
    with open(out_file, 'wb') as f:
        pickle.dump(data_list, f)
dataset_cfg = dataset_config()[f'H36M-{args.split}']
if args.split == 'MULTIVIEW':
preprocess_h36m_multiview(dataset_config()['H36M-VAL'].DATASET_FILE, dataset_cfg.DATASET_FILE)
else:
preprocess_h36m(dataset_cfg.IMG_DIR, dataset_cfg.DATASET_FILE, args.split, extract_img=True)
| true | true |
1c3c4280efae7a8b28a6d112d58598efe7e727b5 | 4,560 | py | Python | src/day8.py | dsimon96/AdventOfCode2021 | 9a538181b3c477cd0f360ce410d31fa7180db3b7 | [
"MIT"
] | null | null | null | src/day8.py | dsimon96/AdventOfCode2021 | 9a538181b3c477cd0f360ce410d31fa7180db3b7 | [
"MIT"
] | null | null | null | src/day8.py | dsimon96/AdventOfCode2021 | 9a538181b3c477cd0f360ce410d31fa7180db3b7 | [
"MIT"
] | null | null | null | from typing import Generator, Iterable, TextIO
import sys
import click
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum, auto
# Top-level click command group; `part1` and `part2` are attached below.
@click.group()
def main(): pass
class Segment(Enum):
    # The seven segments of a display digit:
    # T/M/B = top/middle/bottom bars, UL/UR/LL/LR = the four corner verticals.
    T = auto()
    M = auto()
    B = auto()
    UL = auto()
    UR = auto()
    LL = auto()
    LR = auto()
class Signal(Enum):
    # The seven scrambled wire labels ('a'..'g') exactly as read from input.
    A = auto()
    B = auto()
    C = auto()
    D = auto()
    E = auto()
    F = auto()
    G = auto()
# Which set of lit segments renders each decimal digit on a standard
# seven-segment display (used as a lookup once the wiring is deduced).
DIGITS: dict[frozenset[Segment], int] = {
    frozenset((Segment.T, Segment.UL, Segment.UR, Segment.LL, Segment.LR,
               Segment.B)): 0,
    frozenset((Segment.UR, Segment.LR)): 1,
    frozenset((Segment.T, Segment.UR, Segment.M, Segment.LL, Segment.B)): 2,
    frozenset((Segment.T, Segment.UR, Segment.M, Segment.LR, Segment.B)): 3,
    frozenset((Segment.UL, Segment.M, Segment.UR, Segment.LR)): 4,
    frozenset((Segment.T, Segment.UL, Segment.M, Segment.LR, Segment.B)): 5,
    frozenset((Segment.T, Segment.UL, Segment.M, Segment.LL, Segment.LR,
               Segment.B)): 6,
    frozenset((Segment.T, Segment.UR, Segment.LR)): 7,
    frozenset((Segment.T, Segment.UL, Segment.UR, Segment.M, Segment.LL,
               Segment.LR, Segment.B)): 8,
    frozenset((Segment.T, Segment.UL, Segment.UR, Segment.M, Segment.LR,
               Segment.B)): 9,
}
@dataclass(eq=True, frozen=True)
class SignalPattern:
    """An immutable, hashable set of signal wires seen lit together."""

    signals: frozenset[Signal]

    @staticmethod
    def from_str(s: str) -> "SignalPattern":
        """Parse a token such as ``'acedgfb'`` into a :class:`SignalPattern`."""
        return SignalPattern(frozenset(Signal[ch.upper()] for ch in s))
@dataclass
class Observation:
    """One puzzle line: the ten unique patterns plus the four output patterns."""

    patterns: list[SignalPattern]
    output: list[SignalPattern]

    @staticmethod
    def from_str(s: str) -> "Observation":
        """Parse a ``'<ten patterns> | <four outputs>'`` line."""
        def parse(chunk: str) -> list[SignalPattern]:
            # str.split() with no argument already ignores surrounding space.
            return [SignalPattern.from_str(tok) for tok in chunk.split()]

        left, right = s.split("|")
        return Observation(patterns=parse(left), output=parse(right))
def get_observations(inp: TextIO) -> Generator[Observation, None, None]:
    """Lazily parse one :class:`Observation` per line of *inp*."""
    yield from map(Observation.from_str, inp)
def can_only_be_one_digit(pattern: "SignalPattern") -> bool:
    """True if the pattern's size is unique to one digit (1, 7, 4 or 8)."""
    unambiguous_sizes = {2, 3, 4, 7}
    return len(pattern.signals) in unambiguous_sizes
@main.command()
def part1():
    # Count output patterns whose segment count (2, 3, 4 or 7) can only
    # belong to a single digit (1, 7, 4 or 8 respectively).
    num_unique_digits = 0
    for observation in get_observations(sys.stdin):
        for pattern in observation.output:
            if can_only_be_one_digit(pattern):
                num_unique_digits += 1
    print(num_unique_digits)
def determine_mapping(
    patterns: Iterable[SignalPattern]
) -> dict[SignalPattern, int]:
    """Deduce, from all ten scrambled patterns, which digit each one encodes."""
    # Index the patterns by size and count how often each wire is lit.
    patterns_by_num_signals: dict[int, set[SignalPattern]] = defaultdict(set)
    num_on_by_signal: dict[Signal, int] = defaultdict(int)
    for pattern in patterns:
        patterns_by_num_signals[len(pattern.signals)].add(pattern)
        for signal in pattern.signals:
            num_on_by_signal[signal] += 1
    # Digits 1, 4, 7 and 8 are the only ones with 2, 4, 3 and 7 segments.
    p1, = patterns_by_num_signals[2]
    p4, = patterns_by_num_signals[4]
    p7, = patterns_by_num_signals[3]
    p8, = patterns_by_num_signals[7]
    signals_by_num_on: dict[int, set[Signal]] = defaultdict(set)
    for signal, count in num_on_by_signal.items():
        signals_by_num_on[count].add(signal)
    # Across digits 0-9 each segment lights a characteristic number of times:
    # UL=6, LL=4, LR=9 are unique; T and UR both occur 8 times, M and B both
    # 7 times, so those pairs are disambiguated via the known digits 1/4/7/8.
    sig_t, = p7.signals - p1.signals
    sig_ul, = signals_by_num_on[6]
    sig_ll, = signals_by_num_on[4]
    sig_lr, = signals_by_num_on[9]
    sig_ur, = p1.signals - set((sig_lr,))
    sig_m, = p4.signals - set((sig_ul, sig_ur, sig_lr))
    sig_b, = p8.signals - set((sig_t, sig_ul, sig_ur, sig_m, sig_ll, sig_lr))
    sig_to_seg: dict[Signal, Segment] = {
        sig_t: Segment.T,
        sig_ul: Segment.UL,
        sig_ur: Segment.UR,
        sig_ll: Segment.LL,
        sig_lr: Segment.LR,
        sig_m: Segment.M,
        sig_b: Segment.B,
    }
    # Translate every pattern into real segments and look the digit up.
    # NOTE(review): `patterns` is iterated twice, so it must not be a one-shot
    # generator — callers currently pass lists; confirm before changing.
    res: dict[SignalPattern, int] = {}
    for pattern in patterns:
        segments = frozenset({sig_to_seg[sig] for sig in pattern.signals})
        res[pattern] = DIGITS[segments]
    return res
def decode_output_seq(
    output: "Iterable[SignalPattern]",
    mapping: "dict[SignalPattern, int]"
) -> int:
    """Fold the decoded output digits into a single base-10 integer."""
    value = 0
    for pattern in output:
        value = 10 * value + mapping[pattern]
    return value
@main.command()
def part2():
    # Deduce the wiring per observation, decode its 4-digit output and
    # print the sum over all input lines.
    tot = 0
    for observation in get_observations(sys.stdin):
        mapping = determine_mapping(observation.patterns)
        tot += decode_output_seq(observation.output, mapping)
    print(tot)
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    main()
| 27.142857 | 77 | 0.635526 | from typing import Generator, Iterable, TextIO
import sys
import click
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum, auto
# Top-level click command group; `part1` and `part2` are attached below.
@click.group()
def main(): pass
class Segment(Enum):
    # Seven-segment display positions: top/middle/bottom and the four corners.
    T = auto()
    M = auto()
    B = auto()
    UL = auto()
    UR = auto()
    LL = auto()
    LR = auto()
class Signal(Enum):
    # Scrambled wire labels 'a'..'g' as they appear in the puzzle input.
    A = auto()
    B = auto()
    C = auto()
    D = auto()
    E = auto()
    F = auto()
    G = auto()
# Lit-segment sets of the digits 0-9 on a standard seven-segment display.
DIGITS: dict[frozenset[Segment], int] = {
    frozenset((Segment.T, Segment.UL, Segment.UR, Segment.LL, Segment.LR,
               Segment.B)): 0,
    frozenset((Segment.UR, Segment.LR)): 1,
    frozenset((Segment.T, Segment.UR, Segment.M, Segment.LL, Segment.B)): 2,
    frozenset((Segment.T, Segment.UR, Segment.M, Segment.LR, Segment.B)): 3,
    frozenset((Segment.UL, Segment.M, Segment.UR, Segment.LR)): 4,
    frozenset((Segment.T, Segment.UL, Segment.M, Segment.LR, Segment.B)): 5,
    frozenset((Segment.T, Segment.UL, Segment.M, Segment.LL, Segment.LR,
               Segment.B)): 6,
    frozenset((Segment.T, Segment.UR, Segment.LR)): 7,
    frozenset((Segment.T, Segment.UL, Segment.UR, Segment.M, Segment.LL,
               Segment.LR, Segment.B)): 8,
    frozenset((Segment.T, Segment.UL, Segment.UR, Segment.M, Segment.LR,
               Segment.B)): 9,
}
# Immutable, hashable set of wires seen lit together in one pattern token.
@dataclass(eq=True, frozen=True)
class SignalPattern:
    signals: frozenset[Signal]
    @staticmethod
    def from_str(s: str) -> "SignalPattern":
        # Parse a token such as 'acedgfb'.
        signals: set[Signal] = set()
        for c in s:
            signals.add(Signal[c.upper()])
        return SignalPattern(frozenset(signals))
# One puzzle line: ten unique patterns plus the four output patterns.
@dataclass
class Observation:
    patterns: list[SignalPattern]
    output: list[SignalPattern]
    @staticmethod
    def from_str(s: str) -> "Observation":
        # Input format: '<ten patterns> | <four outputs>'.
        seq_str, output_str = s.split("|")
        return Observation(
            patterns=[SignalPattern.from_str(tok)
                      for tok in seq_str.strip().split()],
            output=[SignalPattern.from_str(tok)
                    for tok in output_str.strip().split()])
def get_observations(inp: TextIO) -> Generator[Observation, None, None]:
    # Lazily parse one Observation per input line.
    for line in inp:
        yield Observation.from_str(line)
def can_only_be_one_digit(pattern: SignalPattern) -> bool:
    # Sizes 2/3/4/7 belong to exactly one digit: 1, 7, 4 and 8 respectively.
    return len(pattern.signals) in (2, 3, 4, 7)
@main.command()
def part1():
    # Count output patterns whose segment count uniquely identifies a digit.
    num_unique_digits = 0
    for observation in get_observations(sys.stdin):
        for pattern in observation.output:
            if can_only_be_one_digit(pattern):
                num_unique_digits += 1
    print(num_unique_digits)
def determine_mapping(
    patterns: Iterable[SignalPattern]
) -> dict[SignalPattern, int]:
    # Deduce which digit each scrambled pattern encodes.
    patterns_by_num_signals: dict[int, set[SignalPattern]] = defaultdict(set)
    num_on_by_signal: dict[Signal, int] = defaultdict(int)
    for pattern in patterns:
        patterns_by_num_signals[len(pattern.signals)].add(pattern)
        for signal in pattern.signals:
            num_on_by_signal[signal] += 1
    # 1, 4, 7, 8 are the only digits with 2, 4, 3 and 7 segments.
    p1, = patterns_by_num_signals[2]
    p4, = patterns_by_num_signals[4]
    p7, = patterns_by_num_signals[3]
    p8, = patterns_by_num_signals[7]
    signals_by_num_on: dict[int, set[Signal]] = defaultdict(set)
    for signal, count in num_on_by_signal.items():
        signals_by_num_on[count].add(signal)
    # Segment occurrence counts over digits 0-9: UL=6, LL=4, LR=9 (unique);
    # T/UR both 8 and M/B both 7, resolved via the known digits 1, 4 and 7.
    sig_t, = p7.signals - p1.signals
    sig_ul, = signals_by_num_on[6]
    sig_ll, = signals_by_num_on[4]
    sig_lr, = signals_by_num_on[9]
    sig_ur, = p1.signals - set((sig_lr,))
    sig_m, = p4.signals - set((sig_ul, sig_ur, sig_lr))
    sig_b, = p8.signals - set((sig_t, sig_ul, sig_ur, sig_m, sig_ll, sig_lr))
    sig_to_seg: dict[Signal, Segment] = {
        sig_t: Segment.T,
        sig_ul: Segment.UL,
        sig_ur: Segment.UR,
        sig_ll: Segment.LL,
        sig_lr: Segment.LR,
        sig_m: Segment.M,
        sig_b: Segment.B,
    }
    # Translate each pattern into real segments and look the digit up.
    res: dict[SignalPattern, int] = {}
    for pattern in patterns:
        segments = frozenset({sig_to_seg[sig] for sig in pattern.signals})
        res[pattern] = DIGITS[segments]
    return res
def decode_output_seq(
    output: Iterable[SignalPattern],
    mapping: dict[SignalPattern, int]
) -> int:
    # Fold the decoded output digits into a single base-10 integer.
    res = 0
    for pattern in output:
        res *= 10
        res += mapping[pattern]
    return res
@main.command()
def part2():
    # Sum the decoded 4-digit outputs over all input lines.
    tot = 0
    for observation in get_observations(sys.stdin):
        mapping = determine_mapping(observation.patterns)
        tot += decode_output_seq(observation.output, mapping)
    print(tot)
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    main()
| true | true |
1c3c443fbc770e278b41fd2f337cd4dd0b1ae13e | 3,676 | py | Python | rlcard/envs/leducholdem.py | thomasthechen/rlcard | 0139d0e403b6d844a8f9107237887d73c7e8d752 | [
"MIT"
] | null | null | null | rlcard/envs/leducholdem.py | thomasthechen/rlcard | 0139d0e403b6d844a8f9107237887d73c7e8d752 | [
"MIT"
] | null | null | null | rlcard/envs/leducholdem.py | thomasthechen/rlcard | 0139d0e403b6d844a8f9107237887d73c7e8d752 | [
"MIT"
] | null | null | null | import json
import os
import numpy as np
import rlcard
from rlcard.envs import Env
from rlcard.games.leducholdem import Game
from rlcard.utils import *
from rlcard import models
class LeducholdemEnv(Env):
    ''' Leduc Hold'em Environment
    '''

    def __init__(self, config):
        ''' Initialize the Leduc Hold'em environment
        '''
        self.game = Game()
        super().__init__(config)
        self.actions = ['call', 'raise', 'fold', 'check']
        # Observation is a 36-dim vector; see _extract_state for the layout.
        self.state_shape = [36]
        with open(os.path.join(rlcard.__path__[0], 'games/leducholdem/card2index.json'), 'r') as file:
            self.card2index = json.load(file)

    def _load_model(self):
        ''' Load pretrained/rule model

        Returns:
            model (Model): A Model object
        '''
        return models.load('leduc-holdem-cfr')

    def _get_legal_actions(self):
        ''' Get all legal actions

        Returns:
            encoded_action_list (list): return encoded legal action list (from str to int)
        '''
        return self.game.get_legal_actions()

    def _extract_state(self, state):
        ''' Extract the state representation from state dictionary for agent

        Note: Currently uses the hand cards, the public card and chip counts.

        Args:
            state (dict): Original state from the game

        Returns:
            extracted_state (dict): legal action ids plus the 36-dim observation
        '''
        extracted_state = {}
        # Encode legal action strings into indices of self.actions.
        legal_actions = [self.actions.index(a) for a in state['legal_actions']]
        extracted_state['legal_actions'] = legal_actions
        public_card = state['public_card']
        hand = state['hand']
        # Layout: [0:3] hand one-hot, [3:6] public card one-hot,
        # [6:20] own chips one-hot, [20:36] opponent chips one-hot.
        # NOTE(review): card2index presumably maps a card to 0..2 — verify.
        obs = np.zeros(36)
        obs[self.card2index[hand]] = 1
        if public_card:
            obs[self.card2index[public_card]+3] = 1
        obs[state['my_chips']+6] = 1
        obs[state['all_chips'][1]+20] = 1
        extracted_state['obs'] = obs
        if self.allow_raw_data:
            extracted_state['raw_obs'] = state
            extracted_state['raw_legal_actions'] = [a for a in state['legal_actions']]
        if self.record_action:
            extracted_state['action_record'] = self.action_recorder
        return extracted_state

    def get_payoffs(self):
        ''' Get the payoff of a game

        Returns:
            payoffs (list): list of payoffs
        '''
        return self.game.get_payoffs()

    def _decode_action(self, action_id):
        ''' Decode the action for applying to the game

        Falls back to 'check' (or 'fold') when the chosen action is illegal.

        Args:
            action id (int): action id

        Returns:
            action (str): action for the game
        '''
        legal_actions = self.game.get_legal_actions()
        if self.actions[action_id] not in legal_actions:
            if 'check' in legal_actions:
                return 'check'
            else:
                return 'fold'
        return self.actions[action_id]

    def get_perfect_information(self):
        ''' Get the perfect information of the current state

        Returns:
            (dict): A dictionary of all the perfect information of the current state
        '''
        state = {}
        state['chips'] = [self.game.players[i].in_chips for i in range(self.player_num)]
        state['public_card'] = self.game.public_card.get_index() if self.game.public_card else None
        state['hand_cards'] = [self.game.players[i].hand.get_index() for i in range(self.player_num)]
        state['current_round'] = self.game.round_counter
        state['current_player'] = self.game.game_pointer
        state['legal_actions'] = self.game.get_legal_actions()
        return state
| 31.689655 | 104 | 0.612078 | import json
import os
import numpy as np
import rlcard
from rlcard.envs import Env
from rlcard.games.leducholdem import Game
from rlcard.utils import *
from rlcard import models
class LeducholdemEnv(Env):
    ''' Leduc Hold'em Environment '''

    def __init__(self, config):
        ''' Initialize the Leduc Hold'em environment '''
        self.game = Game()
        super().__init__(config)
        self.actions = ['call', 'raise', 'fold', 'check']
        # 36-dim observation; see _extract_state for the layout.
        self.state_shape = [36]
        with open(os.path.join(rlcard.__path__[0], 'games/leducholdem/card2index.json'), 'r') as file:
            self.card2index = json.load(file)

    def _load_model(self):
        ''' Load the pretrained CFR rule model. '''
        return models.load('leduc-holdem-cfr')

    def _get_legal_actions(self):
        ''' Return the legal actions of the underlying game. '''
        return self.game.get_legal_actions()

    def _extract_state(self, state):
        ''' Build the agent-facing state dict from the raw game state. '''
        extracted_state = {}
        legal_actions = [self.actions.index(a) for a in state['legal_actions']]
        extracted_state['legal_actions'] = legal_actions
        public_card = state['public_card']
        hand = state['hand']
        # [0:3] hand one-hot, [3:6] public card one-hot,
        # [6:20] own chips, [20:36] opponent chips.
        obs = np.zeros(36)
        obs[self.card2index[hand]] = 1
        if public_card:
            obs[self.card2index[public_card]+3] = 1
        obs[state['my_chips']+6] = 1
        obs[state['all_chips'][1]+20] = 1
        extracted_state['obs'] = obs
        if self.allow_raw_data:
            extracted_state['raw_obs'] = state
            extracted_state['raw_legal_actions'] = [a for a in state['legal_actions']]
        if self.record_action:
            extracted_state['action_record'] = self.action_recorder
        return extracted_state

    def get_payoffs(self):
        ''' Return the per-player payoffs of the finished game. '''
        return self.game.get_payoffs()

    def _decode_action(self, action_id):
        ''' Map an action id to a game action, falling back to check/fold
        when the chosen action is not legal. '''
        legal_actions = self.game.get_legal_actions()
        if self.actions[action_id] not in legal_actions:
            if 'check' in legal_actions:
                return 'check'
            else:
                return 'fold'
        return self.actions[action_id]

    def get_perfect_information(self):
        ''' Return a dict with the full (perfect) information of the state. '''
        state = {}
        state['chips'] = [self.game.players[i].in_chips for i in range(self.player_num)]
        state['public_card'] = self.game.public_card.get_index() if self.game.public_card else None
        state['hand_cards'] = [self.game.players[i].hand.get_index() for i in range(self.player_num)]
        state['current_round'] = self.game.round_counter
        state['current_player'] = self.game.game_pointer
        state['legal_actions'] = self.game.get_legal_actions()
        return state
| true | true |
1c3c447d3a1075c907b25e2e2f2d4593cff8548d | 417 | py | Python | plugins/keepkey/cmdline.py | BTCPrivate/electrum-bitcoinprivate | d18dbd83353d006136bc986e143e19dbb954c36a | [
"MIT"
] | 1 | 2021-04-02T20:35:15.000Z | 2021-04-02T20:35:15.000Z | plugins/keepkey/cmdline.py | ArdeshirV/electrum-bitcoinprivate | d18dbd83353d006136bc986e143e19dbb954c36a | [
"MIT"
] | null | null | null | plugins/keepkey/cmdline.py | ArdeshirV/electrum-bitcoinprivate | d18dbd83353d006136bc986e143e19dbb954c36a | [
"MIT"
] | 1 | 2021-04-06T18:34:31.000Z | 2021-04-06T18:34:31.000Z | from electrum_bitcoinprivate.plugins import hook
from .keepkey import KeepKeyPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(KeepKeyPlugin):
    # Command-line flavour of the KeepKey plugin: all user interaction goes
    # through one shared CmdLineHandler instead of a GUI window.
    handler = CmdLineHandler()
    @hook
    def init_keystore(self, keystore):
        # Attach the CLI handler only to keystores of this plugin's type.
        if not isinstance(keystore, self.keystore_class):
            return
        keystore.handler = self.handler
    def create_handler(self, window):
        # `window` is ignored on the command line; reuse the shared handler.
        return self.handler
| 27.8 | 57 | 0.721823 | from electrum_bitcoinprivate.plugins import hook
from .keepkey import KeepKeyPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(KeepKeyPlugin):
    # CLI variant of the KeepKey plugin: a single shared CmdLineHandler.
    handler = CmdLineHandler()
    @hook
    def init_keystore(self, keystore):
        # Only attach the handler to keystores of this plugin's type.
        if not isinstance(keystore, self.keystore_class):
            return
        keystore.handler = self.handler
    def create_handler(self, window):
        # `window` is ignored on the command line; reuse the shared handler.
        return self.handler
| true | true |
1c3c452c359788e2fcf75c6cbd76e2dd7b62dbf3 | 592 | py | Python | ObjectDetection/scripts/utils/beewatch_backgroundsubstraction_adv.py | GLopezMUZH/bee_tracking_NN | 0007e9a0749bfea3e74c997553059b6931ed6de8 | [
"MIT"
] | null | null | null | ObjectDetection/scripts/utils/beewatch_backgroundsubstraction_adv.py | GLopezMUZH/bee_tracking_NN | 0007e9a0749bfea3e74c997553059b6931ed6de8 | [
"MIT"
] | null | null | null | ObjectDetection/scripts/utils/beewatch_backgroundsubstraction_adv.py | GLopezMUZH/bee_tracking_NN | 0007e9a0749bfea3e74c997553059b6931ed6de8 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
video_path = 'C:\\Data\\beeWatch\\temp\\Erlen_Hive_11_20190916_125421_540_M.mp4'

cap = cv2.VideoCapture(video_path)
# MOG2 background model: 5-frame history, variance threshold 25, no shadows.
subtractor = cv2.createBackgroundSubtractorMOG2(history=5, varThreshold = 25, detectShadows = False) #120 frames moving window, reduction, filtering, morfological transformation and shadows

while True:
    ret, frame = cap.read()
    if not ret:
        # BUGFIX: stop at end-of-stream instead of feeding a None frame
        # to the subtractor (which would raise).
        break
    mask = subtractor.apply(frame)

    cv2.imshow("Frame", frame)
    cv2.imshow("Mask", mask)

    key = cv2.waitKey(30)
    if key == 27:  # ESC quits
        break

cap.release()
# BUGFIX: destroyAllWindows is a cv2 module-level function; VideoCapture has
# no such method, so the old `cap.destroyAllWindows()` raised AttributeError.
cv2.destroyAllWindows()
| 26.909091 | 190 | 0.699324 | import cv2
import numpy as np
video_path = 'C:\\Data\\beeWatch\\temp\\Erlen_Hive_11_20190916_125421_540_M.mp4'

cap = cv2.VideoCapture(video_path)
# MOG2 background model: 5-frame history, variance threshold 25, no shadows.
subtractor = cv2.createBackgroundSubtractorMOG2(history=5, varThreshold = 25, detectShadows = False)

while True:
    ret, frame = cap.read()
    if not ret:
        # BUGFIX: stop at end-of-stream instead of feeding a None frame
        # to the subtractor (which would raise).
        break
    mask = subtractor.apply(frame)

    cv2.imshow("Frame", frame)
    cv2.imshow("Mask", mask)

    key = cv2.waitKey(30)
    if key == 27:  # ESC quits
        break

cap.release()
# BUGFIX: destroyAllWindows is a cv2 module-level function; VideoCapture has
# no such method, so the old `cap.destroyAllWindows()` raised AttributeError.
cv2.destroyAllWindows()
| true | true |
1c3c4677450a24582376f55b330463c7b4000eaf | 2,971 | py | Python | tests/test_loader.py | pyarmory/pike | 02faeec21a9a9cad5d928ab8830776612f81e084 | [
"Apache-2.0"
] | 21 | 2015-07-05T20:02:10.000Z | 2022-02-02T07:04:22.000Z | tests/test_loader.py | pyarmory/pike | 02faeec21a9a9cad5d928ab8830776612f81e084 | [
"Apache-2.0"
] | 9 | 2015-07-23T16:30:50.000Z | 2018-10-06T15:11:32.000Z | tests/test_loader.py | pyarmory/pike | 02faeec21a9a9cad5d928ab8830776612f81e084 | [
"Apache-2.0"
] | 5 | 2016-05-26T15:27:35.000Z | 2018-03-26T16:01:02.000Z | import os
import py_compile
import six
import pytest
from pike.finder import PikeFinder
from pike.loader import PikeLoader
from tests import utils
SIMPLE_CLASS = """
class Tracer(object):
pass
"""
@pytest.fixture
def loader_finder():
    """Yield a (PikeLoader, PikeFinder) pair over a throwaway 'pike_tests' package."""
    temp_folder = utils.make_tmpdir()
    # Create a simple package
    pkg_location = utils.create_working_package(temp_folder)
    mod_location = os.path.join(pkg_location, 'app.py')
    utils.write_file(mod_location, SIMPLE_CLASS)
    finder = PikeFinder([temp_folder])
    loader = PikeLoader('pike_tests.app', mod_location)
    yield loader, finder
    # teardown: remove the temporary tree
    utils.remove_dir(temp_folder)
@pytest.fixture
def compiled_loader():
    """Yield a temp folder holding a 'compile_test' package with a compiled app.py."""
    temp_folder = utils.make_tmpdir()
    # Create a simple package
    pkg_location = utils.create_working_package(temp_folder, 'compile_test')
    mod_location = os.path.join(pkg_location, 'app.py')
    utils.write_file(mod_location, SIMPLE_CLASS)
    py_compile.compile(mod_location)
    yield temp_folder
    # teardown: remove the temporary tree
    utils.remove_dir(temp_folder)
def test_load_module_raises_import_error_with_bad_fullname(loader_finder):
    """A loader built for 'pike_tests.app' must refuse any other fullname."""
    loader, _ = loader_finder
    with pytest.raises(ImportError):
        loader.load_module('bam')
def test_is_package(loader_finder):
    """The package itself ('pike_tests') reports is_package() as true."""
    _, finder = loader_finder
    loader = finder.find_module('pike_tests')
    assert loader.is_package()
def test_module_isnt_package(loader_finder):
    """A plain module ('pike_tests.app') reports is_package() as false."""
    _, finder = loader_finder
    loader = finder.find_module('pike_tests.app')
    assert not loader.is_package()
def test_load_package_module(loader_finder):
    """Loading the package through a finder-produced loader yields a module object."""
    _, finder = loader_finder
    loader = finder.find_module('pike_tests')
    module = loader.load_module('pike_tests')
    assert module is not None
def test_second_load_pulls_previously_loaded_module(loader_finder):
    """Re-loading the same fullname returns the cached module, not a fresh one."""
    loader, _ = loader_finder
    first_load = loader.load_module('pike_tests.app')
    second_load = loader.load_module('pike_tests.app')
    assert first_load == second_load
def test_load_module_by_path_with_invalid_path(loader_finder):
    """A path with an unrecognized extension yields None rather than raising."""
    loader, _ = loader_finder
    module = loader.load_module_by_path('name', 'something.bam')
    assert module is None
@pytest.mark.skip('pyc loading is disabled')
def test_loading_pyc(compiled_loader):
    """When pyc loading is enabled, a bare module should load from its .pyc."""
    finder = PikeFinder([compiled_loader])
    # Loading compiled module
    loader = finder.find_module('compile_test.app')
    module = loader.load_module('compile_test.app')
    if six.PY3:
        # Python 3 exposes the sourceless loader and a __cached__ .pyc path.
        assert type(module.__loader__).__name__ == 'SourcelessFileLoader'
        assert module.__cached__.endswith('.pyc')
    else:
        # Python 2 reports the .pyc as the module file directly.
        assert module.__file__.endswith('app.pyc')
def test_loading_py(compiled_loader):
    """Even with a .pyc present, the package should load from its .py source."""
    finder = PikeFinder([compiled_loader])
    # Loading module source
    loader = finder.find_module('compile_test')
    module = loader.load_module('compile_test')
    if six.PY3:
        # Source loading on Python 3 goes through SourceFileLoader.
        assert type(module.__loader__).__name__ == 'SourceFileLoader'
    else:
        assert module.__file__.endswith('__init__.py')
| 24.758333 | 76 | 0.736789 | import os
import py_compile
import six
import pytest
from pike.finder import PikeFinder
from pike.loader import PikeLoader
from tests import utils
SIMPLE_CLASS = """
class Tracer(object):
pass
"""
@pytest.fixture
def loader_finder():
temp_folder = utils.make_tmpdir()
pkg_location = utils.create_working_package(temp_folder)
mod_location = os.path.join(pkg_location, 'app.py')
utils.write_file(mod_location, SIMPLE_CLASS)
finder = PikeFinder([temp_folder])
loader = PikeLoader('pike_tests.app', mod_location)
yield loader, finder
utils.remove_dir(temp_folder)
@pytest.fixture
def compiled_loader():
temp_folder = utils.make_tmpdir()
pkg_location = utils.create_working_package(temp_folder, 'compile_test')
mod_location = os.path.join(pkg_location, 'app.py')
utils.write_file(mod_location, SIMPLE_CLASS)
py_compile.compile(mod_location)
yield temp_folder
utils.remove_dir(temp_folder)
def test_load_module_raises_import_error_with_bad_fullname(loader_finder):
loader, _ = loader_finder
with pytest.raises(ImportError):
loader.load_module('bam')
def test_is_package(loader_finder):
_, finder = loader_finder
loader = finder.find_module('pike_tests')
assert loader.is_package()
def test_module_isnt_package(loader_finder):
_, finder = loader_finder
loader = finder.find_module('pike_tests.app')
assert not loader.is_package()
def test_load_package_module(loader_finder):
_, finder = loader_finder
loader = finder.find_module('pike_tests')
module = loader.load_module('pike_tests')
assert module is not None
def test_second_load_pulls_previously_loaded_module(loader_finder):
loader, _ = loader_finder
first_load = loader.load_module('pike_tests.app')
second_load = loader.load_module('pike_tests.app')
assert first_load == second_load
def test_load_module_by_path_with_invalid_path(loader_finder):
loader, _ = loader_finder
module = loader.load_module_by_path('name', 'something.bam')
assert module is None
@pytest.mark.skip('pyc loading is disabled')
def test_loading_pyc(compiled_loader):
finder = PikeFinder([compiled_loader])
loader = finder.find_module('compile_test.app')
module = loader.load_module('compile_test.app')
if six.PY3:
assert type(module.__loader__).__name__ == 'SourcelessFileLoader'
assert module.__cached__.endswith('.pyc')
else:
assert module.__file__.endswith('app.pyc')
def test_loading_py(compiled_loader):
finder = PikeFinder([compiled_loader])
loader = finder.find_module('compile_test')
module = loader.load_module('compile_test')
if six.PY3:
assert type(module.__loader__).__name__ == 'SourceFileLoader'
else:
assert module.__file__.endswith('__init__.py')
| true | true |
1c3c46902f4580ab653d3e9fcc2985d872cebf22 | 323 | py | Python | .history/catalog/urls_20220303122221.py | arsalandehghani/locallibrary | 786bbfa33dc06d9b217c4139bb6bc3886c714e8b | [
"MIT"
] | null | null | null | .history/catalog/urls_20220303122221.py | arsalandehghani/locallibrary | 786bbfa33dc06d9b217c4139bb6bc3886c714e8b | [
"MIT"
] | null | null | null | .history/catalog/urls_20220303122221.py | arsalandehghani/locallibrary | 786bbfa33dc06d9b217c4139bb6bc3886c714e8b | [
"MIT"
] | null | null | null | from unicodedata import name
from django.contrib import admin
# ``url`` has never been importable from ``django.urls`` (it lived in
# ``django.conf.urls``), so the original import crashed at startup;
# ``re_path`` is its direct replacement for regex routes.
from django.urls import include, path, re_path
from . import views

# URL patterns for this app: the site root serves the index view.
urlpatterns = [
    # path('admin/', admin.site.urls),
    # path('catalog/', include('catalog.urls')),
    # path('catalog/', include('catalog.urls')),
    re_path(r'^$', views.index, name='index'),
]
| 26.916667 | 48 | 0.668731 | from unicodedata import name
from django.contrib import admin
from django.urls import include, path, url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index')
]
| true | true |
1c3c471e66ff3c07b3c278fb5b67e271f8807ac8 | 5,093 | py | Python | ga.py | G0D0T/GAforPartitioning | cbc2900722e94049ead3e6f7c8d61c66b6434f49 | [
"Apache-2.0"
] | null | null | null | ga.py | G0D0T/GAforPartitioning | cbc2900722e94049ead3e6f7c8d61c66b6434f49 | [
"Apache-2.0"
] | null | null | null | ga.py | G0D0T/GAforPartitioning | cbc2900722e94049ead3e6f7c8d61c66b6434f49 | [
"Apache-2.0"
] | null | null | null | import random
def partition(x, y):
    """Fitness ("cost") of the partition encoded by chromosome *x* over values *y*.

    Gene i routes y[i]: a 0-bit sends it to the first subset, a 1-bit to the
    second.  Returns the absolute difference of the two subset sums, so 0 is
    a perfect partition and lower is better.
    """
    diff = 0
    for i in range(len(x)):
        # Signed accumulation: +y[i] for subset A (bit 0), -y[i] for subset B (bit 1).
        diff += -y[i] if x[i] else y[i]
    return abs(diff)
def gen_population(s, n):
    """Return a random initial population: *s* chromosomes of *n* bits each.

    Every gene is drawn uniformly from {0, 1}; chromosome length matches the
    number of integers being partitioned.
    """
    return [[random.randint(0, 1) for _ in range(n)] for _ in range(s)]
def crossover(g1, g2, p):
    """One-point crossover of parent chromosomes *g1* and *g2* at index *p*.

    Each child keeps one parent's prefix ``g[:p]`` and the other parent's
    suffix ``g[p:]``, so gene positions stay aligned with the items they
    encode.  The original built the second child as ``g1[p:] + g2[:p]``,
    which rotates the genes out of alignment with the value list; fixed to
    the standard ``g2[:p] + g1[p:]``.

    Returns the two children as a tuple.
    """
    f1 = g1[:p] + g2[p:]
    f2 = g2[:p] + g1[p:]
    return f1, f2
def mutate(g1, dim):
    """Flip one randomly chosen bit of chromosome *g1* (length *dim*), in place.

    Returns the (mutated) chromosome for convenience.
    """
    locus = random.randrange(dim)
    # 1 - bit inverts 0 <-> 1 without branching.
    g1[locus] = 1 - g1[locus]
    return g1
def ga(problem, numbers, real_size, xover, mutation, generations):
    """Run the genetic algorithm on the number-partitioning instance *numbers*.

    problem     -- cost function ``problem(chromosome, numbers) -> int`` (lower is better)
    numbers     -- list of integers to partition
    real_size   -- requested population size (rounded up to an even number)
    xover       -- a mating pair recombines when ``random.random() > xover``
    mutation    -- a child mutates when ``random.random() > mutation``
    generations -- maximum number of generations to evolve

    Returns ``(best_chromosome, best_cost)`` from the last generation evaluated.
    """
    # Force an even population size so every chromosome has a mate.
    size = real_size + real_size % 2
    population = gen_population(size, len(numbers))
    sum_numbers = sum(numbers)
    for i in range(1, generations + 1):
        print('Generazione: ', i)
        j = 0
        # Cost of each surviving chromosome in this generation.
        scores = []
        # SELECTION: walk the population two chromosomes (one pair) at a time.
        while j <= size - 2:
            # CROSSOVER: the pair mates with high probability (1 - xover).
            mating = random.random()
            if mating > xover:
                # BUGFIX: the crossover point must lie inside the chromosome.
                # The original drew it from range(size) -- the *population*
                # size -- which is unrelated to the chromosome length.
                point = random.randrange(0, len(numbers))
                figlio1, figlio2 = crossover(population[j], population[j + 1], point)
                # MUTATION: each child independently mutates with low probability.
                evolution1 = random.random()
                evolution2 = random.random()
                if evolution1 > mutation:
                    figlio1 = mutate(figlio1, len(numbers))
                if evolution2 > mutation:
                    figlio2 = mutate(figlio2, len(numbers))
                # Evaluate parents and children.
                oldscore1 = problem(population[j], numbers)
                newscore1 = problem(figlio1, numbers)
                oldscore2 = problem(population[j + 1], numbers)
                newscore2 = problem(figlio2, numbers)
                # REPLACE: a child displaces its parent only if it scores better.
                if newscore1 < oldscore1:
                    population[j] = figlio1
                    scores.append(newscore1)
                else:
                    scores.append(oldscore1)
                if newscore2 < oldscore2:
                    population[j + 1] = figlio2
                    scores.append(newscore2)
                else:
                    scores.append(oldscore2)
            else:
                # No mating: the pair survives unchanged into the next generation.
                scores.append(problem(population[j], numbers))
                scores.append(problem(population[j + 1], numbers))
            j += 2
        print(scores)
        # Per-generation fitness statistics, printed for monitoring.
        gen_avg = sum(scores) / size
        gen_best = min(scores)
        gen_sol = population[scores.index(min(scores))]
        print('> GENERATION AVERAGE:', gen_avg)
        print('> GENERATION BEST:', gen_best)
        print('> BEST SOLUTION:', gen_sol, '\n')
        # The best achievable cost has the parity of the total sum (0 for an
        # even total, 1 for an odd one): stop early once it is reached.
        if gen_best == sum_numbers % 2:
            break
    return gen_sol, gen_best
def main():
    """Build a random 50-number instance, solve it with the GA, and log the result."""
    # GA parameters.
    n_items = 50
    pop_size = 30
    crossover_threshold = 0.25
    mutation_threshold = 0.9
    max_generations = 100
    # Random instance: 50 integers in [1, 99].
    values = [random.randrange(1, 100) for _ in range(n_items)]
    best_chromosome, best_cost = ga(
        partition, values, pop_size, crossover_threshold,
        mutation_threshold, max_generations)
    print(values)
    record = (values, best_chromosome, best_cost)
    # Append the run's outcome to the log file for later inspection.
    with open('solutions.txt', 'a') as f:
        f.write(str(record) + '\n')
# Repeat the whole experiment 100 times, appending each result to solutions.txt.
for _run in range(100):
    main()
| 35.124138 | 132 | 0.595131 | import random
def partition(x, y):
a = b = 0
for i in range(len(x)):
if not x[i]:
a += y[i]
else:
b += y[i]
# Ritorno il valore assoluto (più è vicino a 0 e migliore è la partizione)
return abs(a - b)
def gen_population(s, n):
population = []
for i in range(0, s):
indv = []
# Ogni cromosoma contiene n bit (contenenti 0 o 1), pari alla dimensione della lista di interi
for j in range(n):
indv.append(random.randint(0, 1))
population.append(indv)
return population
# Funzione che esegue il crossover a un punto, dati i due genitori e il punto in questione
def crossover(g1, g2, p):
f1 = g1[:p] + g2[p:]
f2 = g1[p:] + g2[:p]
# Ritorna i due figli della coppia
return f1, f2
# Funzione per gestire la fase di mutazione
def mutate(g1, dim):
gene = random.randrange(0, dim)
# In base al numero random generato, inverto il bit a quella posizione nel cromosoma
if g1[gene]:
g1[gene] = 0
else:
g1[gene] = 1
# Ritorno il cromosoma modificato
return g1
# Funzione base dell'algoritmo genetico
def ga(problem, numbers, real_size, xover, mutation, generations):
size = real_size + real_size % 2
population = gen_population(size, len(numbers))
sum_numbers = sum(numbers)
for i in range(1, generations + 1):
print('Generazione: ', i)
j = 0
scores = []
while j <= size - 2:
mating = random.random()
if mating > xover:
point = random.randrange(0, size)
figlio1, figlio2 = crossover(population[j], population[j+1], point)
evolution1 = random.random()
evolution2 = random.random()
if evolution1 > mutation:
figlio1 = mutate(figlio1, len(numbers))
if evolution2 > mutation:
figlio2 = mutate(figlio2, len(numbers))
oldscore1 = problem(population[j], numbers)
newscore1 = problem(figlio1, numbers)
oldscore2 = problem(population[j+1], numbers)
newscore2 = problem(figlio2, numbers)
if newscore1 < oldscore1:
population[j] = figlio1
scores.append(newscore1)
else:
scores.append(oldscore1)
if newscore2 < oldscore2:
population[j+1] = figlio2
scores.append(newscore2)
else:
scores.append(oldscore2)
else:
scores.append(problem(population[j], numbers))
scores.append(problem(population[j+1], numbers))
j += 2
print(scores)
gen_avg = sum(scores) / size
gen_best = min(scores)
gen_sol = population[scores.index(min(scores))]
print('> GENERATION AVERAGE:', gen_avg)
print('> GENERATION BEST:', gen_best)
print('> BEST SOLUTION:', gen_sol, '\n')
if gen_best == sum_numbers % 2:
break
return gen_sol, gen_best
def main():
problem = partition
dimensione = 50
y = []
for i in range(dimensione):
y.append(random.randrange(1, 100))
size = 30
xover = 0.25
mutation = 0.9
generazioni = 100
soluzione, valore = ga(problem, y, size, xover, mutation, generazioni)
print(y)
sol = (y, soluzione, valore)
# Stampo su file per controllo
with open('solutions.txt', 'a') as f:
f.write(str(sol)+'\n')
ripeti = 100
while ripeti:
main()
ripeti -= 1
| true | true |
1c3c472bd30540645228b38acb96974af7c41f45 | 142 | py | Python | lan for loop.py | keerthana1502/python_practice | 8c0499e014826af78f9a88730551ace3fa79686d | [
"bzip2-1.0.6"
] | null | null | null | lan for loop.py | keerthana1502/python_practice | 8c0499e014826af78f9a88730551ace3fa79686d | [
"bzip2-1.0.6"
] | null | null | null | lan for loop.py | keerthana1502/python_practice | 8c0499e014826af78f9a88730551ace3fa79686d | [
"bzip2-1.0.6"
] | null | null | null | def func(x):
y=4
return lambda z: x+y+z
for i in range(5):
closure=func(i)
print("closure ",i+5," = ","closure ",closure(i+5)) | 23.666667 | 55 | 0.56338 | def func(x):
y=4
return lambda z: x+y+z
for i in range(5):
closure=func(i)
print("closure ",i+5," = ","closure ",closure(i+5)) | true | true |
1c3c48637e933d14a61af82aacba2c2a82af796c | 1,686 | py | Python | homeassistant/components/shelly/switch.py | inishchith/core | 90892d275c259088ed302bdaa8838303a6ef4094 | [
"Apache-2.0"
] | 1 | 2020-03-10T21:16:17.000Z | 2020-03-10T21:16:17.000Z | homeassistant/components/shelly/switch.py | inishchith/core | 90892d275c259088ed302bdaa8838303a6ef4094 | [
"Apache-2.0"
] | 38 | 2020-10-15T06:46:44.000Z | 2022-03-31T06:02:48.000Z | homeassistant/components/shelly/switch.py | michaellunzer/core | 1b0c421300812c026dcc077731250cd91e6c618a | [
"Apache-2.0"
] | null | null | null | """Switch for Shelly."""
from aioshelly import RelayBlock
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from . import ShellyDeviceWrapper
from .const import DOMAIN
from .entity import ShellyBlockEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up switches for device."""
    wrapper = hass.data[DOMAIN][config_entry.entry_id]
    # One switch entity per relay-type block on the device.
    relays = [b for b in wrapper.device.blocks if b.type == "relay"]
    if relays:
        async_add_entities(RelaySwitch(wrapper, b) for b in relays)
class RelaySwitch(ShellyBlockEntity, SwitchEntity):
    """Switch that controls a relay block on Shelly devices."""
    def __init__(self, wrapper: ShellyDeviceWrapper, block: RelayBlock) -> None:
        """Initialize relay switch."""
        super().__init__(wrapper, block)
        # Optimistic state: holds the device's reply to the most recent
        # set_state() call so is_on reflects the commanded state immediately;
        # cleared on the next device push update (see _update_callback).
        self.control_result = None
    @property
    def is_on(self) -> bool:
        """If switch is on."""
        # Prefer the pending command result over the (possibly stale) block state.
        if self.control_result:
            return self.control_result["ison"]
        return self.block.output
    async def async_turn_on(self, **kwargs):
        """Turn on relay."""
        self.control_result = await self.block.set_state(turn="on")
        self.async_write_ha_state()
    async def async_turn_off(self, **kwargs):
        """Turn off relay."""
        self.control_result = await self.block.set_state(turn="off")
        self.async_write_ha_state()
    @callback
    def _update_callback(self):
        """When device updates, clear control result that overrides state."""
        # The device's own report now supersedes the optimistic command result.
        self.control_result = None
        super()._update_callback()
| 30.654545 | 86 | 0.688612 | from aioshelly import RelayBlock
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from . import ShellyDeviceWrapper
from .const import DOMAIN
from .entity import ShellyBlockEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
wrapper = hass.data[DOMAIN][config_entry.entry_id]
relay_blocks = [block for block in wrapper.device.blocks if block.type == "relay"]
if not relay_blocks:
return
async_add_entities(RelaySwitch(wrapper, block) for block in relay_blocks)
class RelaySwitch(ShellyBlockEntity, SwitchEntity):
def __init__(self, wrapper: ShellyDeviceWrapper, block: RelayBlock) -> None:
super().__init__(wrapper, block)
self.control_result = None
@property
def is_on(self) -> bool:
if self.control_result:
return self.control_result["ison"]
return self.block.output
async def async_turn_on(self, **kwargs):
self.control_result = await self.block.set_state(turn="on")
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
self.control_result = await self.block.set_state(turn="off")
self.async_write_ha_state()
@callback
def _update_callback(self):
self.control_result = None
super()._update_callback()
| true | true |
1c3c4a2cce80f170a0f8ed6fe9213991a3b0314f | 1,366 | py | Python | configs/_base_/models/emanet_r50-d8.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | configs/_base_/models/emanet_r50-d8.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | configs/_base_/models/emanet_r50-d8.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | # model settings
# EMANet encoder-decoder segmentation model settings (mmsegmentation-style config).
# SyncBN synchronizes batch-norm statistics across GPUs during training.
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    # Backbone: ResNet-50 v1c.  Strides (1, 2, 1, 1) with dilations
    # (1, 1, 2, 4) replace the last two downsamplings with dilation (the
    # "d8" / output-stride-8 setup); all four stage outputs are exposed.
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    # Main head: EMA attention over the stage-4 features (2048 channels),
    # 64 bases refined over 3 iterations, 19 output classes.
    decode_head=dict(
        type='EMAHead',
        in_channels=2048,
        in_index=3,
        channels=256,
        ema_channels=512,
        num_bases=64,
        num_stages=3,
        momentum=0.1,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # Auxiliary FCN head on stage-3 features; its loss is down-weighted (0.4).
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
# model training and testing settings
train_cfg = dict()
# Whole-image inference (no sliding window).
test_cfg = dict(mode='whole')
| 28.458333 | 75 | 0.564422 |
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='EMAHead',
in_channels=2048,
in_index=3,
channels=256,
ema_channels=512,
num_bases=64,
num_stages=3,
momentum=0.1,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
train_cfg = dict()
test_cfg = dict(mode='whole')
| true | true |
1c3c4a74eaae277807fe507f81a945f2f9b01f2f | 313 | py | Python | vnpy/api/websocket/__init__.py | bixia/MyVnpy | c0b464dce2ca65b0d040472b53a581d933c8b742 | [
"MIT"
] | null | null | null | vnpy/api/websocket/__init__.py | bixia/MyVnpy | c0b464dce2ca65b0d040472b53a581d933c8b742 | [
"MIT"
] | null | null | null | vnpy/api/websocket/__init__.py | bixia/MyVnpy | c0b464dce2ca65b0d040472b53a581d933c8b742 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
'''
@File : __init__.py
@License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
@Modify Time      @Author    @Version    @Description
------------ ------- -------- -----------
2021/4/3 10:10 Li Qiwen 1.0 None
'''
from .websocket import WebsocketClient | 31.3 | 56 | 0.504792 |
from .websocket import WebsocketClient | true | true |
1c3c4b065d4f5d21bbce25de69e1bad508491d6d | 17,615 | py | Python | isi_sdk_8_1_0/isi_sdk_8_1_0/models/upgrade_cluster.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_0/isi_sdk_8_1_0/models/upgrade_cluster.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_0/isi_sdk_8_1_0/models/upgrade_cluster.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_1_0.models.cluster_nodes_onefs_version import ClusterNodesOnefsVersion # noqa: F401,E501
from isi_sdk_8_1_0.models.upgrade_cluster_cluster_overview import UpgradeClusterClusterOverview # noqa: F401,E501
from isi_sdk_8_1_0.models.upgrade_cluster_upgrade_settings import UpgradeClusterUpgradeSettings # noqa: F401,E501
class UpgradeCluster(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'cluster_overview': 'UpgradeClusterClusterOverview',
'cluster_state': 'str',
'current_process': 'str',
'finish_time': 'str',
'install_image_path': 'str',
'node_median_time': 'int',
'onefs_version_current': 'ClusterNodesOnefsVersion',
'onefs_version_upgrade': 'ClusterNodesOnefsVersion',
'patch_action': 'str',
'patch_name': 'str',
'start_time': 'str',
'upgrade_is_committed': 'bool',
'upgrade_settings': 'UpgradeClusterUpgradeSettings',
'upgrade_triggered_time': 'str'
}
attribute_map = {
'cluster_overview': 'cluster_overview',
'cluster_state': 'cluster_state',
'current_process': 'current_process',
'finish_time': 'finish_time',
'install_image_path': 'install_image_path',
'node_median_time': 'node_median_time',
'onefs_version_current': 'onefs_version_current',
'onefs_version_upgrade': 'onefs_version_upgrade',
'patch_action': 'patch_action',
'patch_name': 'patch_name',
'start_time': 'start_time',
'upgrade_is_committed': 'upgrade_is_committed',
'upgrade_settings': 'upgrade_settings',
'upgrade_triggered_time': 'upgrade_triggered_time'
}
def __init__(self, cluster_overview=None, cluster_state=None, current_process=None, finish_time=None, install_image_path=None, node_median_time=None, onefs_version_current=None, onefs_version_upgrade=None, patch_action=None, patch_name=None, start_time=None, upgrade_is_committed=None, upgrade_settings=None, upgrade_triggered_time=None): # noqa: E501
"""UpgradeCluster - a model defined in Swagger""" # noqa: E501
self._cluster_overview = None
self._cluster_state = None
self._current_process = None
self._finish_time = None
self._install_image_path = None
self._node_median_time = None
self._onefs_version_current = None
self._onefs_version_upgrade = None
self._patch_action = None
self._patch_name = None
self._start_time = None
self._upgrade_is_committed = None
self._upgrade_settings = None
self._upgrade_triggered_time = None
self.discriminator = None
if cluster_overview is not None:
self.cluster_overview = cluster_overview
if cluster_state is not None:
self.cluster_state = cluster_state
if current_process is not None:
self.current_process = current_process
if finish_time is not None:
self.finish_time = finish_time
if install_image_path is not None:
self.install_image_path = install_image_path
if node_median_time is not None:
self.node_median_time = node_median_time
if onefs_version_current is not None:
self.onefs_version_current = onefs_version_current
if onefs_version_upgrade is not None:
self.onefs_version_upgrade = onefs_version_upgrade
if patch_action is not None:
self.patch_action = patch_action
if patch_name is not None:
self.patch_name = patch_name
if start_time is not None:
self.start_time = start_time
if upgrade_is_committed is not None:
self.upgrade_is_committed = upgrade_is_committed
if upgrade_settings is not None:
self.upgrade_settings = upgrade_settings
if upgrade_triggered_time is not None:
self.upgrade_triggered_time = upgrade_triggered_time
@property
def cluster_overview(self):
"""Gets the cluster_overview of this UpgradeCluster. # noqa: E501
The cluster overview of an upgrade process. # noqa: E501
:return: The cluster_overview of this UpgradeCluster. # noqa: E501
:rtype: UpgradeClusterClusterOverview
"""
return self._cluster_overview
@cluster_overview.setter
def cluster_overview(self, cluster_overview):
"""Sets the cluster_overview of this UpgradeCluster.
The cluster overview of an upgrade process. # noqa: E501
:param cluster_overview: The cluster_overview of this UpgradeCluster. # noqa: E501
:type: UpgradeClusterClusterOverview
"""
self._cluster_overview = cluster_overview
@property
def cluster_state(self):
"""Gets the cluster_state of this UpgradeCluster. # noqa: E501
The different states of an upgrade, rollback, or assessment. One of the following values: 'committed', 'upgraded', 'partially upgraded', 'upgrading', 'rolling back', 'assessing', 'error' # noqa: E501
:return: The cluster_state of this UpgradeCluster. # noqa: E501
:rtype: str
"""
return self._cluster_state
@cluster_state.setter
def cluster_state(self, cluster_state):
"""Sets the cluster_state of this UpgradeCluster.
The different states of an upgrade, rollback, or assessment. One of the following values: 'committed', 'upgraded', 'partially upgraded', 'upgrading', 'rolling back', 'assessing', 'error' # noqa: E501
:param cluster_state: The cluster_state of this UpgradeCluster. # noqa: E501
:type: str
"""
self._cluster_state = cluster_state
@property
def current_process(self):
"""Gets the current_process of this UpgradeCluster. # noqa: E501
The current upgrade activity. # noqa: E501
:return: The current_process of this UpgradeCluster. # noqa: E501
:rtype: str
"""
return self._current_process
@current_process.setter
def current_process(self, current_process):
"""Sets the current_process of this UpgradeCluster.
The current upgrade activity. # noqa: E501
:param current_process: The current_process of this UpgradeCluster. # noqa: E501
:type: str
"""
self._current_process = current_process
@property
def finish_time(self):
"""Gets the finish_time of this UpgradeCluster. # noqa: E501
The time when a rollback, assessment or upgrade has finished completely. Use ISO 8601 standard. Null if the cluster_state is not 'upgraded'. # noqa: E501
:return: The finish_time of this UpgradeCluster. # noqa: E501
:rtype: str
"""
return self._finish_time
@finish_time.setter
def finish_time(self, finish_time):
"""Sets the finish_time of this UpgradeCluster.
The time when a rollback, assessment or upgrade has finished completely. Use ISO 8601 standard. Null if the cluster_state is not 'upgraded'. # noqa: E501
:param finish_time: The finish_time of this UpgradeCluster. # noqa: E501
:type: str
"""
self._finish_time = finish_time
@property
def install_image_path(self):
"""Gets the install_image_path of this UpgradeCluster. # noqa: E501
The location (path) of the upgrade image which must be within /ifs. Null if the cluster_state is 'committed' or 'upgraded.' # noqa: E501
:return: The install_image_path of this UpgradeCluster. # noqa: E501
:rtype: str
"""
return self._install_image_path
@install_image_path.setter
def install_image_path(self, install_image_path):
"""Sets the install_image_path of this UpgradeCluster.
The location (path) of the upgrade image which must be within /ifs. Null if the cluster_state is 'committed' or 'upgraded.' # noqa: E501
:param install_image_path: The install_image_path of this UpgradeCluster. # noqa: E501
:type: str
"""
self._install_image_path = install_image_path
@property
def node_median_time(self):
"""Gets the node_median_time of this UpgradeCluster. # noqa: E501
The median time (seconds) to complete each node so far during this upgrade. Before the first node in an upgrade has completed this key will have an associated null value. # noqa: E501
:return: The node_median_time of this UpgradeCluster. # noqa: E501
:rtype: int
"""
return self._node_median_time
@node_median_time.setter
def node_median_time(self, node_median_time):
"""Sets the node_median_time of this UpgradeCluster.
The median time (seconds) to complete each node so far during this upgrade. Before the first node in an upgrade has completed this key will have an associated null value. # noqa: E501
:param node_median_time: The node_median_time of this UpgradeCluster. # noqa: E501
:type: int
"""
self._node_median_time = node_median_time
@property
def onefs_version_current(self):
"""Gets the onefs_version_current of this UpgradeCluster. # noqa: E501
The current OneFS version before upgrade. # noqa: E501
:return: The onefs_version_current of this UpgradeCluster. # noqa: E501
:rtype: ClusterNodesOnefsVersion
"""
return self._onefs_version_current
@onefs_version_current.setter
def onefs_version_current(self, onefs_version_current):
"""Sets the onefs_version_current of this UpgradeCluster.
The current OneFS version before upgrade. # noqa: E501
:param onefs_version_current: The onefs_version_current of this UpgradeCluster. # noqa: E501
:type: ClusterNodesOnefsVersion
"""
self._onefs_version_current = onefs_version_current
@property
def onefs_version_upgrade(self):
"""Gets the onefs_version_upgrade of this UpgradeCluster. # noqa: E501
The OneFS version the user is attempting to upgrade to. Null if the cluster_state is 'committed' or 'assessing.' # noqa: E501
:return: The onefs_version_upgrade of this UpgradeCluster. # noqa: E501
:rtype: ClusterNodesOnefsVersion
"""
return self._onefs_version_upgrade
@onefs_version_upgrade.setter
def onefs_version_upgrade(self, onefs_version_upgrade):
"""Sets the onefs_version_upgrade of this UpgradeCluster.
The OneFS version the user is attempting to upgrade to. Null if the cluster_state is 'committed' or 'assessing.' # noqa: E501
:param onefs_version_upgrade: The onefs_version_upgrade of this UpgradeCluster. # noqa: E501
:type: ClusterNodesOnefsVersion
"""
self._onefs_version_upgrade = onefs_version_upgrade
@property
def patch_action(self):
"""Gets the patch_action of this UpgradeCluster. # noqa: E501
The most recent patch action performed. # noqa: E501
:return: The patch_action of this UpgradeCluster. # noqa: E501
:rtype: str
"""
return self._patch_action
@patch_action.setter
def patch_action(self, patch_action):
"""Sets the patch_action of this UpgradeCluster.
The most recent patch action performed. # noqa: E501
:param patch_action: The patch_action of this UpgradeCluster. # noqa: E501
:type: str
"""
self._patch_action = patch_action
@property
def patch_name(self):
"""Gets the patch_name of this UpgradeCluster. # noqa: E501
The patch with the most recent patch action. # noqa: E501
:return: The patch_name of this UpgradeCluster. # noqa: E501
:rtype: str
"""
return self._patch_name
@patch_name.setter
def patch_name(self, patch_name):
"""Sets the patch_name of this UpgradeCluster.
The patch with the most recent patch action. # noqa: E501
:param patch_name: The patch_name of this UpgradeCluster. # noqa: E501
:type: str
"""
self._patch_name = patch_name
@property
def start_time(self):
"""Gets the start_time of this UpgradeCluster. # noqa: E501
The time when an upgrade, rollback, or assessment was started. Use ISO 8601 standard. Null if the cluster_state is 'committed' or 'partially upgraded.' # noqa: E501
:return: The start_time of this UpgradeCluster. # noqa: E501
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this UpgradeCluster.
The time when an upgrade, rollback, or assessment was started. Use ISO 8601 standard. Null if the cluster_state is 'committed' or 'partially upgraded.' # noqa: E501
:param start_time: The start_time of this UpgradeCluster. # noqa: E501
:type: str
"""
self._start_time = start_time
@property
def upgrade_is_committed(self):
"""Gets the upgrade_is_committed of this UpgradeCluster. # noqa: E501
True if upgrade is committed. # noqa: E501
:return: The upgrade_is_committed of this UpgradeCluster. # noqa: E501
:rtype: bool
"""
return self._upgrade_is_committed
@upgrade_is_committed.setter
def upgrade_is_committed(self, upgrade_is_committed):
"""Sets the upgrade_is_committed of this UpgradeCluster.
True if upgrade is committed. # noqa: E501
:param upgrade_is_committed: The upgrade_is_committed of this UpgradeCluster. # noqa: E501
:type: bool
"""
self._upgrade_is_committed = upgrade_is_committed
@property
def upgrade_settings(self):
"""Gets the upgrade_settings of this UpgradeCluster. # noqa: E501
The settings necessary when starting an upgrade. Null if the cluster_state is not 'upgrading' or 'partially upgraded.' or 'error'. # noqa: E501
:return: The upgrade_settings of this UpgradeCluster. # noqa: E501
:rtype: UpgradeClusterUpgradeSettings
"""
return self._upgrade_settings
@upgrade_settings.setter
def upgrade_settings(self, upgrade_settings):
"""Sets the upgrade_settings of this UpgradeCluster.
The settings necessary when starting an upgrade. Null if the cluster_state is not 'upgrading' or 'partially upgraded.' or 'error'. # noqa: E501
:param upgrade_settings: The upgrade_settings of this UpgradeCluster. # noqa: E501
:type: UpgradeClusterUpgradeSettings
"""
self._upgrade_settings = upgrade_settings
@property
def upgrade_triggered_time(self):
"""Gets the upgrade_triggered_time of this UpgradeCluster. # noqa: E501
Time at which upgrade was originally requested. # noqa: E501
:return: The upgrade_triggered_time of this UpgradeCluster. # noqa: E501
:rtype: str
"""
return self._upgrade_triggered_time
@upgrade_triggered_time.setter
def upgrade_triggered_time(self, upgrade_triggered_time):
"""Sets the upgrade_triggered_time of this UpgradeCluster.
Time at which upgrade was originally requested. # noqa: E501
:param upgrade_triggered_time: The upgrade_triggered_time of this UpgradeCluster. # noqa: E501
:type: str
"""
self._upgrade_triggered_time = upgrade_triggered_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpgradeCluster):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 36.469979 | 356 | 0.663582 |
import pprint
import re
import six
from isi_sdk_8_1_0.models.cluster_nodes_onefs_version import ClusterNodesOnefsVersion
from isi_sdk_8_1_0.models.upgrade_cluster_cluster_overview import UpgradeClusterClusterOverview
from isi_sdk_8_1_0.models.upgrade_cluster_upgrade_settings import UpgradeClusterUpgradeSettings
class UpgradeCluster(object):
swagger_types = {
'cluster_overview': 'UpgradeClusterClusterOverview',
'cluster_state': 'str',
'current_process': 'str',
'finish_time': 'str',
'install_image_path': 'str',
'node_median_time': 'int',
'onefs_version_current': 'ClusterNodesOnefsVersion',
'onefs_version_upgrade': 'ClusterNodesOnefsVersion',
'patch_action': 'str',
'patch_name': 'str',
'start_time': 'str',
'upgrade_is_committed': 'bool',
'upgrade_settings': 'UpgradeClusterUpgradeSettings',
'upgrade_triggered_time': 'str'
}
attribute_map = {
'cluster_overview': 'cluster_overview',
'cluster_state': 'cluster_state',
'current_process': 'current_process',
'finish_time': 'finish_time',
'install_image_path': 'install_image_path',
'node_median_time': 'node_median_time',
'onefs_version_current': 'onefs_version_current',
'onefs_version_upgrade': 'onefs_version_upgrade',
'patch_action': 'patch_action',
'patch_name': 'patch_name',
'start_time': 'start_time',
'upgrade_is_committed': 'upgrade_is_committed',
'upgrade_settings': 'upgrade_settings',
'upgrade_triggered_time': 'upgrade_triggered_time'
}
def __init__(self, cluster_overview=None, cluster_state=None, current_process=None, finish_time=None, install_image_path=None, node_median_time=None, onefs_version_current=None, onefs_version_upgrade=None, patch_action=None, patch_name=None, start_time=None, upgrade_is_committed=None, upgrade_settings=None, upgrade_triggered_time=None):
self._cluster_overview = None
self._cluster_state = None
self._current_process = None
self._finish_time = None
self._install_image_path = None
self._node_median_time = None
self._onefs_version_current = None
self._onefs_version_upgrade = None
self._patch_action = None
self._patch_name = None
self._start_time = None
self._upgrade_is_committed = None
self._upgrade_settings = None
self._upgrade_triggered_time = None
self.discriminator = None
if cluster_overview is not None:
self.cluster_overview = cluster_overview
if cluster_state is not None:
self.cluster_state = cluster_state
if current_process is not None:
self.current_process = current_process
if finish_time is not None:
self.finish_time = finish_time
if install_image_path is not None:
self.install_image_path = install_image_path
if node_median_time is not None:
self.node_median_time = node_median_time
if onefs_version_current is not None:
self.onefs_version_current = onefs_version_current
if onefs_version_upgrade is not None:
self.onefs_version_upgrade = onefs_version_upgrade
if patch_action is not None:
self.patch_action = patch_action
if patch_name is not None:
self.patch_name = patch_name
if start_time is not None:
self.start_time = start_time
if upgrade_is_committed is not None:
self.upgrade_is_committed = upgrade_is_committed
if upgrade_settings is not None:
self.upgrade_settings = upgrade_settings
if upgrade_triggered_time is not None:
self.upgrade_triggered_time = upgrade_triggered_time
@property
def cluster_overview(self):
return self._cluster_overview
@cluster_overview.setter
def cluster_overview(self, cluster_overview):
self._cluster_overview = cluster_overview
@property
def cluster_state(self):
return self._cluster_state
@cluster_state.setter
def cluster_state(self, cluster_state):
self._cluster_state = cluster_state
@property
def current_process(self):
return self._current_process
@current_process.setter
def current_process(self, current_process):
self._current_process = current_process
@property
def finish_time(self):
return self._finish_time
@finish_time.setter
def finish_time(self, finish_time):
self._finish_time = finish_time
@property
def install_image_path(self):
return self._install_image_path
@install_image_path.setter
def install_image_path(self, install_image_path):
self._install_image_path = install_image_path
@property
def node_median_time(self):
return self._node_median_time
@node_median_time.setter
def node_median_time(self, node_median_time):
self._node_median_time = node_median_time
@property
def onefs_version_current(self):
return self._onefs_version_current
@onefs_version_current.setter
def onefs_version_current(self, onefs_version_current):
self._onefs_version_current = onefs_version_current
@property
def onefs_version_upgrade(self):
return self._onefs_version_upgrade
@onefs_version_upgrade.setter
def onefs_version_upgrade(self, onefs_version_upgrade):
self._onefs_version_upgrade = onefs_version_upgrade
@property
def patch_action(self):
return self._patch_action
@patch_action.setter
def patch_action(self, patch_action):
self._patch_action = patch_action
@property
def patch_name(self):
return self._patch_name
@patch_name.setter
def patch_name(self, patch_name):
self._patch_name = patch_name
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, start_time):
self._start_time = start_time
@property
def upgrade_is_committed(self):
return self._upgrade_is_committed
@upgrade_is_committed.setter
def upgrade_is_committed(self, upgrade_is_committed):
self._upgrade_is_committed = upgrade_is_committed
@property
def upgrade_settings(self):
return self._upgrade_settings
@upgrade_settings.setter
def upgrade_settings(self, upgrade_settings):
self._upgrade_settings = upgrade_settings
@property
def upgrade_triggered_time(self):
return self._upgrade_triggered_time
@upgrade_triggered_time.setter
def upgrade_triggered_time(self, upgrade_triggered_time):
self._upgrade_triggered_time = upgrade_triggered_time
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, UpgradeCluster):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c3c4b0bd7753094384ce55ea4377074e4dd8d8f | 1,667 | py | Python | tools/tinyos/python/__init__.py | thp-comnets/tinyos-main | a8363f9acc0cd7361de31c1531858d2bfb2a75df | [
"BSD-3-Clause"
] | 1 | 2017-11-09T06:07:24.000Z | 2017-11-09T06:07:24.000Z | tinyos3/__init__.py | gollum18/tinyos3 | 925e2997b001a8816f601a214b6b1fc86c2c5b03 | [
"CNRI-Python"
] | null | null | null | tinyos3/__init__.py | gollum18/tinyos3 | 925e2997b001a8816f601a214b6b1fc86c2c5b03 | [
"CNRI-Python"
] | 1 | 2015-01-26T17:39:57.000Z | 2015-01-26T17:39:57.000Z | #
# Copyright (c) 2005
# The President and Fellows of Harvard College.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Geoffrey Mainland <mainland@eecs.harvard.edu>
#
__all__ = ["message", "packet", "utils", "tossim", "misc"]
| 50.515152 | 77 | 0.770846 |
__all__ = ["message", "packet", "utils", "tossim", "misc"]
| true | true |
1c3c4b96adafe33c942e391854eb089741427164 | 1,196 | py | Python | platypush/backend/pushbullet/listener.py | BlackLight/platypush | 6c0a8bf2599eb4ab41a6122dbd988075d8b1a63a | [
"MIT"
] | 228 | 2018-01-30T11:17:09.000Z | 2022-03-24T11:22:26.000Z | platypush/backend/pushbullet/listener.py | BlackLight/platypush | 6c0a8bf2599eb4ab41a6122dbd988075d8b1a63a | [
"MIT"
] | 167 | 2017-12-11T19:35:38.000Z | 2022-03-27T14:45:30.000Z | platypush/backend/pushbullet/listener.py | BlackLight/runbullet | 8d26c8634d2677b4402f0a21b9ab8244b44640db | [
"MIT"
] | 16 | 2018-05-03T07:31:56.000Z | 2021-12-05T19:27:37.000Z | import logging
import time
from typing import Callable, Optional
from pushbullet import Listener as _Listener
class Listener(_Listener):
"""
Extends the Pushbullet Listener object by adding ``on_open`` and ``on_close`` handlers.
"""
def __init__(self,
*args,
on_open: Optional[Callable[[], None]] = None,
on_close: Optional[Callable[[], None]] = None,
**kwargs):
super().__init__(*args, **kwargs)
self._on_open_hndl = on_open
self._on_close_hndl = on_close
self.logger = logging.getLogger(__name__)
def _on_open(self):
def callback(*_):
self.connected = True
self.last_update = time.time()
if self._on_open_hndl:
self._on_open_hndl()
return callback
def _on_close(self):
def callback(*_):
self.connected = False
if self._on_close_hndl:
try:
self._on_close_hndl()
except Exception as e:
self.logger.warning(f'Pushbullet listener close error: {e}')
return callback
# vim:sw=4:ts=4:et:
| 27.181818 | 91 | 0.562709 | import logging
import time
from typing import Callable, Optional
from pushbullet import Listener as _Listener
class Listener(_Listener):
def __init__(self,
*args,
on_open: Optional[Callable[[], None]] = None,
on_close: Optional[Callable[[], None]] = None,
**kwargs):
super().__init__(*args, **kwargs)
self._on_open_hndl = on_open
self._on_close_hndl = on_close
self.logger = logging.getLogger(__name__)
def _on_open(self):
def callback(*_):
self.connected = True
self.last_update = time.time()
if self._on_open_hndl:
self._on_open_hndl()
return callback
def _on_close(self):
def callback(*_):
self.connected = False
if self._on_close_hndl:
try:
self._on_close_hndl()
except Exception as e:
self.logger.warning(f'Pushbullet listener close error: {e}')
return callback
| true | true |
1c3c4bdd31b8713dcd3ebb3084cb9d78ed721b3d | 214,380 | py | Python | OldVersions/Viav021.py | GreenGilad/VIA | 01b408f3abaf3b42ea13cccd49748cafdca56f07 | [
"MIT"
] | 24 | 2021-02-12T12:31:43.000Z | 2022-03-27T12:52:05.000Z | OldVersions/Viav021.py | GreenGilad/VIA | 01b408f3abaf3b42ea13cccd49748cafdca56f07 | [
"MIT"
] | 13 | 2021-09-22T21:59:00.000Z | 2022-01-24T12:40:01.000Z | OldVersions/Viav021.py | GreenGilad/VIA | 01b408f3abaf3b42ea13cccd49748cafdca56f07 | [
"MIT"
] | 11 | 2021-08-04T05:33:18.000Z | 2022-03-10T22:57:44.000Z | import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, csgraph
import scipy
import igraph as ig
import leidenalg
import time
import hnswlib
import matplotlib.pyplot as plt
import matplotlib
import math
import multiprocessing
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy import sparse
from sklearn.metrics.pairwise import euclidean_distances
import umap
import scanpy as sc
from MulticoreTSNE import MulticoreTSNE as TSNE
import random
from scipy.sparse.csgraph import connected_components
import pygam as pg
import matplotlib.colors as colors
import matplotlib.cm as cm
import palantir #/home/shobi/anaconda3/envs/ViaEnv/lib/python3.7/site-packages/palantir
# Version prior to the Feb 13 translation of the Chinese comments into English.
# Jan 2020 note: to push from the IDE, use Right-click -> Git -> Repository -> Push.
def plot_sc_pb(ax, embedding, prob, ti):
    """Scatter-plot single cells on the 2-D embedding, coloured by branch probability.

    Cells are coloured with the 'viridis' map scaled to [0, max(prob)]; the alpha
    channel is stepped so that low-probability cells fade into the background
    (alpha 0.2 for prob <= 0.3, 0.5 up to 0.5, 0.8 above that).

    :param ax: matplotlib axes to draw on.
    :param embedding: (n_cells, 2) array of visualization coordinates.
    :param prob: per-cell branch probability, one value per embedding row.
    :param ti: terminal-cluster label, used only for the axes title.
    """
    prob = np.asarray(prob)
    viridis = matplotlib.cm.get_cmap('viridis')
    scale = matplotlib.colors.Normalize(vmin=0, vmax=np.max(prob))
    rgba = viridis(scale(prob)).reshape(-1, 4)
    # Step the alpha channel with the probability; values outside every bin
    # (e.g. NaN) keep the colormap's own alpha, as before.
    alpha_bins = [prob <= 0.3,
                  (prob > 0.3) & (prob <= 0.5),
                  (prob > 0.5) & (prob <= 0.7),
                  prob > 0.7]
    rgba[:, 3] = np.select(alpha_bins, [0.2, 0.5, 0.8, 0.8], default=rgba[:, 3])
    ax.scatter(embedding[:, 0], embedding[:, 1], c=rgba, s=10, cmap='viridis',
               edgecolors='none')
    ax.set_title('Target: ' + str(ti))
def simulate_multinomial(vmultinomial):
    """Sample one state index from a categorical (multinomial) distribution.

    Draws r ~ Uniform(0, 1) and returns the first index whose cumulative
    probability reaches r (inverse-CDF sampling), so index k is returned with
    probability vmultinomial[k].

    :param vmultinomial: 1-D sequence of per-state probabilities summing to 1.
    :return: int index of the sampled state.
    """
    r = np.random.uniform(0.0, 1.0)
    # np.searchsorted implements the inverse-CDF lookup directly and, unlike the
    # previous "last index with cumsum < r" formulation, does not crash on the
    # (measure-zero but possible) draw r == 0.0, for which the old index list
    # was empty.
    return int(np.searchsorted(np.cumsum(vmultinomial), r, side='left'))
def sc_loc_ofsuperCluster_PCAspace(p0, p1, idx):
    """For every super-cluster of p0, find a representative single cell in PCA space
    and return its index within the idx-downsampled data.

    Representatives are the full-sample nearest neighbours of a (possibly
    pseudotime-filtered) cluster centroid:
      * super-terminal clusters use their paired sub-terminal cluster's members;
      * root clusters keep only cells below the 20th pseudotime percentile;
      * all other clusters use every member cell.

    :param p0: coarse VIA iteration (labels, data, knn_struct, root).
    :param p1: fine VIA iteration (labels, pseudotimes, terminal pairings, root).
    :param idx: indices of the downsampled cells.
    :return: list with one downsampled-space index per super-cluster.
    """
    print("dict of terminal state pairs, Super: sub: ", p1.dict_terminal_super_sub_pairs)
    super_labels = np.asarray(p0.labels)
    sub_labels = np.asarray(p1.labels)
    sc_pt = p1.single_cell_pt_markov

    def _anchor(member_idx):
        # Full-sample cell nearest to the centroid of the given member cells.
        centroid = np.mean(p0.data[member_idx], axis=0)
        nbr, _ = p0.knn_struct.knn_query(centroid, k=1)
        return nbr[0][0]

    ci_list = []
    for ci in list(set(p0.labels)):
        if ci in p1.revised_super_terminal_clusters:
            members = np.where(sub_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
            pts = [sc_pt[i] for i in members]
            cutoff = np.percentile(pts, 0)  # percentile 0 keeps every member
            members = [members[i] for i in range(len(pts)) if pts[i] >= cutoff]
        elif ci in p0.root:
            root_pos = np.where(np.asarray(p0.root) == ci)[0][0]
            print('loc root', root_pos)
            members = np.where(sub_labels == p1.root[root_pos])[0]
            pts = [sc_pt[i] for i in members]
            cutoff = np.percentile(pts, 20)  # keep the earliest-pseudotime cells
            members = [members[i] for i in range(len(pts)) if pts[i] <= cutoff]
        else:
            members = np.where(super_labels == ci)[0]
        ci_list.append(_anchor(members))

    # Map each full-sample anchor onto the downsampled PCA matrix via a fresh
    # hnsw index built only on the sampled rows.
    X_ds = p0.data[idx]
    ds_index = hnswlib.Index(space='l2', dim=p0.data.shape[1])
    ds_index.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
    ds_index.add_items(X_ds)
    ds_index.set_ef(50)
    new_superclust_index_ds = []
    for anchor in ci_list:
        nbr, _ = ds_index.knn_query(p0.data[anchor, :], k=1)
        new_superclust_index_ds.append(nbr[0][0])
    return new_superclust_index_ds
def sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx):
    """For every super-cluster of p0, find the single cell nearest (in the 2-D
    embedding) to the cluster's mean location, and return both the embedding knn
    index and the per-cluster cell indices.

    Cluster membership mirrors sc_loc_ofsuperCluster_PCAspace, but labels and
    pseudotimes are first restricted to the downsampled cells in `idx`, and
    terminal clusters keep only cells above the 80th pseudotime percentile.

    :param embedding: (len(idx), 2) visualization coordinates.
    :param p0: coarse VIA iteration; :param p1: fine VIA iteration.
    :param idx: indices of the downsampled cells.
    :return: (knn_hnsw, ci_list) — hnsw index on the embedding, and one
        embedded-space cell index per super-cluster.
    """
    knn_hnsw = hnswlib.Index(space='l2', dim=embedding.shape[1])
    knn_hnsw.init_index(max_elements=embedding.shape[0], ef_construction=200, M=16)
    knn_hnsw.add_items(embedding)
    knn_hnsw.set_ef(50)

    super_labels = np.asarray(p0.labels)[idx]
    sub_labels = np.asarray(p1.labels)[idx]
    sc_pt = list(np.asarray(p1.single_cell_pt_markov)[idx])

    ci_list = []
    for ci in list(set(p0.labels)):
        if ci in p1.revised_super_terminal_clusters:
            members = np.where(sub_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
            pts = [sc_pt[i] for i in members]
            cutoff = np.percentile(pts, 80)  # keep the latest-pseudotime cells
            members = [members[i] for i in range(len(pts)) if pts[i] >= cutoff]
        elif ci in p0.root:
            root_pos = np.where(np.asarray(p0.root) == ci)[0][0]
            print('loc root', root_pos)
            members = np.where(np.asarray(sub_labels) == p1.root[root_pos])[0]
            pts = [sc_pt[i] for i in members]
            cutoff = np.percentile(pts, 20)  # keep the earliest-pseudotime cells
            members = [members[i] for i in range(len(pts)) if pts[i] <= cutoff]
        else:
            members = np.where(super_labels == ci)[0]
        # Cell nearest to the mean embedded location of the selected members.
        mean_x = np.mean([embedding[m, 0] for m in members])
        mean_y = np.mean([embedding[m, 1] for m in members])
        nbr, _ = knn_hnsw.knn_query(np.array([mean_x, mean_y]), k=1)
        ci_list.append(nbr[0][0])
    return knn_hnsw, ci_list
def draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, G, idx, X_data):
    """Plot, for each terminal cluster, the single-cell branch probabilities and a
    GAM-smoothed root-to-terminal path drawn over the 2-D embedding.

    For every terminal cluster this (1) finds the root and terminal anchor cells,
    (2) runs a weighted shortest path on the full-sample KNN graph G, (3) maps the
    path onto the downsampled cells, re-links any disconnected path segments, and
    (4) fits a GAM per segment to draw a smooth dashed curve with direction arrows.
    One matplotlib figure is created per terminal cluster (side effect only; the
    function returns None).

    :param p1: fine-grained VIA object (labels, single_cell_bp, pseudotimes,
        root, terminal_clusters, connected_comp_labels, knn_struct).
    :param embedding: 2-D visualization coordinates for the cells in `idx`.
    :param knn_hnsw: hnswlib index built on `embedding`.
    :param G: igraph KNN graph over the full sample (edge attr 'weight').
    :param idx: indices of the downsampled cells.
    :param X_data: PCA-space matrix of all samples.
    """
    # G is the igraph knn (low K) used for shortest path. no idx needed as it's made on full sample
    # knn_hnsw is the knn made in the embedded space used for query
    # X_data is the PCA space with all samples
    # idx is the selected indices of the downsampled samples
    y_root = []
    x_root = []
    root1_list = []
    p1_sc_bp = p1.single_cell_bp[idx, :]  # per-cell branch probabilities, one column per terminal cluster
    p1_labels = np.asarray(p1.labels)[idx]
    p1_sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
    p1_cc = p1.connected_comp_labels
    X_ds = X_data[idx, :]
    # hnsw index on the downsampled PCA rows: maps full-sample path nodes to idx-space.
    p_ds = hnswlib.Index(space='l2', dim=X_ds.shape[1])
    p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
    p_ds.add_items(X_ds)
    p_ds.set_ef(50)
    # Locate one anchor cell per root: its embedded coordinates and its index in
    # the full PCA graph (the latter is the shortest-path source).
    for ii, r_i in enumerate(p1.root):
        loc_i = np.where(p1_labels == p1.root[ii])[0]
        x = [embedding[xi, 0] for xi in loc_i]
        y = [embedding[yi, 1] for yi in loc_i]
        labels_root, distances_root = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
                                                         k=1)  # sc location in embedded space of root cell
        x_root.append(embedding[labels_root, 0][0])
        y_root.append(embedding[labels_root, 1][0])
        labelsroot1, distances1 = p1.knn_struct.knn_query(X_ds[labels_root[0][0], :],
                                                          k=1)  # index of sc-root-cell in the full-PCA space. Need for path
        root1_list.append(labelsroot1[0][0])
    # single-cell branch probability evolution probability
    for i, ti in enumerate(p1.terminal_clusters):
        print('i, ti, p1.root, p1.connected', i, ti, p1.root, p1_cc)
        print('root1list', root1_list)
        # Pick the root belonging to the same connected component as this terminal cluster.
        root_i = p1.root[p1_cc[ti]]
        xx_root = x_root[p1_cc[ti]]
        yy_root = y_root[p1_cc[ti]]
        fig, ax = plt.subplots()
        plot_sc_pb(ax, embedding, p1_sc_bp[:, i], ti)
        # Terminal anchor: mean embedded location of the later-pseudotime half of the cluster.
        loc_i = np.where(p1_labels == ti)[0]
        val_pt = [p1_sc_pt_markov[i] for i in loc_i]
        th_pt = np.percentile(val_pt, 50)  # 50
        loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
        x = [embedding[xi, 0] for xi in
             loc_i]  # location of sc nearest to average location of terminal clus in the EMBEDDED space
        y = [embedding[yi, 1] for yi in loc_i]
        labels, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
                                               k=1)  # knn_hnsw is knn of embedded space
        x_sc = embedding[labels[0], 0]  # terminal sc location in the embedded space
        y_sc = embedding[labels[0], 1]
        start_time = time.time()
        labelsq1, distances1 = p1.knn_struct.knn_query(X_ds[labels[0][0], :],
                                                       k=1)  # find the nearest neighbor in the PCA-space full graph
        print('labels root and labels[0]', root1_list[p1_cc[ti]], labels[0])
        ## path = G.get_shortest_paths(labels_root[0][0], to=labels[0][0], weights='weight') #G is the knn of all sc points
        # path = G.get_shortest_paths(labelsroot1[0][0], to=labelsq1[0][0], weights='weight')  # G is the knn of all sc points
        path = G.get_shortest_paths(root1_list[p1_cc[ti]], to=labelsq1[0][0],
                                    weights='weight')  # G is the knn of all sc points
        path_idx = []  # find the single-cell which is nearest to the average-location of a terminal cluster
        # get the nearest-neighbor in this downsampled PCA-space graph. These will make the new path-way points
        for pii in path[0]:
            labelsq, distances = p_ds.knn_query(X_data[pii, :], k=1)
            # print('location of pathway point in idx-space', labelsq[0][0])
            path_idx.append(labelsq[0][0])
        print(f"get_shortest_paths time: {time.time()-start_time}")
        print('path', path)
        print('new path indices', path_idx)
        path = path_idx
        n_orange = len(path)
        # orange_m rows: (embedded x, embedded y, pseudotime-derived weight) per path point.
        orange_m = np.zeros((n_orange, 3))
        for enum_point, point in enumerate(path):
            #ax.text(embedding[point, 0], embedding[point, 1], 'D ' + str(enum_point), color='blue', fontsize=8)
            orange_m[enum_point, 0] = embedding[point, 0]
            orange_m[enum_point, 1] = embedding[point, 1]
            orange_m[enum_point, 2] = p1_sc_pt_markov[ point]
        from sklearn.neighbors import NearestNeighbors
        k_orange = 3  # increasing can smoothen in simple trajectories (Toy)
        # Build a small KNN adjacency over the path points so broken stretches can be detected.
        nbrs = NearestNeighbors(n_neighbors=k_orange, algorithm='ball_tree').fit(orange_m[:, 0:])
        distances, indices = nbrs.kneighbors(orange_m[:, 0:])
        row_list = []
        col_list = []
        dist_list = []
        for i_or in range(n_orange):
            for j_or in range(1, k_orange):
                row_list.append(i_or)
                col_list.append(indices[i_or, j_or])
                dist_list.append(distances[i_or, j_or])
        print('target number ' + str(ti))
        orange_adjacency_knn = csr_matrix((np.array(dist_list), (np.array(row_list), np.array(col_list))),
                                          shape=(n_orange, n_orange))
        print('orange adj knn shape', orange_adjacency_knn.shape)
        n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False, return_labels=True)
        # Re-weight path points: squared (and doubled) pseudotime emphasises late cells
        # when choosing where to bridge disconnected components below.
        for enum_point, point in enumerate(path):  # [0]):
            orange_m[enum_point, 2] = p1_sc_pt_markov[point] * p1_sc_pt_markov[
                point] * 2  # p1.single_cell_pt_markov[point] * p1.single_cell_pt_markov[point]*2
        # Stitch disconnected path components: repeatedly join the first component to the
        # rest via the closest pair of points (candidates restricted to its top-30%
        # pseudotime members), until the path graph is connected.
        while n_mst > 1:
            comp_root = comp_labels_mst[0]
            # print('comp-root', comp_root)
            min_ed = 9999999
            loc_comp_i = np.where(comp_labels_mst == comp_root)[0]
            loc_comp_noti = np.where(comp_labels_mst != comp_root)[0]
            # print('compi', loc_comp_i)
            # print('comp_noti', loc_comp_noti)
            orange_pt_val = [orange_m[cc, 2] for cc in loc_comp_i]
            loc_comp_i_revised = [loc_comp_i[cc] for cc in range(len(orange_pt_val)) if
                                  orange_pt_val[cc] >= np.percentile(orange_pt_val, 70)]
            for nn_i in loc_comp_i_revised:
                ed = euclidean_distances(orange_m[nn_i, :].reshape(1, -1), orange_m[loc_comp_noti])
                if np.min(ed) < min_ed:
                    ed_where_min = np.where(ed[0] == np.min(ed))[0][0]
                    # print('ed where min', ed_where_min, np.where(ed[0] == np.min(ed)))
                    min_ed = np.min(ed)
                    ed_loc_end = loc_comp_noti[ed_where_min]
                    ed_loc_start = nn_i
            # print('min ed', min_ed)
            print('Connecting components before sc-bp-GAM: the closest pair of points', ed_loc_start, ed_loc_end)
            orange_adjacency_knn[ed_loc_start, ed_loc_end] = min_ed
            n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False,
                                                          return_labels=True)
        if n_mst == 1:  #if no disconnected components in the graph
            # Shortest path through the stitched point graph, then one GAM-smoothed
            # dashed segment per consecutive pair of path points.
            (orange_sources, orange_targets) = orange_adjacency_knn.nonzero()
            orange_edgelist = list(zip(orange_sources.tolist(), orange_targets.tolist()))
            G_orange = ig.Graph(n=orange_adjacency_knn.shape[0], edges=orange_edgelist,
                                edge_attrs={'weight': orange_adjacency_knn.data.tolist()}, )
            path_orange = G_orange.get_shortest_paths(0, to=orange_adjacency_knn.shape[0] - 1, weights='weight')[0]
            print('path orange', path_orange)
            len_path_orange = len(path_orange)
            for path_i in range(len_path_orange - 1):
                path_x_start = orange_m[path_orange[path_i], 0]
                path_x_end = orange_m[path_orange[path_i + 1], 0]
                orange_x = [orange_m[path_orange[path_i], 0], orange_m[path_orange[path_i + 1], 0]]
                orange_minx = min(orange_x)
                orange_maxx = max(orange_x)
                orange_y = [orange_m[path_orange[path_i], 1], orange_m[path_orange[path_i + 1], 1]]
                orange_miny = min(orange_y)
                orange_maxy = max(orange_y)
                # Fit the GAM on all embedded cells inside the segment's bounding box ...
                orange_embedding_sub = embedding[
                    ((embedding[:, 0] <= orange_maxx) & (embedding[:, 0] >= orange_minx)) & (
                            (embedding[:, 1] <= orange_maxy) & ((embedding[:, 1] >= orange_miny)))]
                print('orange sub size', orange_embedding_sub.shape)
                if (orange_maxy - orange_miny > 5) | (orange_maxx - orange_minx > 5):
                    orange_n_reps = 150
                else:
                    orange_n_reps = 100
                # ... with the two segment endpoints heavily replicated so the curve is pinned to them.
                or_reps = np.repeat(np.array([[orange_x[0], orange_y[0]]]), orange_n_reps, axis=0)
                orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
                or_reps = np.repeat(np.array([[orange_x[1], orange_y[1]]]), orange_n_reps, axis=0)
                orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
                orangeGam = pg.LinearGAM(n_splines=8, spline_order=3, lam=10).fit(orange_embedding_sub[:, 0],
                                                                                  orange_embedding_sub[:, 1])
                nx_spacing = 100
                orange_GAM_xval = np.linspace(orange_minx, orange_maxx, nx_spacing * 2)
                yg_orange = orangeGam.predict(X=orange_GAM_xval)
                ax.plot(orange_GAM_xval, yg_orange, color='dimgrey', linewidth=2, zorder=3, linestyle=(0, (5, 2, 1, 2)),
                        dash_capstyle='round')
                cur_x1 = orange_GAM_xval[-1]
                cur_y1 = yg_orange[-1]
                cur_x2 = orange_GAM_xval[0]
                cur_y2 = yg_orange[0]
                if path_i >= 1:
                    # Bridge the visual gap to the previous segment: connect whichever
                    # pair of current/previous curve endpoints is closest.
                    for mmddi in range(2):
                        xy11 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
                                                   np.array([prev_x1, prev_y1]).reshape(1, -1))
                        xy12 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
                                                   np.array([prev_x2, prev_y2]).reshape(1, -1))
                        xy21 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
                                                   np.array([prev_x1, prev_y1]).reshape(1, -1))
                        xy22 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
                                                   np.array([prev_x2, prev_y2]).reshape(1, -1))
                        mmdd_temp_array = np.asarray([xy11, xy12, xy21, xy22])
                        mmdd_loc = np.where(mmdd_temp_array == np.min(mmdd_temp_array))[0][0]
                        if mmdd_loc == 0:
                            ax.plot([cur_x1, prev_x1], [cur_y1, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
                                    dash_capstyle='round')
                        if mmdd_loc == 1:
                            ax.plot([cur_x1, prev_x2], [cur_y1, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
                                    dash_capstyle='round')
                        if mmdd_loc == 2:
                            ax.plot([cur_x2, prev_x1], [cur_y2, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
                                    dash_capstyle='round')
                        if mmdd_loc == 3:
                            ax.plot([cur_x2, prev_x2], [cur_y2, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
                                    dash_capstyle='round')
                if (path_x_start > path_x_end): direction_arrow_orange = -1  # going LEFT
                if (path_x_start <= path_x_end): direction_arrow_orange = 1  # going RIGHT
                # Draw a direction arrow only on segments long enough to show one clearly.
                if (abs(
                        path_x_start - path_x_end) > 2.5):  # |(abs(orange_m[path_i, 2] - orange_m[path_i + 1, 1]) > 1)):
                    if (direction_arrow_orange == -1):  # & :
                        ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
                                 orange_GAM_xval[nx_spacing - 1] - orange_GAM_xval[nx_spacing],
                                 yg_orange[nx_spacing - 1] - yg_orange[nx_spacing], shape='full', lw=0,
                                 length_includes_head=True,
                                 head_width=0.5, color='dimgray', zorder=3)
                    if (direction_arrow_orange == 1):  # &(abs(orange_m[path_i,0]-orange_m[path_i+1,0])>0.5):
                        ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
                                 orange_GAM_xval[nx_spacing + 1] - orange_GAM_xval[nx_spacing],
                                 yg_orange[nx_spacing + 1] - yg_orange[nx_spacing], shape='full', lw=0,
                                 length_includes_head=True,
                                 head_width=0.5,
                                 color='dimgray', zorder=3)
                prev_x1 = cur_x1
                prev_y1 = cur_y1
                prev_x2 = cur_x2
                prev_y2 = cur_y2
        ax.scatter(x_sc, y_sc, color='pink', zorder=3, label=str(ti), s=22)
        ax.text(x_sc + 0.5, y_sc + 0.5, 'TS ' + str(ti), color='black')
    return
def get_biased_weights(edgelist, weights, pt, round_no=1):
    """Bias edge weights by pseudotime direction using a generalised logistic.

    Edges that run "backwards" in pseudotime (from later to earlier) are
    down-weighted; edges running forwards are kept close to their original
    weight. Before biasing, edges that are both unusually heavy and touch a
    high-pseudotime node are damped, and all weights are clipped to the
    10th-90th percentile band.

    Parameters:
    edgelist -- list of (start, end) node-index pairs
    weights  -- per-edge weights, parallel to edgelist (NOT mutated)
    pt       -- per-node pseudotime values
    round_no -- 1 uses a mild logistic slope (b=1); any other round uses an
                aggressive slope (b=20, used for all the CD34 Human cells)

    Returns a new list of biased weights, parallel to edgelist.
    """
    # larger b means more aggressive biasing
    # https://en.wikipedia.org/wiki/Generalised_logistic_function
    print(len(edgelist), len(weights))
    # BUGFIX: work on a copy — the original mutated the caller's weights
    # in the damping loop below.
    weights = list(weights)
    bias_weight = []
    if round_no == 1:
        b = 1  # mild slope for the first round
    else:
        b = 20  # 20 is used for all the CD34 Human cells
    K = 1
    c = 0
    C = 1
    nu = 1
    high_weights_th = np.mean(weights)
    high_pt_th = np.percentile(np.asarray(pt), 80)
    loc_high_weights = np.where(np.asarray(weights) > high_weights_th)[0]
    loc_high_pt = np.where(np.asarray(pt) > high_pt_th)[0]
    print('weight hi th', high_weights_th)
    print('loc hi pt', loc_high_pt)
    print('edges of high weight', [edgelist[i] for i in loc_high_weights])
    # damp heavy edges that touch a high-pseudotime node
    # (np.mean is deliberately recomputed on the partially-damped list,
    # matching the original behavior)
    for i in loc_high_weights:
        start = edgelist[i][0]
        end = edgelist[i][1]
        if (start in loc_high_pt) | (end in loc_high_pt):
            weights[i] = 0.5 * np.mean(weights)
    # clip to the 10th-90th percentile band
    upper_lim = np.percentile(weights, 90)  # 80
    lower_lim = np.percentile(weights, 10)  # 20
    weights = [i if i <= upper_lim else upper_lim for i in weights]
    weights = [i if i >= lower_lim else lower_lim for i in weights]
    for i, (start, end) in enumerate(edgelist):
        Pt_a = pt[start]
        Pt_b = pt[end]
        P_ab = weights[i]
        t_ab = Pt_a - Pt_b  # negative when the edge points forward in pseudotime
        Bias_ab = K / ((C + math.exp(b * (t_ab + c)))) ** nu
        new_weight = (Bias_ab * P_ab)
        bias_weight.append(new_weight)
    print('original weights', len(weights), list(enumerate(zip(edgelist, weights))))
    print('bias weights', list(enumerate(zip(edgelist, bias_weight))))
    print('length bias weights', len(bias_weight))
    return list(bias_weight)
def expected_num_steps(start_i, N):
    """Expected number of steps before absorption, starting from state start_i.

    N is the fundamental matrix of an absorbing Markov chain; the expected
    step count for each transient state is the corresponding row sum of N.
    """
    ones_vec = np.ones(N.shape[0])
    steps_per_state = np.dot(N, ones_vec)
    return steps_per_state[start_i]
def absorption_probability(N, R, absorption_state_j):
    """Absorption probabilities M = N @ R of an absorbing Markov chain.

    Returns the full matrix M plus the column for absorbing state j, i.e.
    for every transient state the probability of ultimately being absorbed
    in state j.
    """
    M = np.matmul(N, R)
    return M, M[:, absorption_state_j]
def most_likely_path(P_transition_absorbing_markov, start_i, end_i):
    """Intended to return the most likely path from start_i to end_i on the
    absorbing-Markov-chain graph.

    NOTE(review): this is an unfinished stub. ``graph_absorbing_markov`` is
    assigned the integer 0 (the igraph construction with log-weighted edges
    was never implemented), so the ``.shortest_path`` call below always
    raises AttributeError. Do not call until the graph construction is
    filled in.
    """
    graph_absorbing_markov = 0  # ig() log weight them
    shortest_path = graph_absorbing_markov.shortest_path(start_i, end_i)
    print('the shortest path beginning at ', start_i, 'and ending in ', end_i, 'is:')
    return shortest_path
def draw_trajectory_gams(X_dimred, sc_supercluster_nn, cluster_labels, super_cluster_labels, super_edgelist, x_lazy,
                         alpha_teleport,
                         projected_sc_pt, true_label, knn, ncomp, final_super_terminal, sub_terminal_clusters,
                         title_str="hitting times", ):
    """Plot the inferred trajectory on a 2-D embedding using GAM-smoothed edges.

    Left panel: the embedding coloured by ``true_label``. Right panel: cells
    coloured by projected pseudotime, with one GAM-smoothed curve per
    super-cluster edge, an arrowhead showing the direction of increasing
    pseudotime along each edge, and terminal states highlighted in yellow.

    Assumptions (not fully verifiable from this block — confirm at caller):
    X_dimred is (n_cells, >=2); sc_supercluster_nn maps each super-cluster
    to a representative cell index; final_super_terminal and
    sub_terminal_clusters are parallel over terminal states.

    Draws on a new matplotlib figure; returns None.
    """
    x = X_dimred[:, 0]
    y = X_dimred[:, 1]
    df = pd.DataFrame({'x': x, 'y': y, 'cluster': cluster_labels, 'super_cluster': super_cluster_labels,
                       'projected_sc_pt': projected_sc_pt},
                      columns=['x', 'y', 'cluster', 'super_cluster', 'projected_sc_pt'])
    df_mean = df.groupby('cluster', as_index=False).mean()
    sub_cluster_isin_supercluster = df_mean[['cluster', 'super_cluster']]
    print('sub_cluster_isin_supercluster', sub_cluster_isin_supercluster)
    sub_cluster_isin_supercluster = sub_cluster_isin_supercluster.sort_values(by='cluster')
    sub_cluster_isin_supercluster['int_supercluster'] = sub_cluster_isin_supercluster['super_cluster'].round(0).astype(
        int)
    print('sub_cluster_isin_supercluster', sub_cluster_isin_supercluster)
    print('final_super_terminal', final_super_terminal)
    df_super_mean = df.groupby('super_cluster', as_index=False).mean()
    # mean projected pseudotime per super-cluster (used to orient the edges)
    pt = df_super_mean['projected_sc_pt'].values
    pt_int = [int(i) for i in pt]
    pt_str = [str(i) for i in pt_int]
    pt_sub = [str(int(i)) for i in df_mean['projected_sc_pt'].values]
    print('pt sub', pt_sub[0:20])
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    num_parc_group = len(set(true_label))
    line = np.linspace(0, 1, num_parc_group)
    # left panel: embedding coloured by the known (true) labels
    for color, group in zip(line, set(true_label)):
        where = np.where(np.array(true_label) == group)[0]
        ax1.scatter(X_dimred[where, 0], X_dimred[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
    ax1.legend(fontsize=6)
    ax1.set_title('true labels, ncomps:' + str(ncomp) + '. knn:' + str(knn))
    # right panel: one GAM-smoothed curve + direction arrow per super-cluster edge
    for e_i, (start, end) in enumerate(super_edgelist):
        # orient the edge so that pseudotime increases from start to end
        if pt[start] >= pt[end]:
            temp = end
            end = start
            start = temp
        x_i_start = df[df['super_cluster'] == start]['x'].values  # groupby('cluster').mean()['x'].values
        y_i_start = df[df['super_cluster'] == start]['y'].values  # .groupby('cluster').mean()['y'].values
        x_i_end = df[df['super_cluster'] == end]['x'].values  # .groupby('cluster').mean()['x'].values
        y_i_end = df[df['super_cluster'] == end]['y'].values  # groupby('cluster').mean()['y'].values
        direction_arrow = 1
        super_start_x = X_dimred[sc_supercluster_nn[start], 0]  # df[df['super_cluster'] == start].mean()['x']
        super_end_x = X_dimred[sc_supercluster_nn[end], 0]  # df[df['super_cluster'] == end].mean()['x']
        super_start_y = X_dimred[sc_supercluster_nn[start], 1]  # df[df['super_cluster'] == start].mean()['y']
        super_end_y = X_dimred[sc_supercluster_nn[end], 1]  # df[df['super_cluster'] == end].mean()['y']
        if super_start_x > super_end_x: direction_arrow = -1
        ext_maxx = False
        minx = min(super_start_x, super_end_x)
        maxx = max(super_start_x, super_end_x)
        miny = min(super_start_y, super_end_y)
        maxy = max(super_start_y, super_end_y)
        x_val = np.concatenate([x_i_start, x_i_end])
        y_val = np.concatenate([y_i_start, y_i_end])
        # keep only cells inside the bounding box spanned by the two endpoints
        idx_keep = np.where((x_val <= maxx) & (x_val >= minx))[
            0]  # np.where((X_dimred[:,0]<=maxx) & (X_dimred[:,0]>=minx))#
        idy_keep = np.where((y_val <= maxy) & (y_val >= miny))[
            0]  # np.where((X_dimred[:,1]<=maxy) & (X_dimred[:,1]>=miny))#
        idx_keep = np.intersect1d(idy_keep, idx_keep)
        x_val = x_val[idx_keep]  # X_dimred[idx_keep,0]#
        y_val = y_val[idx_keep]  # X_dimred[idx_keep,1]# y_val[idx_keep]
        print('start and end', start, '', end)
        super_mid_x = (super_start_x + super_end_x) / 2
        super_mid_y = (super_start_y + super_end_y) / 2
        from scipy.spatial import distance
        very_straight = False
        # anchor points around the two endpoints (repeated + jittered) so the
        # GAM fit is pulled through them; near-vertical edges get a midpoint too
        if abs(minx - maxx) <= 1:
            very_straight = True
            straight_level = 10
            noise = 0.01
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise, super_mid_x])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise, super_mid_y])
        else:
            straight_level = 3
            noise = 0.1  # 0.05
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise])
        # duplicate the anchors to increase their weight in the fit
        for i in range(straight_level):  # DO THE SAME FOR A MIDPOINT TOO
            y_super = np.concatenate([y_super, y_super])
            x_super = np.concatenate([x_super, x_super])
        list_selected_clus = list(zip(x_val, y_val))
        if (len(list_selected_clus) >= 1) & (very_straight == True):
            # for near-vertical edges, add the cell(s) closest to the midpoint
            dist = distance.cdist([(super_mid_x, super_mid_y)], list_selected_clus, 'euclidean')
            print('dist', dist)
            if len(list_selected_clus) >= 2:
                k = 2
            else:
                k = 1
            midpoint_loc = dist[0].argsort()[:k]  # np.where(dist[0]==np.min(dist[0]))[0][0]
            print('midpoint loc', midpoint_loc)
            midpoint_xy = []
            for i in range(k):
                midpoint_xy.append(list_selected_clus[midpoint_loc[i]])
            noise = 0.05
            print(midpoint_xy, 'is the midpoint between clus', pt[start], 'and ', pt[end])
            if k == 1:
                mid_x = np.array([midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][
                    0] - noise])  # ,midpoint_xy[1][0], midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array([midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][
                    1] - noise])  # ,midpoint_xy[1][1], midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            if k == 2:
                mid_x = np.array(
                    [midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][0] - noise, midpoint_xy[1][0],
                     midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array(
                    [midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][1] - noise, midpoint_xy[1][1],
                     midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            for i in range(3):
                mid_x = np.concatenate([mid_x, mid_x])
                mid_y = np.concatenate([mid_y, mid_y])
            x_super = np.concatenate([x_super, mid_x])
            y_super = np.concatenate([y_super, mid_y])
        x_val = np.concatenate([x_val, x_super])
        y_val = np.concatenate([y_val, y_super])
        x_val = x_val.reshape((len(x_val), -1))
        y_val = y_val.reshape((len(y_val), -1))
        xp = np.linspace(minx, maxx, 500)
        # fit a 1-D GAM y = f(x) through the cells + anchors and draw it
        gam50 = pg.LinearGAM(n_splines=4, spline_order=3, lam=10).gridsearch(x_val, y_val)
        XX = gam50.generate_X_grid(term=0, n=500)
        preds = gam50.predict(XX)
        if ext_maxx == False:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # minx+3
        else:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # maxx-3
        # cc = ['black', 'red', 'blue', 'yellow', 'pink'][random.randint(0, 4)]
        ax2.plot(XX, preds, linewidth=1, c='dimgray')
        # med_loc = np.where(xp == np.median(xp[idx_keep]))[0]
        # place the arrow at the x-location closest to the mean x of the curve
        mean_temp = np.mean(xp[idx_keep])
        closest_val = xp[idx_keep][0]
        closest_loc = idx_keep[0]
        for i, xp_val in enumerate(xp[idx_keep]):
            if abs(xp_val - mean_temp) < abs(closest_val - mean_temp):
                closest_val = xp_val
                closest_loc = idx_keep[i]
        step = 1
        if direction_arrow == 1:  # smooth instead of preds
            ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc + step] - xp[closest_loc],
                      preds[closest_loc + step] - preds[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=.2, color='dimgray')  # , head_starts_at_zero = direction_arrow )
        else:
            ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc - step] - xp[closest_loc],
                      preds[closest_loc - step] - preds[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=.2, color='dimgray')
    x_cluster = df_mean['x']
    y_cluster = df_mean['y']
    num_parc_group = len(set(cluster_labels))
    # style arrays for the super-cluster markers: terminal states get a
    # yellow edge, a 'TS' text label and a bigger dot
    c_edge = []
    width_edge = []
    pen_color = []
    super_cluster_label = []
    terminal_count_ = 0
    dot_size = []
    for i in range(len(set(super_cluster_labels))):
        if i in final_super_terminal:
            print('super cluster', i, 'is a super terminal with sub_terminal cluster',
                  sub_terminal_clusters[terminal_count_])
            width_edge.append(2)
            c_edge.append('yellow')
            pen_color.append('black')
            super_cluster_label.append('TS' + str(sub_terminal_clusters[terminal_count_]))
            dot_size.append(60)
            terminal_count_ = terminal_count_ + 1
        else:
            width_edge.append(0)
            c_edge.append('black')
            pen_color.append('grey')
            super_cluster_label.append('')
            dot_size.append(40)
    # ax2.scatter(x_cluster, y_cluster, c='red') #doesnt visualize as well to just take the embedding cluster-mean x,y values
    # text annotations for the super cluster locations
    # for i, type in enumerate(pt_str):
    #    ax2.text(df_super_mean['x'][i], df_super_mean['y'][i], 'C' + str(i), weight='bold')
    # for i in range(len(x_cluster)):
    #    ax2.text(x_cluster[i], y_cluster[i], 'c' + str(i))
    ax2.set_title('lazy:' + str(x_lazy) + ' teleport' + str(alpha_teleport) + 'super_knn:' + str(knn))
    # ax2.set_title('super_knn:' + str(knn) )
    ax2.scatter(X_dimred[:, 0], X_dimred[:, 1], c=projected_sc_pt, cmap='viridis_r', alpha=0.5)
    # ax2.scatter(df_super_mean['x'], df_super_mean['y'], c='black', s=60, edgecolors = c_edge, linewidth = width_edge)
    count_ = 0
    for i, c, w, pc, dsz in zip(sc_supercluster_nn, c_edge, width_edge, pen_color, dot_size):
        ax2.scatter(X_dimred[i, 0], X_dimred[i, 1], c='black', s=dsz, edgecolors=c, linewidth=w)
        ax2.text(X_dimred[i, 0] + 0.5, X_dimred[i, 1] + 0.5, super_cluster_label[count_],
                 color=pc)  # using the SC_NN location is good
        count_ = count_ + 1
    plt.title(title_str)
    return
def draw_trajectory_dimred(X_dimred, sc_supercluster_nn, cluster_labels, super_cluster_labels, super_edgelist, x_lazy,
                           alpha_teleport,
                           projected_sc_pt, true_label, knn, ncomp, final_super_terminal,
                           title_str="hitting times", ):
    """Plot the inferred trajectory on a 2-D embedding using quadratic fits.

    Variant of ``draw_trajectory_gams`` that smooths each super-cluster edge
    with a degree-2 polynomial fit (np.polyfit) instead of a GAM, and
    annotates cluster/super-cluster centroids with text labels.

    Left panel: embedding coloured by ``true_label``. Right panel: cells
    coloured by projected pseudotime with one fitted curve and direction
    arrow per super-cluster edge; terminal clusters get a yellow edge.

    Draws on a new matplotlib figure; returns None.
    """
    x = X_dimred[:, 0]
    y = X_dimred[:, 1]
    df = pd.DataFrame({'x': x, 'y': y, 'cluster': cluster_labels, 'super_cluster': super_cluster_labels,
                       'projected_sc_pt': projected_sc_pt},
                      columns=['x', 'y', 'cluster', 'super_cluster', 'projected_sc_pt'])
    df_mean = df.groupby('cluster', as_index=False).mean()
    sub_cluster_isin_supercluster = df_mean[['cluster', 'super_cluster']]
    sub_cluster_isin_supercluster = sub_cluster_isin_supercluster.sort_values(by='cluster')
    sub_cluster_isin_supercluster['int_supercluster'] = sub_cluster_isin_supercluster['super_cluster'].round(1).astype(
        int)
    df_super_mean = df.groupby('super_cluster', as_index=False).mean()
    # mean projected pseudotime per super-cluster (used to orient the edges)
    pt = df_super_mean['projected_sc_pt'].values
    pt_int = [int(i) for i in pt]
    pt_str = [str(i) for i in pt_int]
    pt_sub = [str(int(i)) for i in df_mean['projected_sc_pt'].values]
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    num_parc_group = len(set(true_label))
    line = np.linspace(0, 1, num_parc_group)
    # left panel: embedding coloured by the known (true) labels
    for color, group in zip(line, set(true_label)):
        where = np.where(np.array(true_label) == group)[0]
        ax1.scatter(X_dimred[where, 0], X_dimred[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
    ax1.legend(fontsize=6)
    ax1.set_title('true labels, ncomps:' + str(ncomp) + '. knn:' + str(knn))
    for e_i, (start, end) in enumerate(super_edgelist):
        # orient the edge so that pseudotime increases from start to end
        if pt[start] >= pt[end]:
            temp = end
            end = start
            start = temp
        # per-sub-cluster centroids belonging to each endpoint super-cluster
        x_i_start = df[df['super_cluster'] == start].groupby('cluster').mean()['x'].values
        y_i_start = df[df['super_cluster'] == start].groupby('cluster').mean()['y'].values
        x_i_end = df[df['super_cluster'] == end].groupby('cluster').mean()['x'].values
        y_i_end = df[df['super_cluster'] == end].groupby('cluster').mean()['y'].values
        direction_arrow = 1
        super_start_x = X_dimred[sc_supercluster_nn[start], 0]  # df[df['super_cluster'] == start].mean()['x']
        super_end_x = X_dimred[sc_supercluster_nn[end], 0]  # df[df['super_cluster'] == end].mean()['x']
        super_start_y = X_dimred[sc_supercluster_nn[start], 1]  # df[df['super_cluster'] == start].mean()['y']
        super_end_y = X_dimred[sc_supercluster_nn[end], 1]  # df[df['super_cluster'] == end].mean()['y']
        if super_start_x > super_end_x: direction_arrow = -1
        ext_maxx = False
        minx = min(super_start_x, super_end_x)
        maxx = max(super_start_x, super_end_x)
        miny = min(super_start_y, super_end_y)
        maxy = max(super_start_y, super_end_y)
        x_val = np.concatenate([x_i_start, x_i_end])
        y_val = np.concatenate([y_i_start, y_i_end])
        # keep only centroids inside the bounding box spanned by the endpoints
        idx_keep = np.where((x_val <= maxx) & (x_val >= minx))[0]
        idy_keep = np.where((y_val <= maxy) & (y_val >= miny))[0]
        print('len x-val before intersect', len(x_val))
        idx_keep = np.intersect1d(idy_keep, idx_keep)
        x_val = x_val[idx_keep]
        y_val = y_val[idx_keep]
        super_mid_x = (super_start_x + super_end_x) / 2
        super_mid_y = (super_start_y + super_end_y) / 2
        from scipy.spatial import distance
        very_straight = False
        # anchor points around the endpoints (repeated + jittered) so the
        # polynomial fit is pulled through them; near-vertical edges also
        # anchor the midpoint
        if abs(minx - maxx) <= 1:
            very_straight = True
            straight_level = 10
            noise = 0.01
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise, super_mid_x])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise, super_mid_y])
        else:
            straight_level = 3
            noise = 0.1  # 0.05
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise])
        for i in range(straight_level):  # DO THE SAME FOR A MIDPOINT TOO
            y_super = np.concatenate([y_super, y_super])
            x_super = np.concatenate([x_super, x_super])
        list_selected_clus = list(zip(x_val, y_val))
        if (len(list_selected_clus) >= 1) & (very_straight == True):
            # for near-vertical edges, add the centroid(s) closest to the midpoint
            dist = distance.cdist([(super_mid_x, super_mid_y)], list_selected_clus, 'euclidean')
            print('dist', dist)
            if len(list_selected_clus) >= 2:
                k = 2
            else:
                k = 1
            midpoint_loc = dist[0].argsort()[:k]  # np.where(dist[0]==np.min(dist[0]))[0][0]
            print('midpoint loc', midpoint_loc)
            midpoint_xy = []
            for i in range(k):
                midpoint_xy.append(list_selected_clus[midpoint_loc[i]])
            # midpoint_xy = list_selected_clus[midpoint_loc]
            noise = 0.05
            print(midpoint_xy, 'is the midpoint between clus', pt[start], 'and ', pt[end])
            if k == 1:
                mid_x = np.array([midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][
                    0] - noise])  # ,midpoint_xy[1][0], midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array([midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][
                    1] - noise])  # ,midpoint_xy[1][1], midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            if k == 2:
                mid_x = np.array(
                    [midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][0] - noise, midpoint_xy[1][0],
                     midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array(
                    [midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][1] - noise, midpoint_xy[1][1],
                     midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            for i in range(3):
                mid_x = np.concatenate([mid_x, mid_x])
                mid_y = np.concatenate([mid_y, mid_y])
            x_super = np.concatenate([x_super, mid_x])
            y_super = np.concatenate([y_super, mid_y])
        x_val = np.concatenate([x_val, x_super])
        y_val = np.concatenate([y_val, y_super])
        # quadratic fit through the centroids + anchors
        z = np.polyfit(x_val, y_val, 2)
        xp = np.linspace(minx, maxx, 500)
        p = np.poly1d(z)
        smooth = p(xp)
        if ext_maxx == False:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # minx+3
        else:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # maxx-3
        ax2.plot(xp[idx_keep], smooth[idx_keep], linewidth=3, c='dimgrey')
        # med_loc = np.where(xp == np.median(xp[idx_keep]))[0]
        # place the arrow at the x-location closest to the mean x of the curve
        mean_temp = np.mean(xp[idx_keep])
        closest_val = xp[idx_keep][0]
        closest_loc = idx_keep[0]
        for i, xp_val in enumerate(xp[idx_keep]):
            if abs(xp_val - mean_temp) < abs(closest_val - mean_temp):
                closest_val = xp_val
                closest_loc = idx_keep[i]
        step = 1
        if direction_arrow == 1:  # smooth instead of preds
            ax2.arrow(xp[closest_loc], smooth[closest_loc], xp[closest_loc + step] - xp[closest_loc],
                      smooth[closest_loc + step] - smooth[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=1, color='dimgrey')  # , head_starts_at_zero = direction_arrow )
        else:
            ax2.arrow(xp[closest_loc], smooth[closest_loc], xp[closest_loc - step] - xp[closest_loc],
                      smooth[closest_loc - step] - smooth[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=1, color='dimgrey')
    x_cluster = df_mean['x']
    y_cluster = df_mean['y']
    num_parc_group = len(set(cluster_labels))
    # marker styling: terminal clusters get a yellow edge
    c_edge = []
    width_edge = []
    for i in range(num_parc_group):
        if i in final_super_terminal:
            width_edge.append(2.5)
            c_edge.append('yellow')
        else:
            width_edge.append(0)
            c_edge.append('black')
    ax2.scatter(x_cluster, y_cluster, c='red')
    for i, type in enumerate(pt_str):
        ax2.text(df_super_mean['x'][i], df_super_mean['y'][i], 'C' + str(i), weight='bold')
    for i in range(len(x_cluster)):
        ax2.text(x_cluster[i], y_cluster[i], pt_sub[i] + 'c' + str(i))
    ax2.set_title('lazy:' + str(x_lazy) + ' teleport' + str(alpha_teleport) + 'super_knn:' + str(knn))
    ax2.scatter(X_dimred[:, 0], X_dimred[:, 1], c=projected_sc_pt, cmap='viridis_r', alpha=0.5)
    ax2.scatter(df_super_mean['x'], df_super_mean['y'], c='black', s=60, edgecolors=c_edge, linewidth=width_edge)
    plt.title(title_str)
    return
def csr_mst(adjacency_matrix):
    """Minimum spanning tree of a similarity-weighted sparse adjacency matrix.

    The input weights are similarities (larger = stronger), so they are
    first turned into strictly positive "distances" (negate, then shift so
    the smallest value becomes 1) before running scipy's MST. The result is
    symmetrised, so every kept edge carries half its transformed weight in
    both directions of the returned CSR matrix.
    """
    dist_graph = adjacency_matrix.copy()
    n_components_mst, comp_labels_mst = connected_components(csgraph=dist_graph, directed=False, return_labels=True)
    print('number of components before mst', n_components_mst)
    print('len Tcsr data', len(dist_graph.data))
    # similarity -> distance, shifted so that the minimum entry is 1
    negated = -1 * dist_graph.data
    dist_graph.data = negated - np.min(negated) + 1
    print('len Tcsr data', len(dist_graph.data))
    dist_graph = minimum_spanning_tree(dist_graph)  # adjacency_matrix)
    n_components_mst, comp_labels_mst = connected_components(csgraph=dist_graph, directed=False, return_labels=True)
    print('number of components after mst', n_components_mst)
    dist_graph = (dist_graph + dist_graph.T) * 0.5  # make symmetric
    print('number of components after symmetric mst', n_components_mst)
    print('len Tcsr data', len(dist_graph.data))
    return dist_graph
def connect_all_components(MSTcsr, cluster_graph_csr, adjacency_matrix):
    """Join a fragmented cluster graph back into a single component.

    While more than one connected component remains, find the smallest MST
    link leaving component 0 and insert the corresponding edge into
    ``cluster_graph_csr`` with its original adjacency weight. The (mutated)
    cluster graph is returned.
    """
    n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
    while n_components > 1:
        in_comp0 = comp_labels == 0
        # MST links from component 0 to everything else
        cross_links = MSTcsr[in_comp0, :][:, ~in_comp0]
        print('minimum value of link connecting components', np.min(cross_links.data))
        rows, cols, _vals = scipy.sparse.find(MSTcsr == np.min(cross_links.data))
        for r, c in zip(rows, cols):
            # only take links oriented out of component 0
            if (comp_labels[r] == 0) & (comp_labels[c] != 0):
                cluster_graph_csr[r, c] = adjacency_matrix[r, c]
        n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
    print('number of connected componnents after reconnecting ', n_components)
    return cluster_graph_csr
def local_pruning_clustergraph_mst(adjacency_matrix, global_pruning_std=1, max_outgoing=30, preserve_disconnected=True):
    """Prune the cluster graph locally and globally, then repair connectivity.

    Local pruning keeps at most ``max_outgoing`` strongest outgoing edges per
    node. Global pruning drops edges whose (std-normalised) weight is below
    ``mean - global_pruning_std * std`` — a larger ``global_pruning_std``
    therefore prunes less. If pruning fragments a component that was
    connected in the input and ``preserve_disconnected`` is True, the
    cheapest MST links are re-inserted until the original component
    structure is restored (the MST is used only for this repair step).

    Returns (edgeweights, edgelist, comp_labels) for the pruned graph.
    """
    Tcsr = csr_mst(adjacency_matrix)
    initial_links_n = len(adjacency_matrix.data)
    n_components_0, comp_labels_0 = connected_components(csgraph=adjacency_matrix, directed=False, return_labels=True)
    print('number of components before pruning', n_components_0, comp_labels_0)
    adjacency_matrix = scipy.sparse.csr_matrix.todense(adjacency_matrix)
    row_list = []
    col_list = []
    weight_list = []
    neighbor_array = adjacency_matrix  # dense rows; not listed in any order of proximity
    n_cells = neighbor_array.shape[0]
    rowi = 0
    # local pruning: per node, keep at most max_outgoing strongest links
    for i in range(neighbor_array.shape[0]):
        row = np.asarray(neighbor_array[i, :]).flatten()
        n_nonz = np.sum(row > 0)
        n_nonz = min(n_nonz, max_outgoing)
        to_keep_index = np.argsort(row)[::-1][0:n_nonz]  # strongest first
        updated_nn_weights = list(row[to_keep_index])
        for ik in range(len(to_keep_index)):
            row_list.append(rowi)
            col_list.append(to_keep_index[ik])
            dist = updated_nn_weights[ik]
            weight_list.append(dist)
        rowi = rowi + 1
    final_links_n = len(weight_list)
    print('final links n', final_links_n)
    cluster_graph_csr = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
                                   shape=(n_cells, n_cells))
    # global pruning on std-normalised weights
    sources, targets = cluster_graph_csr.nonzero()
    mask = np.zeros(len(sources), dtype=bool)
    cluster_graph_csr.data = cluster_graph_csr.data / (np.std(cluster_graph_csr.data))  # normalize
    threshold_global = np.mean(cluster_graph_csr.data) - global_pruning_std * np.std(cluster_graph_csr.data)
    mask |= (cluster_graph_csr.data < (threshold_global))  # smaller Jaccard weight means weaker edge
    cluster_graph_csr.data[mask] = 0
    cluster_graph_csr.eliminate_zeros()
    print('shape of cluster graph', cluster_graph_csr.shape)
    n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
    print('number of connected components after pruning', n_components)
    if (preserve_disconnected == True) & (n_components > n_components_0):  # preserve initial disconnected components
        Td = Tcsr.todense()
        Td[Td == 0] = 999.999  # sentinel so absent links never win the min() search
        n_components_ = n_components
        while n_components_ > n_components_0:
            for comp_i in range(n_components_0):
                # cells that originally belonged to component comp_i
                loc_x = np.where(comp_labels_0 == comp_i)[0]
                len_i = len(set(comp_labels[loc_x]))
                print('locx', loc_x, len_i)
                while len_i > 1:
                    s = list(set(comp_labels[loc_x]))
                    loc_notxx = np.intersect1d(loc_x, np.where((comp_labels != s[0]))[0])
                    loc_xx = np.intersect1d(loc_x, np.where((comp_labels == s[0]))[0])
                    sub_td = Td[loc_xx, :][:, loc_notxx]
                    locxy = np.where(Td == np.min(sub_td))
                    # BUGFIX: this inner loop previously reused `i`, clobbering
                    # the outer component index used by the np.where() below.
                    for q in range(len(locxy[0])):
                        if (comp_labels[locxy[0][q]] != comp_labels[locxy[1][q]]):
                            x = locxy[0][q]
                            y = locxy[1][q]
                            minval = adjacency_matrix[x, y]
                            print('inside reconnecting components while preserving original ', x, y, minval)
                            cluster_graph_csr[x, y] = minval
                    n_components_, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False,
                                                                      return_labels=True)
                    loc_x = np.where(comp_labels_0 == comp_i)[0]
                    len_i = len(set(comp_labels[loc_x]))
        print('number of connected componnents after reconnecting ', n_components_)
    sources, targets = cluster_graph_csr.nonzero()
    edgelist = list(zip(sources, targets))
    edgeweights = cluster_graph_csr.data / (np.std(cluster_graph_csr.data))
    trimmed_n = (initial_links_n - final_links_n) * 100 / initial_links_n
    trimmed_n_glob = (initial_links_n - len(edgeweights)) / initial_links_n
    if global_pruning_std < 0.5:
        print("percentage links trimmed from local pruning relative to start", trimmed_n)
    print("percentage links trimmed from global pruning relative to start", trimmed_n_glob)
    return edgeweights, edgelist, comp_labels
def get_sparse_from_igraph(graph, weight_attr=None):
    """Convert an igraph graph into a scipy CSR adjacency matrix.

    Entries come from the ``weight_attr`` edge attribute when given,
    otherwise every edge gets weight 1. Undirected graphs are symmetrised
    by mirroring each (u, v) edge as (v, u).
    """
    edge_pairs = graph.get_edgelist()
    if weight_attr is None:
        edge_weights = [1] * len(edge_pairs)
    else:
        edge_weights = graph.es[weight_attr]
    if not graph.is_directed():
        # mirror each edge and duplicate the weights to match
        edge_pairs = edge_pairs + [(v, u) for (u, v) in edge_pairs]
        edge_weights = list(edge_weights) + list(edge_weights)
    dims = (graph.vcount(), graph.vcount())
    if len(edge_pairs) > 0:
        return csr_matrix((edge_weights, zip(*edge_pairs)), shape=dims)
    else:
        return csr_matrix(dims)
class PARC:
def __init__(self, data, true_label=None, anndata=None, dist_std_local=2, jac_std_global='median',
keep_all_local_dist='auto',
too_big_factor=0.4, small_pop=10, jac_weighted_edges=True, knn=30, n_iter_leiden=5, random_seed=42,
num_threads=-1, distance='l2', time_smallpop=15, pseudotime=False,
root=0, path='/home/shobi/Trajectory/', super_cluster_labels=False,
super_node_degree_list=False, super_terminal_cells=False, x_lazy=0.95, alpha_teleport=0.99,
root_user="root_cluster", preserve_disconnected=True, dataset="humanCD34", super_terminal_clusters=[], do_magic=False):
# higher dist_std_local means more edges are kept
# highter jac_std_global means more edges are kept
if keep_all_local_dist == 'auto':
if data.shape[0] > 300000:
keep_all_local_dist = True # skips local pruning to increase speed
else:
keep_all_local_dist = False
self.data = data
self.true_label = true_label
self.anndata = anndata
self.dist_std_local = dist_std_local
self.jac_std_global = jac_std_global ##0.15 is also a recommended value performing empirically similar to 'median'
self.keep_all_local_dist = keep_all_local_dist
self.too_big_factor = too_big_factor ##if a cluster exceeds this share of the entire cell population, then the PARC will be run on the large cluster. at 0.4 it does not come into play
self.small_pop = small_pop # smallest cluster population to be considered a community
self.jac_weighted_edges = jac_weighted_edges
self.knn = knn
self.n_iter_leiden = n_iter_leiden
self.random_seed = random_seed # enable reproducible Leiden clustering
self.num_threads = num_threads # number of threads used in KNN search/construction
self.distance = distance # Euclidean distance 'l2' by default; other options 'ip' and 'cosine'
self.time_smallpop = time_smallpop
self.pseudotime = pseudotime
self.root = root
self.path = path
self.super_cluster_labels = super_cluster_labels
self.super_node_degree_list = super_node_degree_list
self.super_terminal_cells = super_terminal_cells
self.x_lazy = x_lazy # 1-x = probability of staying in same node
self.alpha_teleport = alpha_teleport # 1-alpha is probability of jumping
self.root_user = root_user
self.preserve_disconnected = preserve_disconnected
self.dataset = dataset
self.super_terminal_clusters = super_terminal_clusters
self.do_magic = do_magic
    def get_terminal_clusters(self, A, markov_pt, root_ai):
        """Shortlist terminal (end-state) clusters of the trajectory.

        Combines three centrality signals computed on a population-weighted
        copy of the cluster adjacency matrix ``A`` — low closeness, low
        betweenness and low out-degree — intersected with clusters of high
        Markov pseudotime ``markov_pt``. Candidates that neighbor the root
        cluster ``root_ai`` or that have 3+ neighboring terminal candidates
        are then removed.

        Parameters:
        A         -- (n_clusters, n_clusters) cluster-graph adjacency weights
        markov_pt -- per-cluster Markov pseudotime (parallel to A's rows)
        root_ai   -- index of the root cluster

        Returns a list of cluster indices deemed terminal states.
        """
        n_ = A.shape[0]
        # outlier cutoff (in std units) for centrality scores, looser for small graphs
        # NOTE(review): at exactly n_ == 40 both of the last two conditions fire
        # and n_outlier_std ends up 1 — confirm this boundary is intended.
        if n_ <= 10: n_outlier_std = 3
        if (n_ <= 40) & (n_ > 10):n_outlier_std = 2
        if n_>=40: n_outlier_std = 1
        pop_list = []
        print('get terminal', set(self.labels), np.where(self.labels == 0))
        for i in list(set(self.labels)):
            pop_list.append(len(np.where(self.labels == i)[0]))
        # we weight the out-degree based on the population of clusters to avoid allowing small clusters to become the terminals based on population alone
        A_new = A.copy()
        for i in range(A.shape[0]):
            for j in range(A.shape[0]):
                A_new[i, j] = A[i, j] * (pop_list[i] + pop_list[j]) / (pop_list[i] * pop_list[j])
        # make an igraph graph to compute the closeness
        g_dis = ig.Graph.Adjacency((A_new > 0).tolist())  # need to manually add the weights as igraph treates A>0 as boolean
        g_dis.es['weights'] = 1/A_new[A_new.nonzero()] #we want "distances" not weights for closeness and betweeness
        # clusters with outlier-trimmed below-mean betweenness are peripheral -> terminal candidates
        betweenness_score = g_dis.betweenness(weights = 'weights')
        betweenness_score_array = np.asarray(betweenness_score)
        betweenness_score_takeout_outlier = betweenness_score_array[betweenness_score_array<(np.mean(betweenness_score_array)+n_outlier_std*np.std(betweenness_score_array))]
        betweenness_list = [ i for i, score in enumerate(betweenness_score) if score < (np.mean(betweenness_score_takeout_outlier) - 0 * np.std(betweenness_score_takeout_outlier))]
        # same idea with closeness centrality
        closeness_score = g_dis.closeness( mode='ALL', cutoff=None, weights='weights', normalized=True)
        closeness_score_array = np.asarray( closeness_score)
        closeness_score_takeout_outlier = closeness_score_array[closeness_score_array < (np.mean( closeness_score_array) + n_outlier_std * np.std( closeness_score_array))]
        closeness_list = [i for i, score in enumerate(closeness_score) if
                          score < (np.mean(closeness_score_takeout_outlier) - 0 * np.std(closeness_score_takeout_outlier))]
        print('closeness_score ', [(i, score) for i, score in enumerate(closeness_score)])
        print('closeness_score shortlist', closeness_list)
        print('betweeness_score ', [(i,score) for i, score in enumerate(betweenness_score)])
        print('betweeness_score shortlist', betweenness_list)
        # make an igraph graph to compute the closeness
        #g_ = ig.Graph.Adjacency( (A_new > 0).tolist()) # need to manually add the weights as igraph treates A>0 as boolean
        #g_.es['weights'] =A_new[A_new.nonzero()] # we want "distances" not weights for closeness and betweeness
        #eig_cent_score = g_.evcent(weights='weights',scale = False, directed = True)
        #print('eigcent', eig_cent_score)
        #eig_cent_list = [i for i, score in enumerate(eig_cent_score) if score < (np.mean(eig_cent_score) - 0 * np.std(eig_cent_score))]
        #print('eigcent shortlist', eig_cent_list)
        out_deg = A_new.sum(axis=1)
        in_deg = A_new.sum(axis=0)
        # for pi, item in enumerate(out_deg):
        # out_list.append(item/pop_list[i])
        out_deg = np.asarray(out_deg)
        # print('out deg', out_deg)
        print('number of clusters', n_)
        # degree / pseudotime percentile cutoffs, tuned per graph size
        if n_ <= 10:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]
            print('low deg super', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 60))[
                0]  # 60 Ttoy #10 for human but not sure ever in play
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
            print('high pt super', loc_pt)
        if (n_ <= 40) & (n_ > 10):
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[
                0]  # np.mean(out_deg[out_deg>(np.mean(out_deg)-1*np.std(out_deg))]))[0]#np.percentile(out_deg, 50))[0]#np.mean(out_deg[out_deg>(np.mean(out_deg)-1*np.std(out_deg))]))[0]#np.percentile(out_deg, 50))[0] # 30 for Toy #was 50 for Human
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 20))[0]
            print('low deg super', loc_deg)
            print('low in-deg super', loc_deg_in)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 10))[0]  # 60 Toy
            print('high pt super', loc_pt)
        if n_ > 40:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]  # 15 Toy
            print('low deg', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 30))[0]  # 60Toy
            print('high pt', loc_pt)
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
        #terminal_clusters = list(set(loc_deg) | set(loc_deg_in))
        #terminal_clusters = list(set(closeness_list) & set(loc_pt))
        # combine pairwise intersections of the three centrality shortlists,
        # then require high pseudotime
        terminal_clusters_1 = list(set(closeness_list)&set(betweenness_list))
        terminal_clusters_2 = list(set(closeness_list) & set(loc_deg))
        terminal_clusters_3 = list(set(betweenness_list) & set(loc_deg))
        terminal_clusters = list(set(terminal_clusters_1)|set(terminal_clusters_2))
        terminal_clusters = list(set(terminal_clusters)|set(terminal_clusters_3))
        terminal_clusters = list(set(terminal_clusters) & set(loc_pt))
        terminal_org = terminal_clusters.copy()
        print('original terminal clusters', terminal_org)
        # drop candidates that neighbor the root, or that are surrounded by
        # 3+ other terminal candidates (iterate over a frozen copy)
        for terminal_i in terminal_org:
            removed_terminal_i = False
            # print('terminal state', terminal_i)
            count_nn = 0
            neigh_terminal = np.where(A[:, terminal_i] > 0)[0]
            if neigh_terminal.size > 0:
                for item in neigh_terminal:
                    # print('terminal state', terminal_i)
                    if item in terminal_clusters:
                        print('item and terminal',
                              item, terminal_clusters)
                        count_nn = count_nn + 1
                    if item == root_ai:  # if the terminal state is a neighbor of
                        terminal_clusters.remove(terminal_i)
                        print('we removed cluster', terminal_i, 'from the shortlist of terminal states ')
                        removed_terminal_i = True
                if count_nn >= 3:
                    if removed_terminal_i == False: terminal_clusters.remove(terminal_i)
                    print('TS', terminal_i, 'had 3 or more neighboring terminal states')
        print('terminal_clusters', terminal_clusters)
        return terminal_clusters
def get_terminal_clusters_old(self, A, markov_pt, root_ai):
    """Heuristically select terminal (end-state) clusters of the cluster graph.

    A cluster is a terminal candidate when its population-weighted out-degree
    (or in-degree) is low AND its Markov pseudotime is high. Candidates that
    neighbor the root, or that have 3+ terminal neighbors, are then pruned.

    :param A: cluster-graph adjacency matrix (n_clusters x n_clusters).
    :param markov_pt: per-cluster Markov pseudotime, indexable by cluster index.
    :param root_ai: index of the root cluster.
    :return: list of terminal-cluster indices.
    """
    # population (cell count) of each cluster, from the per-cell labels
    pop_list = []
    print('get terminal', set(self.labels), np.where(self.labels == 0))
    for i in list(set(self.labels)):
        pop_list.append(len(np.where(self.labels == i)[0]))
    # we weight the out-degree based on the population of clusters to avoid allowing small clusters to become the terminals based on population alone
    A_new = A.copy()
    for i in range(A.shape[0]):
        for j in range(A.shape[0]):
            A_new[i, j] = A[i, j] * (pop_list[i] + pop_list[j]) / (pop_list[i] * pop_list[j])
    out_deg = A_new.sum(axis=1)
    in_deg = A_new.sum(axis=0)
    # for pi, item in enumerate(out_deg):
    # out_list.append(item/pop_list[i])
    out_deg = np.asarray(out_deg)
    print('out deg', out_deg)
    n_ = A.shape[0]
    print('number of clusters', n_)
    # percentile cut-offs are tuned by graph size: fewer clusters use a
    # stricter pseudotime cut, many clusters use a stricter degree cut
    if n_ <= 10:
        loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]
        print('low deg super', loc_deg)
        loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 60))[
            0]  # 60 Ttoy #10 for human but not sure ever in play
        loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
        print('high pt super', loc_pt)
    if (n_ <= 40) & (n_ > 10):
        loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[
            0]  # np.mean(out_deg[out_deg>(np.mean(out_deg)-1*np.std(out_deg))]))[0]#np.percentile(out_deg, 50))[0]#np.mean(out_deg[out_deg>(np.mean(out_deg)-1*np.std(out_deg))]))[0]#np.percentile(out_deg, 50))[0] # 30 for Toy #was 50 for Human
        loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 20))[0]
        print('low deg super', loc_deg)
        print('low in-deg super', loc_deg_in)
        loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 30))[0]  # 60 Toy
        print('high pt super', loc_pt)
    if n_ > 40:
        loc_deg = np.where(out_deg <= np.percentile(out_deg, 30))[0]  # 15 Toy
        print('low deg', loc_deg)
        loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 40))[0]  # 60Toy
        print('high pt', loc_pt)
        loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
    # candidates: (low out-degree OR low in-degree) AND high pseudotime
    terminal_clusters = list(set(loc_deg) | set(loc_deg_in))
    terminal_clusters = list(set(terminal_clusters) & set(loc_pt))
    # terminal_clusters.reverse()
    terminal_org = terminal_clusters.copy()
    print('original terminal clusters', terminal_org)
    # prune: drop candidates adjacent to the root, or with >= 3 terminal
    # neighbors; the removed_terminal_i flag prevents a double removal
    for terminal_i in terminal_org:
        removed_terminal_i = False
        # print('terminal state', terminal_i)
        count_nn = 0
        neigh_terminal = np.where(A[:, terminal_i] > 0)[0]
        if neigh_terminal.size > 0:
            for item in neigh_terminal:
                # print('terminal state', terminal_i)
                if item in terminal_clusters:
                    print('item and terminal',
                          item, terminal_clusters)
                    count_nn = count_nn + 1
                if item == root_ai:  # if the terminal state is a neighbor of
                    terminal_clusters.remove(terminal_i)
                    print('we removed cluster', terminal_i, 'from the shortlist of terminal states ')
                    removed_terminal_i = True
            if count_nn >= 3:
                # NOTE(review): the message below says "4 or more" but the
                # condition fires at 3+ terminal neighbors (the newer variant
                # of this method prints "3 or more") — confirm intended wording
                if removed_terminal_i == False: terminal_clusters.remove(terminal_i)
                print('TS', terminal_i, 'had 4 or more neighboring terminal states')
    print('terminal_clusters', terminal_clusters)
    return terminal_clusters
def compute_hitting_time(self, sparse_graph, root, x_lazy, alpha_teleport, number_eig=0):
    """Expected hitting times from ``root`` to every node of the graph.

    Builds the Green's function (pseudo-inverse) of the beta-normalized graph
    Laplacian of a lazy, teleporting random walk from the eigendecomposition
    of the symmetric normalized Laplacian, then reads hitting and commute
    times off the resulting matrix.

    :param sparse_graph: symmetric sparse (csr) adjacency matrix.
    :param root: index of the start node.
    :param x_lazy: step probability (1 - x_lazy is the probability of staying put).
    :param alpha_teleport: edge-following probability (1 - alpha is teleport probability).
    :param number_eig: number of eigenpairs to sum; 0 means use all of them.
    :return: (abs hitting times from root, roundtrip commute times from root).
    """
    # 1- alpha is the probabilty of teleporting
    # 1- x_lazy is the probability of staying in current state (be lazy)
    beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
    N = sparse_graph.shape[0]
    # print('adjacency in compute hitting', sparse_graph)
    # sparse_graph = scipy.sparse.csr_matrix(sparse_graph)
    print('start compute hitting')
    A = scipy.sparse.csr_matrix.todense(sparse_graph)  # A is the adjacency matrix
    print('is graph symmetric', (A.transpose() == A).all())
    lap = csgraph.laplacian(sparse_graph,
                            normed=False)  # compute regular laplacian (normed = False) to infer the degree matrix where D = L+A
    # see example and definition in the SciPy ref https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.laplacian.html
    A = scipy.sparse.csr_matrix.todense(lap)
    print('is laplacian symmetric', (A.transpose() == A).all())
    deg = sparse_graph + lap  # Recall that L=D-A (modified for weighted where D_ii is sum of edge weights and A_ij is the weight of particular edge)
    deg.data = 1 / np.sqrt(deg.data)  ##inv sqrt of degree matrix
    deg[deg == np.inf] = 0
    norm_lap = csgraph.laplacian(sparse_graph, normed=True)  # returns symmetric normalized D^-.5 xL x D^-.5
    Id = np.zeros((N, N), float)
    np.fill_diagonal(Id, 1)
    norm_lap = scipy.sparse.csr_matrix.todense(norm_lap)
    eig_val, eig_vec = np.linalg.eig(
        norm_lap)  # eig_vec[:,i] is eigenvector for eigenvalue eig_val[i] not eigh as this is only for symmetric. the eig vecs are not in decsending order
    # print('eig val', eig_val.shape, eig_val)
    if number_eig == 0: number_eig = eig_vec.shape[1]
    # print('number of eig vec', number_eig)
    Greens_matrix = np.zeros((N, N), float)
    beta_norm_lap = np.zeros((N, N), float)
    # Xu: indicator columns for the root; Id_Xv - Xu selects (v minus root) terms
    Xu = np.zeros((N, N))
    Xu[:, root] = 1
    Id_Xv = np.zeros((N, N), int)
    np.fill_diagonal(Id_Xv, 1)
    Xv_Xu = Id_Xv - Xu
    start_ = 0
    if alpha_teleport == 1:
        start_ = 1  # if there are no jumps (alph_teleport ==1), then the first term in beta-normalized Green's function will have 0 in denominator (first eigenvalue==0)
    for i in range(start_, number_eig):  # 0 instead of 1th eg
        vec_i = eig_vec[:, i]
        factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
        vec_i = np.reshape(vec_i, (-1, 1))
        eigen_vec_mult = vec_i.dot(vec_i.T)
        Greens_matrix = Greens_matrix + (
                eigen_vec_mult / factor)  # Greens function is the inverse of the beta-normalized laplacian
        beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor)  # beta-normalized laplacian
    # conjugate the Green's function by D^-1/2 to return to the original basis
    deg = scipy.sparse.csr_matrix.todense(deg)
    temp = Greens_matrix.dot(deg)
    temp = deg.dot(temp) * beta_teleport
    hitting_matrix = np.zeros((N, N), float)
    diag_row = np.diagonal(temp)
    for i in range(N):
        hitting_matrix[i, :] = diag_row - temp[i, :]
    roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
    temp = Xv_Xu.dot(temp)
    final_hitting_times = np.diagonal(
        temp)  ## number_eig x 1 vector of hitting times from root (u) to number_eig of other nodes
    roundtrip_times = roundtrip_commute_matrix[root, :]
    return abs(final_hitting_times), roundtrip_times
def pagerank_compute(self, P_bias, max_iterations=200):
    """Stationary distribution of the lazy, teleporting random walk.

    Builds the lazy/teleporting transition matrix from ``P_bias``, runs a
    fixed number of power-iteration steps from a uniform start, and (for a
    small too_big_factor) caps outlier probabilities at the 90th percentile
    before renormalizing.

    :param P_bias: row-stochastic transition matrix (n x n ndarray).
    :param max_iterations: number of power-iteration steps to run.
    :return: 1-D ndarray of stationary probabilities summing to 1.
    """
    laziness = self.x_lazy          # 1 - x is prob lazy
    teleport = self.alpha_teleport
    n = P_bias.shape[0]
    eye = np.identity(n)
    # lazy walk: with probability (1 - laziness) remain in the current state
    P_bias = laziness * P_bias + (1 - laziness) * eye
    # teleporting walk: with probability (1 - teleport), jump uniformly to any other state
    P_bias = teleport * P_bias + ((1 - teleport) * (1 / n) * (np.ones((n, n)) - eye))
    # random uniform initial stationary distribution as a row vector
    pi_vec = (np.ones((n, 1)) * (1.0 / float(n))).T
    for _ in range(max_iterations):
        pi_vec = pi_vec.dot(P_bias)
    pi_vec = pi_vec[0] / np.sum(pi_vec[0])
    upperlim = np.percentile(pi_vec, 90)
    lowerlim = np.percentile(pi_vec, 10)  # computed but currently unused; kept for parity
    if self.too_big_factor < 0.3:
        # cap outliers at the 90th percentile, then renormalize
        pi_vec = np.minimum(pi_vec, upperlim)
        pi_vec = pi_vec / np.sum(pi_vec)
    print('final stationary', [(i, pp0) for i, pp0 in enumerate(pi_vec)])
    return pi_vec
def prob_reaching_terminal_state1(self, terminal_state, all_terminal_states, A, root, pt, num_sim,q,cumstateChangeHist, cumstateChangeHist_all,seed):
    """Monte-Carlo estimate of which states lie on paths reaching ``terminal_state``.

    Runs ``num_sim`` random walks from ``root`` on the cluster graph; per
    walk, records which states were visited. Accumulates visit indicators
    for successful walks (reached the target) and for all walks, and appends
    both accumulators to the shared list ``q`` (run as a multiprocessing
    worker — see simulate_branch_probability).

    :param terminal_state: target terminal cluster index.
    :param all_terminal_states: indices of all terminal clusters.
    :param A: cluster-graph adjacency matrix.
    :param root: start cluster for each simulated walk.
    :param pt: per-cluster pseudotime values.
    :param num_sim: number of random-walk simulations to run in this worker.
    :param q: shared manager list; receives [cumstateChangeHist, cumstateChangeHist_all].
    :param cumstateChangeHist: (1, n_states) accumulator for successful walks.
    :param cumstateChangeHist_all: (1, n_states) accumulator for all walks.
    :param seed: per-worker numpy random seed (keeps workers decorrelated).
    """
    np.random.seed(seed)
    print('root', root)
    print('terminal state target', terminal_state)
    n_states = A.shape[0]
    n_components, labels = connected_components(csgraph=csr_matrix(A), directed=False)
    A = A / (np.max(A))
    # A[A<=0.05]=0
    # give isolated states (all-zero rows) a self-loop so row-normalization is defined
    jj = 0
    for row in A:
        if np.all(row == 0): A[jj, jj] = 1
        jj = jj + 1
    P = A / A.sum(axis=1).reshape((n_states, 1))
    # if P.shape[0]>16:
    # print("P 16", P[:,16])
    n_steps = int(2* n_states)  # 2
    currentState = root
    state = np.zeros((1, n_states))
    state[0, currentState] = 1
    currentState = root
    state = np.zeros((1, n_states))
    state[0, currentState] = 1
    state_root = state.copy()
    neigh_terminal = np.where(A[:, terminal_state] > 0)[0]
    # terminal states that are "later" than the target or not adjacent to it
    # (collected but not used to stop walks in this version)
    non_nn_terminal_state = []
    for ts_i in all_terminal_states:
        if pt[ts_i] > pt[terminal_state]: non_nn_terminal_state.append(ts_i)
    for ts_i in all_terminal_states:
        if np.all(neigh_terminal != ts_i): non_nn_terminal_state.append(ts_i)
        # print(ts_i, 'is a non-neighbor terminal state to the target terminal', terminal_state)
    #cumstateChangeHist = np.zeros((1, n_states))
    #cumstateChangeHist_all = np.zeros((1, n_states))
    count_reach_terminal_state = 0
    count_r = 0
    for i in range(num_sim):
        # distr_hist = [[0 for i in range(n_states)]]
        stateChangeHist = np.zeros((n_states, n_states))
        stateChangeHist[root, root] = 1
        state = state_root
        currentState = root
        stateHist = state
        terminal_state_found = False
        non_neighbor_terminal_state_reached = False
        # print('root', root)
        # print('terminal state target', terminal_state)
        # walk at most n_steps, stopping early once the target is reached
        x = 0
        while (x < n_steps) & (
                (terminal_state_found == False)):  # & (non_neighbor_terminal_state_reached == False)):
            currentRow = np.ma.masked_values((P[currentState]), 0.0)
            nextState = simulate_multinomial(currentRow)
            # print('next state', nextState)
            if nextState == terminal_state:
                terminal_state_found = True
                count_r = count_r+1
            # print('terminal state found at step', x)
            # if nextState in non_nn_terminal_state:
            # non_neighbor_terminal_state_reached = True
            # Keep track of state changes
            stateChangeHist[currentState, nextState] += 1
            # Keep track of the state vector itself
            state = np.zeros((1, n_states))
            state[0, nextState] = 1.0
            # Keep track of state history
            stateHist = np.append(stateHist, state, axis=0)
            currentState = nextState
            x = x + 1
        if (terminal_state_found == True):
            # add a 0/1 visited-state indicator for this successful walk
            cumstateChangeHist = cumstateChangeHist + np.any(
                stateChangeHist > 0, axis=0)
            count_reach_terminal_state = count_reach_terminal_state + 1
        # visited-state indicator for every walk, successful or not
        cumstateChangeHist_all = cumstateChangeHist_all + np.any(
            stateChangeHist > 0, axis=0)
    # avoid division by zero on states that were never reached (e.g. terminal states that come after the target terminal state)
    cumstateChangeHist_all[cumstateChangeHist_all == 0] = 1
    prob_ = cumstateChangeHist / cumstateChangeHist_all
    np.set_printoptions(precision=3)
    #print('in multiproc: number of times Terminal state', terminal_state, 'is found:', count_reach_terminal_state)
    #print('in multiproc: changeHist_all[0,terminal]', terminal_state, 'is found:', cumstateChangeHist_all[0, terminal_state])
    #print(cumstateChangeHist)
    #print(cumstateChangeHist_all)
    q.append([cumstateChangeHist, cumstateChangeHist_all])
def simulate_markov_sub(self, A, num_sim, hitting_array, q, root):
    """Worker: simulate random walks from ``root`` and record hitting "times".

    Each of ``num_sim`` walks runs for 2*n_states steps; the hitting value for
    a state is the accumulated sigmoid-transformed edge distance along the
    walk up to its first visit (or n_steps+1 if never visited). One column per
    simulation is appended; the result is pushed onto the shared list ``q``
    (run as a multiprocessing worker — see simulate_markov).

    :param A: row-normalizable transition/adjacency matrix.
    :param num_sim: number of walks to simulate in this worker.
    :param hitting_array: seed array of shape (n_states, 1); its dummy first
        column is stripped before the result is queued.
    :param q: shared manager list receiving the (n_states, num_sim) result.
    :param root: start state of every walk.
    """
    n_states = A.shape[0]
    P = A / A.sum(axis=1).reshape((n_states, 1))
    # hitting_array = np.ones((P.shape[0], 1)) * 1000
    hitting_array_temp = np.zeros((P.shape[0], 1)).astype('float64')
    n_steps = int(2 * n_states)
    hitting_array_final = np.zeros((1, n_states))
    currentState = root
    print('root is', root)
    state = np.zeros((1, n_states))
    state[0, currentState] = 1
    state_root = state.copy()
    for i in range(num_sim):
        dist_list = []
        # print(i, 'th simulation in Markov')
        # if i % 10 == 0: print(i, 'th simulation in Markov', time.ctime())
        state = state_root
        currentState = root
        stateHist = state
        for x in range(n_steps):
            currentRow = np.ma.masked_values((P[currentState]), 0.0)
            nextState = simulate_multinomial(currentRow)
            dist = A[currentState, nextState]
            # sigmoid transform: larger edge weight -> smaller step "distance"
            dist = (1 / ((1 + math.exp((dist - 1)))))
            dist_list.append(dist)
            # print('next state', nextState)
            # Keep track of state changes
            # stateChangeHist[currentState,nextState]+=1
            # Keep track of the state vector itself
            state = np.zeros((1, n_states))
            state[0, nextState] = 1.0
            currentState = nextState
            # Keep track of state history
            stateHist = np.append(stateHist, state, axis=0)
        # calculate the actual distribution over the n_states so far
        # totals = np.sum(stateHist, axis=0)
        # gt = np.sum(totals)
        # distrib = totals / gt
        # distrib = np.reshape(distrib, (1, n_states))
        # distr_hist = np.append(distr_hist, distrib, axis=0)
        # per-state hitting value: cumulative distance up to first visit
        for state_i in range(P.shape[0]):
            # print('first reach state', state_i, 'at step', np.where(stateHist[:, state_i] == 1)[0][0])
            first_time_at_statei = np.where(stateHist[:, state_i] == 1)[0]
            if len(first_time_at_statei) == 0:
                # print('did not reach state', state_i,'setting dummy path length')
                hitting_array_temp[state_i, 0] = n_steps + 1
            else:
                total_dist = 0
                for ff in range(first_time_at_statei[0]):
                    total_dist = dist_list[ff] + total_dist
                hitting_array_temp[state_i, 0] = total_dist  # first_time_at_statei[0]
        # hitting_array_temp[hitting_array_temp==(n_steps+1)] = np.mean(hitting_array_temp[hitting_array_temp!=n_steps+1])
        hitting_array = np.append(hitting_array, hitting_array_temp, axis=1)
        # print('hitting temp', hitting_array_temp)
        # if i % 100 == 0: print(i, 'th','has hitting temp', hitting_array_temp.flatten())
    # drop the dummy seed column before handing the result back
    hitting_array = hitting_array[:, 1:]
    q.append(hitting_array)  # put(hitting_array)
    # return hitting_array
def simulate_branch_probability(self, terminal_state, all_terminal_states, A, root, pt, num_sim=300 ):
    """Estimate per-cluster probabilities of reaching ``terminal_state``.

    Fans the Monte-Carlo walks out over up to 5 worker processes (each runs
    prob_reaching_terminal_state1), merges the workers' visit accumulators,
    and converts them into a rescaled branch-probability vector.

    :param terminal_state: target terminal cluster index.
    :param all_terminal_states: indices of all terminal clusters.
    :param A: cluster-graph adjacency matrix.
    :param root: start cluster of the simulated walks.
    :param pt: per-cluster pseudotime values.
    :param num_sim: total number of walk simulations across all workers.
    :return: list of branch probabilities, one entry per cluster.
    """
    n_states = A.shape[0]
    ncpu = multiprocessing.cpu_count()
    if (ncpu == 1) | (ncpu == 2):
        n_jobs = 1
    elif ncpu > 2:
        n_jobs = min(ncpu - 1, 5)
    print('njobs', n_jobs)
    num_sim_pp = int(num_sim / n_jobs)  # num of simulations per process
    print('num_sim_pp', num_sim_pp)
    jobs = []
    manager = multiprocessing.Manager()
    q = manager.list()
    # distinct seeds keep the workers' random walks decorrelated
    seed_list = list(range(n_jobs))
    for i in range(n_jobs):
        cumstateChangeHist = np.zeros((1, n_states))
        cumstateChangeHist_all = np.zeros((1, n_states))
        process = multiprocessing.Process(target=self.prob_reaching_terminal_state1,args=(terminal_state, all_terminal_states, A, root, pt, num_sim_pp,q, cumstateChangeHist, cumstateChangeHist_all, seed_list[i]))
        jobs.append(process)
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    # merge worker results: q[i] == [visits-on-successful-walks, visits-on-all-walks]
    cumhistory_vec = q[0][0]
    cumhistory_vec_all = q[0][1]
    count_reached= cumhistory_vec_all[0,terminal_state]
    print('length of q', len(q))
    for i in range(1,len(q)):#[1,2,3,4]:
        #for qi in q[1:]:
        cumhistory_vec = cumhistory_vec + q[i][0]
        cumhistory_vec_all = cumhistory_vec_all+ q[i][1]
        #hitting_array = np.append(hitting_array, qi, axis=1) # .get(), axis=1)
        count_reached = count_reached+ q[i][1][0,terminal_state]
    print('accumulated number of times Terminal state',terminal_state, 'is found:',count_reached)
    print('cumhistory_vec', cumhistory_vec)
    print('cumhistory_vec_all', cumhistory_vec_all)
    # never-visited states would divide by zero; give them a count of 1
    cumhistory_vec_all[cumhistory_vec_all == 0] = 1
    prob_ = cumhistory_vec /cumhistory_vec_all
    np.set_printoptions(precision=3)
    print('prob', prob_)
    if count_reached == 0:
        prob_[:, terminal_state] = 0
        print('never reached state', terminal_state)
    else:
        # states with probability exactly 1 are pinned: zeroed before the
        # rescale, restored to 1 after, so the rescale only affects the rest
        loc_1 = np.where(prob_ == 1)
        print('loc_1', loc_1)
        loc_1 = loc_1[1]
        print('loc_1', loc_1)
        # prob_[0, terminal_state] = 0 # starting at the root, index=0
        prob_[0, loc_1] = 0
        #print('zerod out prob', prob_)
        prob_ = prob_ / min(1,1.1 * np.max(prob_))
        # prob_[0, terminal_state] = 1
        prob_[0, loc_1] = 1
        #prob_ = np.sqrt(prob_)
        print('np.max', np.max(prob_))
        #prob_ = prob_/np.max(prob_)
    print('scaled prob', prob_)
    return list(prob_)[0]
def simulate_markov(self, A, root):
    """Estimate per-cluster hitting times from ``root`` by simulation.

    Fans 1300 random walks out over up to 5 worker processes (each runs
    simulate_markov_sub), gathers the per-simulation hitting columns, and
    summarizes each state's hitting time as the mean of its fastest ~15th
    percentile of successful walks.

    :param A: cluster-graph adjacency/transition matrix.
    :param root: start cluster of every walk.
    :return: 1-D ndarray of per-cluster hitting times.
    """
    n_states = A.shape[0]
    P = A / A.sum(axis=1).reshape((n_states, 1))
    # print('row normed P',P.shape, P, P.sum(axis=1))
    x_lazy = self.x_lazy  # 1-x is prob lazy
    alpha_teleport = self.alpha_teleport
    # bias_P is the transition probability matrix
    # P = x_lazy * P + (1 - x_lazy) * np.identity(n_states)
    # print(P, P.sum(axis=1))
    # P = alpha_teleport * P + ((1 - alpha_teleport) * (1 / n_states) * (np.ones((n_states, n_states))))
    # print('check prob of each row sum to one', P.sum(axis=1))
    currentState = root
    state = np.zeros((1, n_states))
    state[0, currentState] = 1
    state_root = state.copy()
    stateHist = state
    dfStateHist = pd.DataFrame(state)
    distr_hist = np.zeros([1, n_states])
    num_sim = 1300  # 1000 # 1300
    ncpu = multiprocessing.cpu_count()
    if (ncpu == 1) | (ncpu == 2):
        n_jobs = 1
    elif ncpu > 2:
        n_jobs = min(ncpu - 1, 5)
    print('njobs', n_jobs)
    num_sim_pp = int(num_sim / n_jobs)  # num of simulations per process
    print('num_sim_pp', num_sim_pp)
    n_steps = int(2 * n_states)
    jobs = []
    manager = multiprocessing.Manager()
    q = manager.list()
    for i in range(n_jobs):
        # seed column of 1000s; the worker strips it before queueing results
        hitting_array = np.ones((P.shape[0], 1)) * 1000
        process = multiprocessing.Process(target=self.simulate_markov_sub,
                                          args=(P, num_sim_pp, hitting_array, q, root))
        jobs.append(process)
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    print('ended all multiprocesses, will retrieve and reshape')
    # concatenate the workers' per-simulation hitting columns
    hitting_array = q[0]
    for qi in q[1:]:
        hitting_array = np.append(hitting_array, qi, axis=1)  # .get(), axis=1)
    print('finished getting from queue', hitting_array.shape)
    hitting_array_final = np.zeros((1, n_states))
    no_times_state_reached_array = np.zeros((1, n_states))
    # n_steps + 1 is the sentinel "never reached" value set by the workers
    for i in range(n_states):
        rowtemp = hitting_array[i, :]
        no_times_state_reached_array[0, i] = np.sum(rowtemp != (n_steps + 1))
    lower_quart = np.percentile(no_times_state_reached_array, 25)
    # loc_rarely_reached = np.where(no_times_state_reached_array<= upper_quart)
    # print('rarely reached clus', loc_rarely_reached, upper_quart, no_times_state_reached_array)
    for i in range(n_states):
        rowtemp = hitting_array[i, :]
        no_times_state_reached = np.sum(rowtemp != (n_steps + 1))
        if no_times_state_reached != 0:
            # print('the number of times state ',i, 'has been reached is', no_times_state_reached )
            # if no_times_state_reached < lower_quart:
            # perc = np.percentile(rowtemp[rowtemp != n_steps + 1], 5) + 0.001
            # print('in lower quart for state', i)
            perc = np.percentile(rowtemp[rowtemp != n_steps + 1], 15) + 0.001  # 15 for Human and Toy
            # print('state ', i,' has perc' ,perc)
            # print('smaller than perc', rowtemp[rowtemp <= perc])
            # hitting_array_final[0, i] = np.min(rowtemp[rowtemp != (n_steps + 1)])
            hitting_array_final[0, i] = np.mean(rowtemp[rowtemp <= perc])
        else:
            hitting_array_final[0, i] = (n_steps + 1)
    # hitting_array=np.mean(hitting_array, axis=1)
    print('hitting from sim markov', [(i, val) for i, val in enumerate(hitting_array_final.flatten())])
    return hitting_array_final[0]
def compute_hitting_time_onbias(self, laplacian, inv_sqr_deg, root, x_lazy, alpha_teleport, number_eig=0):
    """Hitting times from ``root`` given a precomputed (biased) Laplacian.

    Same Green's-function construction as compute_hitting_time, but takes the
    dense normalized Laplacian and its D^-1/2 matrix directly rather than
    deriving them from an adjacency matrix.

    :param laplacian: dense symmetric normalized Laplacian (N x N).
    :param inv_sqr_deg: dense D^-1/2 matrix used to undo the normalization.
    :param root: index of the start node.
    :param x_lazy: step probability (1 - x_lazy is the probability of staying put).
    :param alpha_teleport: edge-following probability (1 - alpha is teleport probability).
    :param number_eig: number of eigenpairs to sum; 0 means use all of them.
    :return: (abs hitting times from root, roundtrip commute times from root).
    """
    # 1- alpha is the probabilty of teleporting
    # 1- x_lazy is the probability of staying in current state (be lazy)
    beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
    N = laplacian.shape[0]
    print('is laplacian of biased symmetric', (laplacian.transpose() == laplacian).all())
    Id = np.zeros((N, N), float)
    np.fill_diagonal(Id, 1)
    # norm_lap = scipy.sparse.csr_matrix.todense(laplacian)
    eig_val, eig_vec = np.linalg.eig(
        laplacian)  # eig_vec[:,i] is eigenvector for eigenvalue eig_val[i] not eigh as this is only for symmetric. the eig vecs are not in decsending order
    print('eig val', eig_val.shape)
    if number_eig == 0: number_eig = eig_vec.shape[1]
    print('number of eig vec', number_eig)
    Greens_matrix = np.zeros((N, N), float)
    beta_norm_lap = np.zeros((N, N), float)
    # Xu: indicator columns for the root; Id_Xv - Xu selects (v minus root) terms
    Xu = np.zeros((N, N))
    Xu[:, root] = 1
    Id_Xv = np.zeros((N, N), int)
    np.fill_diagonal(Id_Xv, 1)
    Xv_Xu = Id_Xv - Xu
    start_ = 0
    if alpha_teleport == 1:
        start_ = 1  # if there are no jumps (alph_teleport ==1), then the first term in beta-normalized Green's function will have 0 in denominator (first eigenvalue==0)
    for i in range(start_, number_eig):  # 0 instead of 1th eg
        # print(i, 'th eigenvalue is', eig_val[i])
        vec_i = eig_vec[:, i]
        factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
        # print('factor', 1 / factor)
        vec_i = np.reshape(vec_i, (-1, 1))
        eigen_vec_mult = vec_i.dot(vec_i.T)
        Greens_matrix = Greens_matrix + (
                eigen_vec_mult / factor)  # Greens function is the inverse of the beta-normalized laplacian
        beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor)  # beta-normalized laplacian
    # conjugate by D^-1/2 to return to the original (unnormalized) basis
    temp = Greens_matrix.dot(inv_sqr_deg)
    temp = inv_sqr_deg.dot(temp) * beta_teleport
    hitting_matrix = np.zeros((N, N), float)
    diag_row = np.diagonal(temp)
    for i in range(N):
        hitting_matrix[i, :] = diag_row - temp[i, :]
    roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
    temp = Xv_Xu.dot(temp)
    final_hitting_times = np.diagonal(
        temp)  ## number_eig x 1 vector of hitting times from root (u) to number_eig of other nodes
    roundtrip_times = roundtrip_commute_matrix[root, :]
    return abs(final_hitting_times), roundtrip_times
def project_hittingtimes_sc(self, pt):
    """Project cluster-level pseudotimes onto individual cells.

    Each cell's pseudotime is the average of the cluster pseudotimes of its
    k nearest neighbours, weighted by the fraction of neighbours belonging
    to each cluster.

    :param pt: per-cluster pseudotime values, indexable by cluster label.
    :return: 1-D ndarray of per-cell pseudotimes.
    """
    knn_sc = 30 if self.data.shape[0] > 1000 else 10
    neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
    print('shape of neighbor in project onto sc', neighbor_array.shape)
    labels = np.asarray(self.labels)
    sc_pt = np.zeros((len(self.labels),))
    for cell_i, nn_row in enumerate(neighbor_array):
        nn_labels = labels[nn_row]
        # weighted mean of neighbouring clusters' pseudotimes;
        # weight = share of the k neighbours falling in that cluster
        total = 0
        for clus in set(list(nn_labels)):
            total = total + pt[clus] * np.sum(nn_labels == clus) / knn_sc
        sc_pt[cell_i] = total
    return sc_pt
def project_branch_probability_sc(self, bp_array_clus):
    """Project cluster-level branch probabilities onto individual cells.

    Builds a (cells x clusters) weight matrix from each cell's k nearest
    neighbours' cluster memberships, multiplies by the cluster branch
    probabilities, column-normalizes, and stores the result on
    ``self.single_cell_bp``.

    :param bp_array_clus: (n_clusters x n_terminal_states) branch probabilities.
    :return: None (result stored on self.single_cell_bp).
    """
    if self.data.shape[0] > 1000:
        knn_sc = 10  # 30
    else:
        knn_sc = 10
    neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
    print('shape of neighbor in project onto sc', neighbor_array.shape)
    labels = np.asarray(self.labels)
    # weight_array[cell, cluster] = fraction of the cell's k NN in that cluster
    weight_array = np.zeros((len(self.labels), len(list(set(self.labels)))))
    for irow, row in enumerate(neighbor_array):
        mean_weight = 0
        #print('row in neighbor array of cells', row, labels.shape)
        neighboring_clus = labels[row]
        print('neighbor clusters labels', neighboring_clus)
        for clus_i in set(list(neighboring_clus)):
            # hitting_time_clus_i = df_graph[clus_i]
            num_clus_i = np.sum(neighboring_clus == clus_i)
            # print('hitting and num_clus for Clusi', hitting_time_clus_i, num_clus_i)
            wi = num_clus_i / knn_sc
            weight_array[irow, clus_i] = wi
        # print('mean weight',mean_weight)
        #print('rowi of weight array', weight_array[irow,:])
    #print('shape weight array', weight_array)
    print(weight_array)
    bp_array_sc = weight_array.dot(bp_array_clus)
    bp_array_sc = bp_array_sc * 1. / np.max(bp_array_sc, axis=0)  #divide cell by max value in that column
    print('column max:',np.max(bp_array_sc, axis=0))
    #print('sc bp array max', np.max(bp_array_sc))
    #bp_array_sc = bp_array_sc/np.max(bp_array_sc)
    # cells in a terminal cluster get boosted above 1 (1.2) when some cell
    # outside the cluster already saturates that column at 1
    for i, label_ts in enumerate(list(self.terminal_clusters)):
        print('set labels', set(labels))
        print('set terminal clus' ,set(self.terminal_clusters))
        loc_i = np.where(np.asarray(self.labels) == label_ts)[0]
        loc_noti = np.where(np.asarray(self.labels) != label_ts)[0]
        if np.max(bp_array_sc[loc_noti,i])==1: bp_array_sc[loc_i,i]=1.2
        print('terminal cluster', label_ts, len(loc_i), loc_i)
    print('sc bp array', bp_array_sc)
    self.single_cell_bp = bp_array_sc
    return
def make_knn_struct(self, too_big=False, big_cluster=None):
    """Build an hnswlib approximate nearest-neighbour index.

    :param too_big: when True, index ``big_cluster`` (sub-clustering of an
        oversized cluster) with fixed l2 parameters; otherwise index self.data
        with parameters tuned to the data size/dimensionality.
    :param big_cluster: data matrix to index when ``too_big`` is True.
    :return: hnswlib.Index ready for knn_query.
    """
    if self.knn > 190: print('please provide a lower K_in for KNN graph construction')
    ef_query = max(100, self.knn + 1)  # ef always should be >K. higher ef, more accuate query
    if too_big == False:
        num_dims = self.data.shape[1]
        n_elements = self.data.shape[0]
        p = hnswlib.Index(space=self.distance, dim=num_dims)  # default to Euclidean distance
        p.set_num_threads(self.num_threads)  # allow user to set threads used in KNN construction
        if n_elements < 10000:
            ef_param_const = min(n_elements - 10, 500)
            ef_query = ef_param_const
            # bug fix: the print previously had a dangling comma and never
            # reported the value being set
            print('setting ef_construction to', ef_param_const)
        else:
            ef_param_const = 200
        if num_dims > 30:
            p.init_index(max_elements=n_elements, ef_construction=ef_param_const,
                         M=48)  ## good for scRNA seq where dimensionality is high
        else:
            p.init_index(max_elements=n_elements, ef_construction=200, M=30, )
        p.add_items(self.data)
    if too_big == True:
        num_dims = big_cluster.shape[1]
        n_elements = big_cluster.shape[0]
        p = hnswlib.Index(space='l2', dim=num_dims)
        p.init_index(max_elements=n_elements, ef_construction=200, M=30)
        p.add_items(big_cluster)
    p.set_ef(ef_query)  # ef should always be > k
    return p
def make_csrmatrix_noselfloop(self, neighbor_array, distance_array):
    """Build a sparse KNN graph, optionally locally pruned, without self-loops.

    With local pruning enabled (keep_all_local_dist False), neighbours whose
    distance exceeds mean + dist_std_local * std of the row are dropped and
    self-loops removed; edge weights are sqrt distances. Without pruning, all
    neighbours are kept and weights are 1/(distance + 0.1).

    :param neighbor_array: (n_cells, k) neighbour indices from the KNN query.
    :param distance_array: (n_cells, k) matching (squared) distances.
    :return: csr_matrix of shape (n_cells, n_cells).
    """
    local_pruning_bool = not (self.keep_all_local_dist)
    if local_pruning_bool == True: print('commencing local pruning based on minkowski metric at',
                                         self.dist_std_local, 's.dev above mean')
    row_list = []
    col_list = []
    weight_list = []
    neighbor_array = neighbor_array  # not listed in in any order of proximity
    # print('size neighbor array', neighbor_array.shape)
    num_neigh = neighbor_array.shape[1]
    distance_array = distance_array
    n_neighbors = neighbor_array.shape[1]
    n_cells = neighbor_array.shape[0]
    rowi = 0
    count_0dist = 0
    discard_count = 0
    if local_pruning_bool == True:  # do some local pruning based on distance
        for row in neighbor_array:
            distlist = distance_array[rowi, :]
            to_keep = np.where(distlist <= np.mean(distlist) + self.dist_std_local * np.std(distlist))[0]  # 0*std
            updated_nn_ind = row[np.ix_(to_keep)]
            updated_nn_weights = distlist[np.ix_(to_keep)]
            discard_count = discard_count + (num_neigh - len(to_keep))
            for ik in range(len(updated_nn_ind)):
                if rowi != row[ik]:  # remove self-loops
                    row_list.append(rowi)
                    col_list.append(updated_nn_ind[ik])
                    # sqrt because hnswlib l2 distances are squared
                    dist = np.sqrt(updated_nn_weights[ik])
                    if dist == 0:
                        count_0dist = count_0dist + 1
                    weight_list.append(dist)
            rowi = rowi + 1
    if local_pruning_bool == False:  # dont prune based on distance
        row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
        col_list = neighbor_array.flatten().tolist()
        # inverse-distance weights; +0.1 guards against division by zero
        weight_list = (1. / (distance_array.flatten() + 0.1)).tolist()
    # if local_pruning_bool == True: print('share of neighbors discarded in local distance pruning %.1f' % (discard_count / neighbor_array.size))
    csr_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
                           shape=(n_cells, n_cells))
    return csr_graph
def func_mode(self, ll):
    """Return the mode (most frequent element) of list ``ll``.

    If multiple items are maximal, the function returns the first one
    encountered (in set-iteration order), matching the original behavior.
    """
    tally = {item: ll.count(item) for item in set(ll)}
    return max(tally, key=tally.get)
def run_toobig_subPARC(self, X_data, jac_std_toobig=1,
                       jac_weighted_edges=True):
    """Re-cluster the cells of an oversized cluster with Leiden.

    Builds a KNN graph on ``X_data``, prunes globally (distance outliers) and
    by Jaccard similarity, runs Leiden community detection, and merges tiny
    communities into their neighbours' majority community.

    :param X_data: data matrix (cells of the too-big cluster).
    :param jac_std_toobig: Jaccard pruning threshold in std-devs below the
        mean (or 'median' to use the median similarity).
    :param jac_weighted_edges: use Jaccard weights in the Leiden partition.
    :return: array of sub-cluster labels, one per row of X_data.
    """
    n_elements = X_data.shape[0]
    hnsw = self.make_knn_struct(too_big=True, big_cluster=X_data)
    if self.knn >= 0.8 * n_elements:
        # cap k so the query cannot exceed the number of points
        k = int(0.5 * n_elements)
    else:
        k = self.knn
    neighbor_array, distance_array = hnsw.knn_query(X_data, k=k)
    # print('shapes of neigh and dist array', neighbor_array.shape, distance_array.shape)
    csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
    sources, targets = csr_array.nonzero()
    # global pruning: drop edges whose distance is > mean + 5 std
    mask = np.zeros(len(sources), dtype=bool)
    mask |= (csr_array.data > (
            np.mean(csr_array.data) + np.std(csr_array.data) * 5))  # smaller distance means stronger edge
    # print('sum of mask', sum(mask))
    csr_array.data[mask] = 0
    csr_array.eliminate_zeros()
    sources, targets = csr_array.nonzero()
    edgelist = list(zip(sources.tolist(), targets.tolist()))
    edgelist_copy = edgelist.copy()
    G = ig.Graph(edgelist, edge_attrs={'weight': csr_array.data.tolist()})
    sim_list = G.similarity_jaccard(pairs=edgelist_copy)  # list of jaccard weights
    # Jaccard pruning: keep only edges above the similarity threshold
    new_edgelist = []
    sim_list_array = np.asarray(sim_list)
    if jac_std_toobig == 'median':
        threshold = np.median(sim_list)
    else:
        threshold = np.mean(sim_list) - jac_std_toobig * np.std(sim_list)
    strong_locs = np.where(sim_list_array > threshold)[0]
    for ii in strong_locs: new_edgelist.append(edgelist_copy[ii])
    sim_list_new = list(sim_list_array[strong_locs])
    if jac_weighted_edges == True:
        G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
    else:
        G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist))
    G_sim.simplify(combine_edges='sum')
    resolution_parameter = 1
    if jac_weighted_edges == True:
        partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
                                             n_iterations=self.n_iter_leiden, seed=self.random_seed)
    else:
        partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
                                             n_iterations=self.n_iter_leiden, seed=self.random_seed)
    # print('Q= %.2f' % partition.quality())
    PARC_labels_leiden = np.asarray(partition.membership)
    PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
    # first pass: reassign cells of clusters with < 5 members to the majority
    # cluster among their original KNN neighbours
    small_pop_list = []
    small_cluster_list = []
    small_pop_exist = False
    dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
    for cluster in set(PARC_labels_leiden):
        population = len(np.where(PARC_labels_leiden == cluster)[0])
        if population < 5:  # <10
            small_pop_exist = True
            small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
            small_cluster_list.append(cluster)
    for small_cluster in small_pop_list:
        for single_cell in small_cluster:
            old_neighbors = neighbor_array[single_cell, :]
            group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
            group_of_old_neighbors = list(group_of_old_neighbors.flatten())
            available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
            if len(available_neighbours) > 0:
                available_neighbours_list = [value for value in group_of_old_neighbors if
                                             value in list(available_neighbours)]
                best_group = max(available_neighbours_list, key=available_neighbours_list.count)
                PARC_labels_leiden[single_cell] = best_group
    # second pass: keep merging clusters with < 10 members (time-boxed to 5s)
    do_while_time = time.time()
    while (small_pop_exist == True) & (time.time() - do_while_time < 5):
        small_pop_list = []
        small_pop_exist = False
        for cluster in set(list(PARC_labels_leiden.flatten())):
            population = len(np.where(PARC_labels_leiden == cluster)[0])
            if population < 10:
                small_pop_exist = True
                # print(cluster, ' has small population of', population, )
                small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
        for small_cluster in small_pop_list:
            for single_cell in small_cluster:
                old_neighbors = neighbor_array[single_cell, :]
                group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
                group_of_old_neighbors = list(group_of_old_neighbors.flatten())
                best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
                PARC_labels_leiden[single_cell] = best_group
    # relabel to consecutive integers starting at 0
    dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
    self.labels = PARC_labels_leiden
    print('finished labels')
    # self.anndata.obs['parc_label'] = self.labels
    # cma1_cluster = self.anndata.obs.groupby('parc_label').mean('Cma1')
    return PARC_labels_leiden
def recompute_weights(self, clustergraph_ig, pop_list_raw):
sparse_clustergraph = get_sparse_from_igraph(clustergraph_ig, weight_attr='weight')
n = sparse_clustergraph.shape[0]
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
weights = sparse_clustergraph.data
# print('edgelist of combined clustergraph', edgelist)
# print('edge weights of combined clustergraph', weights)
new_weights = []
i = 0
for s, t in edgelist:
pop_s = pop_list_raw[s]
pop_t = pop_list_raw[t]
w = weights[i]
nw = w * (pop_s + pop_t) / (pop_s * pop_t) # *
new_weights.append(nw)
# print('old and new', w, nw)
i = i + 1
scale_factor = max(new_weights) - min(new_weights)
wmin = min(new_weights)
# wmax = max(new_weights)
# print('weights before scaling', new_weights)
new_weights = [(wi + wmin) / scale_factor for wi in new_weights]
# print('weights after scaling', new_weights)
sparse_clustergraph = csr_matrix((np.array(new_weights), (sources, targets)),
shape=(n, n))
# print('new weights', new_weights)
# print(sparse_clustergraph)
# print('reweighted sparse clustergraph')
# print(sparse_clustergraph)
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
return sparse_clustergraph, edgelist
def find_root_HumanCD34(self, graph_dense, PARC_labels_leiden, root_idx, true_labels):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
# print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_idx]
return graph_node_label, majority_truth_labels, deg_list, root
def find_root_bcell(self, graph_dense, PARC_labels_leiden, root_user, true_labels):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
# print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_user]
return graph_node_label, majority_truth_labels, deg_list, root
def find_root(self, graph_dense, PARC_labels_leiden, root_user, true_labels, super_cluster_labels_sub,
super_node_degree_list):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
min_deg = 1000
super_min_deg = 1000
found_super_and_sub_root = False
found_any_root = False
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
print('deg list', deg_list) # locallytrimmed_g.degree()
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
# print('cluster', cluster_i, 'has majority', majority_truth, 'with degree list', deg_list)
if self.super_cluster_labels != False:
super_majority_cluster = self.func_mode(list(np.asarray(super_cluster_labels_sub)[cluster_i_loc]))
super_majority_cluster_loc = np.where(np.asarray(super_cluster_labels_sub) == super_majority_cluster)[0]
super_majority_truth = self.func_mode(list(true_labels[super_majority_cluster_loc]))
# print('spr node degree list sub',super_node_degree_list, super_majority_cluster)
super_node_degree = super_node_degree_list[super_majority_cluster]
if (str(root_user) in majority_truth) & (str(root_user) in str(super_majority_truth)):
if super_node_degree < super_min_deg:
# if deg_list[cluster_i] < min_deg:
found_super_and_sub_root = True
root = cluster_i
found_any_root = True
min_deg = deg_list[ci]
super_min_deg = super_node_degree
print('new root is', root, ' with degree', min_deg, 'and super node degree',
super_min_deg)
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
if (self.super_cluster_labels == False) | (found_super_and_sub_root == False):
print('self.super_cluster_labels', super_cluster_labels_sub, ' foundsuper_cluster_sub and super root',
found_super_and_sub_root)
for ic, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
print('cluster', cluster_i, 'set true labels', set(true_labels))
true_labels = np.asarray(true_labels)
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
print('cluster', cluster_i, 'has majority', majority_truth, 'with degree list', deg_list)
if (str(root_user) in str(majority_truth)):
print('did not find a super and sub cluster with majority ', root_user)
if deg_list[ic] < min_deg:
root = cluster_i
found_any_root = True
min_deg = deg_list[ic]
print('new root is', root, ' with degree', min_deg)
# print('len graph node label', graph_node_label)
if found_any_root == False:
print('setting arbitrary root', cluster_i)
self.root = cluster_i
return graph_node_label, majority_truth_labels, deg_list, root
def full_graph_paths(self, X_data, n_components_original=1):
# make igraph object of low-K KNN using the knn_struct PCA-dimension space made in PARC.
# This is later used by find_shortest_path for sc_bp visual
# neighbor array is not listed in in any order of proximity
print('number of components in the original full graph', n_components_original)
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=3)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
k_0 = 3
if n_components_original == 1:
while (n_comp > 1):
k_0 = k_0 + 1
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
if n_components_original > 1:
while (k_0 <= 5) & (n_comp > n_components_original):
k_0 = k_0 + 1
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
row_list = []
print('size neighbor array in low-KNN in pca-space for visualization', neighbor_array.shape)
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (distance_array.flatten()).tolist()
csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
sources, targets = csr_full_graph.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
Gr = ig.Graph(edgelist, edge_attrs={'weight': csr_full_graph.data.tolist()})
Gr.simplify(combine_edges='sum')
return Gr
def get_gene_expression(self, gene_exp, title_gene=""):
fig_0, ax = plt.subplots()
sc_pt = self.single_cell_pt_markov
sc_bp_original = self.single_cell_bp
n_terminal_states = sc_bp_original.shape[1]
jet = cm.get_cmap('jet', n_terminal_states)
cmap_ = jet(range(n_terminal_states))
# print('cmap', cmap_)
for i in range(n_terminal_states):
sc_bp = sc_bp_original.copy()
loc_terminal_i = np.where(np.asarray(self.labels) == self.terminal_clusters[i])[0]
sc_bp[loc_terminal_i,:] = 1.4
loc_i = np.where(sc_bp[:, i] > 0.8)[0]
val_pt = [sc_pt[pt_i] for pt_i in loc_i] # TODO, replace with array to speed up
# max_val_pt = np.percentile(np.asarray(val_pt),90)
max_val_pt = max(val_pt)
#print('gene exp max pt', max_val_pt)
loc_i_bp = np.where(sc_bp[:, i] > 0.000)[0] #0.001
loc_i_sc = np.where(np.asarray(sc_pt) <= max_val_pt)[0]
# print('loc i bp', loc_i_bp)
# print('loc i sc', loc_i_sc)
loc_ = np.intersect1d(loc_i_bp, loc_i_sc)
# print('loc_', loc_.shape)
gam_in = np.asarray(sc_pt)[loc_]
x = gam_in.reshape(-1, 1)
y = np.asarray(gene_exp)[loc_].reshape(-1, 1)
# print('Gene Expression:', gam_in.shape)
weights = np.asarray(sc_bp[:, i])[loc_].reshape(-1, 1)
# np.asarray(sc_bp[:, i])[loc_].reshape(-1, 1)
# print('weights',weights)
# print('weights ==0', np.sum(weights == 0))
# print('Gene Expression: setting up subplot number',i)
if len(loc_)>1:
#geneGAM = pg.LinearGAM(n_splines=20, spline_order=5, lam=10).fit(x, y, weights=weights)
geneGAM = pg.LinearGAM(n_splines=10, spline_order=4, lam=10).fit(x, y, weights=weights)
nx_spacing = 100
xval = np.linspace(min(sc_pt), max_val_pt, nx_spacing * 2)
yg = geneGAM.predict(X=xval)
else: print('loc_ has length zero')
ax.plot(xval, yg, color=cmap_[i], linewidth=2, zorder=3, linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round', label='TS:' + str(self.terminal_clusters[i]))
plt.legend()
plt.title('Gene Expression ' + title_gene)
return
def run_subPARC(self):
root_user = self.root_user
X_data = self.data
too_big_factor = self.too_big_factor
small_pop = self.small_pop
jac_std_global = self.jac_std_global
jac_weighted_edges = self.jac_weighted_edges
n_elements = X_data.shape[0]
# if n_elements < 2000: self.knn = 10
n_elements = X_data.shape[0]
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=self.knn)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
#### construct full graph
row_list = []
neighbor_array = neighbor_array # not listed in in any order of proximity
print('size neighbor array', neighbor_array.shape)
num_neigh = neighbor_array.shape[1]
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (1. / (distance_array.flatten() + 0.05)).tolist()
# if local_pruning_bool == True: print('share of neighbors discarded in local distance pruning %.1f' % (discard_count / neighbor_array.size))
csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
#DO MAGIC IMPUTATION#
if self.do_magic == True:
from sklearn.preprocessing import normalize
magic_steps = 3
Transition_full_graph = normalize(csr_full_graph, norm='l1', axis=1) ** magic_steps
imputed_data = pd.DataFrame(np.dot(Transition_full_graph.todense(), data), index=data.index, columns=data.columns )
n_original_comp, n_original_comp_labels = connected_components(csr_full_graph, directed=False)
sources, targets = csr_full_graph.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
G = ig.Graph(edgelist, edge_attrs={'weight': csr_full_graph.data.tolist()})
sim_list = G.similarity_jaccard(pairs=edgelist) # list of jaccard weights
ig_fullgraph = ig.Graph(list(edgelist), edge_attrs={'weight': sim_list})
ig_fullgraph.simplify(combine_edges='sum')
inv_simlist = [1 - i for i in sim_list]
# full_graph_shortpath = ig.Graph(list(edgelist), edge_attrs={'weight': inv_simlist}) #the weights reflect distances
# full_graph_shortpath.simplify(combine_edges='sum')
# self.full_graph_shortpath = full_graph_shortpath
self.full_graph_shortpath = self.full_graph_paths(X_data, n_original_comp)
####
sources, targets = csr_array.nonzero()
edgelist = list(zip(sources, targets))
edgelist_copy = edgelist.copy()
G = ig.Graph(edgelist, edge_attrs={'weight': csr_array.data.tolist()})
# print('average degree of prejacard graph is %.1f'% (np.mean(G.degree())))
# print('computing Jaccard metric')
sim_list = G.similarity_jaccard(pairs=edgelist_copy)
print('commencing global pruning')
sim_list_array = np.asarray(sim_list)
edge_list_copy_array = np.asarray(edgelist_copy)
if jac_std_global == 'median':
threshold = np.median(sim_list)
else:
threshold = np.mean(sim_list) - jac_std_global * np.std(sim_list)
strong_locs = np.where(sim_list_array > threshold)[0]
print('Share of edges kept after Global Pruning %.2f' % (len(strong_locs) / len(sim_list)), '%')
new_edgelist = list(edge_list_copy_array[strong_locs])
sim_list_new = list(sim_list_array[strong_locs])
G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
# print('average degree of graph is %.1f' % (np.mean(G_sim.degree())))
G_sim.simplify(combine_edges='sum') # "first"
# print('average degree of SIMPLE graph is %.1f' % (np.mean(G_sim.degree())))
print('commencing community detection')
if jac_weighted_edges == True:
start_leiden = time.time()
# print('call leiden on weighted graph for ', self.n_iter_leiden, 'iterations')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
n_iterations=self.n_iter_leiden, seed=self.random_seed)
print(time.time() - start_leiden)
else:
start_leiden = time.time()
# print('call leiden on unweighted graph', self.n_iter_leiden, 'iterations')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
n_iterations=self.n_iter_leiden, seed=self.random_seed)
print(time.time() - start_leiden)
time_end_PARC = time.time()
# print('Q= %.1f' % (partition.quality()))
PARC_labels_leiden = np.asarray(partition.membership)
PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
pop_list_1 = []
for item in set(list(PARC_labels_leiden.flatten())):
pop_list_1.append([item, list(PARC_labels_leiden.flatten()).count(item)])
print(pop_list_1)
too_big = False
# print('labels found after Leiden', set(list(PARC_labels_leiden.T)[0])) will have some outlier clusters that need to be added to a cluster if a cluster has members that are KNN
cluster_i_loc = np.where(PARC_labels_leiden == 0)[
0] # the 0th cluster is the largest one. so if cluster 0 is not too big, then the others wont be too big either
pop_i = len(cluster_i_loc)
print('largest cluster population', pop_i, too_big_factor, n_elements)
if pop_i > too_big_factor * n_elements: # 0.4
too_big = True
print('too big is', too_big)
cluster_big_loc = cluster_i_loc
list_pop_too_bigs = [pop_i]
cluster_too_big = 0
while too_big == True:
X_data_big = X_data[cluster_big_loc, :]
print(X_data_big.shape)
PARC_labels_leiden_big = self.run_toobig_subPARC(X_data_big)
# print('set of new big labels ', set(PARC_labels_leiden_big.flatten()))
PARC_labels_leiden_big = PARC_labels_leiden_big + 1000
# print('set of new big labels +1000 ', set(list(PARC_labels_leiden_big.flatten())))
pop_list = []
for item in set(list(PARC_labels_leiden_big.flatten())):
pop_list.append([item, list(PARC_labels_leiden_big.flatten()).count(item)])
# print('pop of new big labels', pop_list)
jj = 0
print('shape PARC_labels_leiden', PARC_labels_leiden.shape)
for j in cluster_big_loc:
PARC_labels_leiden[j] = PARC_labels_leiden_big[jj]
jj = jj + 1
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
print('new set of labels ')
pop_list_1 = []
for item in set(list(PARC_labels_leiden.flatten())):
pop_list_1.append([item, list(PARC_labels_leiden.flatten()).count(item)])
print(pop_list_1, set(PARC_labels_leiden))
too_big = False
set_PARC_labels_leiden = set(PARC_labels_leiden)
PARC_labels_leiden = np.asarray(PARC_labels_leiden)
for cluster_ii in set_PARC_labels_leiden:
cluster_ii_loc = np.where(PARC_labels_leiden == cluster_ii)[0]
pop_ii = len(cluster_ii_loc)
not_yet_expanded = pop_ii not in list_pop_too_bigs
if pop_ii > too_big_factor * n_elements and not_yet_expanded == True:
too_big = True
print('cluster', cluster_ii, 'is too big and has population', pop_ii)
cluster_big_loc = cluster_ii_loc
cluster_big = cluster_ii
big_pop = pop_ii
if too_big == True:
list_pop_too_bigs.append(big_pop)
print('cluster', cluster_big, 'is too big with population', big_pop, '. It will be expanded')
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
small_pop_list = []
small_cluster_list = []
small_pop_exist = False
for cluster in set(PARC_labels_leiden):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop: # 10
small_pop_exist = True
small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
small_cluster_list.append(cluster)
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
if len(available_neighbours) > 0:
available_neighbours_list = [value for value in group_of_old_neighbors if
value in list(available_neighbours)]
best_group = max(available_neighbours_list, key=available_neighbours_list.count)
PARC_labels_leiden[single_cell] = best_group
time_smallpop = time.time()
while (small_pop_exist) == True & (time.time() - time_smallpop < 15):
small_pop_list = []
small_pop_exist = False
for cluster in set(list(PARC_labels_leiden.flatten())):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop:
small_pop_exist = True
# print(cluster, ' has small population of', population, )
small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
PARC_labels_leiden[single_cell] = best_group
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
PARC_labels_leiden = list(PARC_labels_leiden.flatten())
# print('final labels allocation', set(PARC_labels_leiden))
pop_list = []
pop_list_raw = []
for item in range(len(set(PARC_labels_leiden))):
pop_item = PARC_labels_leiden.count(item)
pop_list.append((item, pop_item))
pop_list_raw.append(pop_item)
print('list of cluster labels and populations', len(pop_list), pop_list)
self.labels = PARC_labels_leiden # list
n_clus = len(set(self.labels))
##determine majority truth
if self.pseudotime == True:
## Make cluster-graph (1)
vc_graph = ig.VertexClustering(ig_fullgraph,
membership=PARC_labels_leiden) # jaccard weights, bigger is better
vc_graph_old = ig.VertexClustering(G_sim, membership=PARC_labels_leiden)
# print('vc graph G_sim', vc_graph)
vc_graph = vc_graph.cluster_graph(combine_edges='sum')
vc_graph_old = vc_graph_old.cluster_graph(combine_edges='sum')
# print('vc graph G_sim', vc_graph)
# print('vc graph G_sim old', vc_graph_old)
reweighted_sparse_vc, edgelist = self.recompute_weights(vc_graph, pop_list_raw)
print('len old edge list', edgelist) # 0.15 for CD34
if self.dataset == 'toy': # ''humanCD34':# == False:
global_pruning_std = 2
print('Toy: global cluster graph pruning level', global_pruning_std)
# toy data is usually simpler so we dont need to prune the links as the clusters are usually well separated such that spurious links dont exist
elif self.dataset == 'bcell':
global_pruning_std = 0.15
print('Bcell: global cluster graph pruning level', global_pruning_std)
else:
global_pruning_std = 0.15
print('Humancd34: global cluster graph pruning level', global_pruning_std)
edgeweights, edgelist, comp_labels = local_pruning_clustergraph_mst(reweighted_sparse_vc,
global_pruning_std=global_pruning_std,
preserve_disconnected=self.preserve_disconnected) # 0.8 on 20knn and 40ncomp #0.15
self.connected_comp_labels = comp_labels
print('final comp labels set', set(comp_labels))
print('len new edge list', edgelist)
locallytrimmed_g = ig.Graph(edgelist, edge_attrs={'weight': edgeweights.tolist()})
# print('locally trimmed_g', locallytrimmed_g)
locallytrimmed_g = locallytrimmed_g.simplify(combine_edges='sum')
# print('locally trimmed and simplified', locallytrimmed_g)
locallytrimmed_sparse_vc = get_sparse_from_igraph(locallytrimmed_g, weight_attr='weight')
layout = locallytrimmed_g.layout_fruchterman_reingold(
weights='weight') ##final layout based on locally trimmed
# globally trimmed link
sources, targets = locallytrimmed_sparse_vc.nonzero()
edgelist_simple = list(zip(sources.tolist(), targets.tolist()))
edgelist_unique = set(tuple(sorted(l)) for l in edgelist_simple) # keep only one of (0,1) and (1,0)
self.edgelist_unique = edgelist_unique
self.edgelist = edgelist
x_lazy = self.x_lazy
alpha_teleport = self.alpha_teleport
# number of components
graph_dict = {}
n_components, labels = connected_components(csgraph=locallytrimmed_sparse_vc, directed=False,
return_labels=True)
print('there are ', n_components, 'components in the graph')
df_graph = pd.DataFrame(locallytrimmed_sparse_vc.todense())
df_graph['cc'] = labels
df_graph['pt'] = float('NaN')
df_graph['markov_pt'] = float('NaN')
df_graph['majority_truth'] = 'maj truth'
df_graph['graph_node_label'] = 'node label'
set_parc_labels = list(set(PARC_labels_leiden))
set_parc_labels.sort()
print('parc labels', set_parc_labels)
terminal_clus = []
node_deg_list = []
super_terminal_clus_revised = []
pd_columnnames_terminal = []
dict_terminal_super_sub_pairs = {}
self.root = []
for comp_i in range(n_components):
loc_compi = np.where(labels == comp_i)[0]
print('loc_compi', loc_compi)
a_i = df_graph.iloc[loc_compi][loc_compi].values
a_i = csr_matrix(a_i, (a_i.shape[0], a_i.shape[0]))
cluster_labels_subi = [x for x in loc_compi]
sc_labels_subi = [PARC_labels_leiden[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
sc_truelabels_subi = [self.true_label[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
if self.dataset == 'toy':
if self.super_cluster_labels != False:
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
print('super node degree', self.super_node_degree_list)
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
else:
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
[], [])
elif self.dataset == 'humanCD34':
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_HumanCD34(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi)
elif self.dataset == 'bcell':
if self.super_cluster_labels != False:
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
'''
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_bcell(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi)
'''
else: # if this is p0.run()
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
[], [])
self.root.append(root_i)
for item in node_deg_list_i:
node_deg_list.append(item)
print('a_i shape, true labels shape', a_i.shape, len(sc_truelabels_subi), len(sc_labels_subi))
new_root_index_found = False
for ii, llabel in enumerate(cluster_labels_subi):
if root_i == llabel:
new_root_index = ii
new_root_index_found = True
print('new root index', new_root_index)
if new_root_index_found == False:
print('cannot find the new root index')
new_root_index = 0
hitting_times, roundtrip_times = self.compute_hitting_time(a_i, root=new_root_index,
x_lazy=x_lazy, alpha_teleport=alpha_teleport)
# rescale hitting times
very_high = np.mean(hitting_times) + 1.5 * np.std(hitting_times)
without_very_high_pt = [iii for iii in hitting_times if iii < very_high]
new_very_high = np.mean(without_very_high_pt) + np.std(without_very_high_pt)
print('very high, and new very high', very_high, new_very_high)
new_hitting_times = [x if x < very_high else very_high for x in hitting_times]
hitting_times = np.asarray(new_hitting_times)
scaling_fac = 10 / max(hitting_times)
hitting_times = hitting_times * scaling_fac
s_ai, t_ai = a_i.nonzero()
edgelist_ai = list(zip(s_ai, t_ai))
edgeweights_ai = a_i.data
# print('edgelist ai', edgelist_ai)
# print('edgeweight ai', edgeweights_ai)
biased_edgeweights_ai = get_biased_weights(edgelist_ai, edgeweights_ai, hitting_times)
# biased_sparse = csr_matrix((biased_edgeweights, (row, col)))
adjacency_matrix_ai = np.zeros((a_i.shape[0], a_i.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix_ai[start, end] = biased_edgeweights_ai[i]
markov_hitting_times_ai = self.simulate_markov(adjacency_matrix_ai,
new_root_index) # +adjacency_matrix.T))
print('markov_hitting times ')
for eee, ttt in enumerate(markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
very_high = np.mean(markov_hitting_times_ai) + 1.5 * np.std(markov_hitting_times_ai)
very_high = min(very_high, max(markov_hitting_times_ai))
without_very_high_pt = [iii for iii in markov_hitting_times_ai if iii < very_high]
new_very_high = min(np.mean(without_very_high_pt) + np.std(without_very_high_pt), very_high)
print('very high, and new very high', very_high, new_very_high)
new_markov_hitting_times_ai = [x if x < very_high else very_high for x in markov_hitting_times_ai]
for eee, ttt in enumerate(new_markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
markov_hitting_times_ai = np.asarray(new_markov_hitting_times_ai)
scaling_fac = 10 / max(markov_hitting_times_ai)
markov_hitting_times_ai = markov_hitting_times_ai * scaling_fac
for eee, ttt in enumerate(markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
print('markov hitting times', [(i, j) for i, j in enumerate(markov_hitting_times_ai)])
print('hitting times', [(i, j) for i, j in enumerate(hitting_times)])
markov_hitting_times_ai = (markov_hitting_times_ai )#+ hitting_times)*.5 #consensus
adjacency_matrix_csr_ai = sparse.csr_matrix(adjacency_matrix_ai)
(sources, targets) = adjacency_matrix_csr_ai.nonzero()
edgelist_ai = list(zip(sources, targets))
weights_ai = adjacency_matrix_csr_ai.data
bias_weights_2_ai = get_biased_weights(edgelist_ai, weights_ai, markov_hitting_times_ai, round_no=2)
adjacency_matrix2_ai = np.zeros((adjacency_matrix_ai.shape[0], adjacency_matrix_ai.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix2_ai[start, end] = bias_weights_2_ai[i]
if self.super_terminal_cells == False:
terminal_clus_ai = self.get_terminal_clusters(adjacency_matrix2_ai, markov_hitting_times_ai,
new_root_index)
for i in terminal_clus_ai:
terminal_clus.append(cluster_labels_subi[i])
elif len(self.super_terminal_clusters) > 0:
sub_terminal_clus_temp_ = []
terminal_clus_ai = []
for i in self.super_terminal_clusters:
print('super cluster terminal label', i)
sub_terminal_clus_temp_loc = np.where(np.asarray(self.super_cluster_labels) == i)[0]
# print('sub_terminal_clus_temp_loc', sub_terminal_clus_temp_loc)
temp_set = set(list(np.asarray(self.labels)[sub_terminal_clus_temp_loc]))
# print('temp set', temp_set)
temp_max_pt = 0
most_likely_sub_terminal = False
count_frequency_super_in_sub = 0
for j in temp_set:
super_cluster_composition_loc = np.where(np.asarray(self.labels) == j)[0]
super_cluster_composition = self.func_mode(
list(np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]))
# print('the composision of sub cluster', j, 'is mostly', super_cluster_composition)
if (markov_hitting_times_ai[j] > temp_max_pt) & (super_cluster_composition == i):
temp_max_pt = markov_hitting_times_ai[j]
print('super, j and temp max pt', i, j, temp_max_pt)
most_likely_sub_terminal = j
if most_likely_sub_terminal == False:
print('no sub cluster has majority made of super-cluster ', i)
for j in temp_set:
count_frequency_super_in_sub_temp = list(
np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]).count(j)
if (markov_hitting_times_ai[j] > temp_max_pt) & (
count_frequency_super_in_sub_temp > count_frequency_super_in_sub):
count_frequency_super_in_sub = count_frequency_super_in_sub_temp
temp_max_pt = markov_hitting_times_ai[j]
most_likely_sub_terminal = j
sub_terminal_clus_temp_.append(most_likely_sub_terminal)
if (markov_hitting_times_ai[most_likely_sub_terminal] > np.percentile(
np.asarray(markov_hitting_times_ai), 30)):
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
super_terminal_clus_revised.append(i)
terminal_clus.append(most_likely_sub_terminal)
terminal_clus_ai.append(
np.where(np.asarray(cluster_labels_subi) == most_likely_sub_terminal)[0][0]) # =i
# terminal_clus_ai.append(most_likely_sub_terminal)
print('the sub terminal cluster that best captures the super terminal', i, 'is',
most_likely_sub_terminal)
else:
print('the sub terminal cluster that best captures the super terminal', i, 'is',
most_likely_sub_terminal, 'but the pseudotime is too low')
# terminal_clus.append(9999)
# super_terminal_clus_revised.append(9999)
else:
print('super terminal cells', self.super_terminal_cells)
print([self.labels[ti] for ti in
self.super_terminal_cells]) # find the sub-cluster which contains the single-cell-superterminal
temp = [self.labels[ti] for ti in self.super_terminal_cells if
self.labels[ti] in cluster_labels_subi]
terminal_clus_ai = []
for i in temp:
terminal_clus_ai.append(np.where(np.asarray(cluster_labels_subi) == i)[0][0])
terminal_clus.append(i)
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
# for i in temp:
# terminal_clus.append(i)
print('terminal clus in this a_i', terminal_clus_ai)
print('final terminal clus', terminal_clus)
for target_terminal in terminal_clus_ai:
#prob_ai = self.prob_reaching_terminal_state(target_terminal, terminal_clus_ai, adjacency_matrix2_ai, new_root_index, pt=markov_hitting_times_ai, num_sim=500)
prob_ai = self.simulate_branch_probability(target_terminal, terminal_clus_ai, adjacency_matrix2_ai,
new_root_index, pt=markov_hitting_times_ai, num_sim=500) #50 ToDO change back to 500 = numsim
df_graph['terminal_clus' + str(cluster_labels_subi[target_terminal])] = 0.0000000
pd_columnnames_terminal.append('terminal_clus' + str(cluster_labels_subi[target_terminal]))
print('prob ai for target terminal', target_terminal, prob_ai)
for k, prob_ii in enumerate(prob_ai):
df_graph.at[cluster_labels_subi[k], 'terminal_clus' + str(
cluster_labels_subi[target_terminal])] = prob_ii
bp_array = df_graph[pd_columnnames_terminal].values
bp_array[np.isnan(bp_array)]=0.00000001
print('final bp_array NOT normed by rowsum', bp_array)
bp_array = bp_array / bp_array.sum(axis=1)[:, None]
bp_array[np.isnan(bp_array)] = 0.00000001
print('final bp_array normed by rowsum', bp_array)
for ei, ii in enumerate(loc_compi):
df_graph.at[ii, 'pt'] = hitting_times[ei]
df_graph.at[ii, 'graph_node_label'] = graph_node_label[ei]
df_graph.at[ii, 'majority_truth'] = graph_node_label[ei]
df_graph.at[ii, 'markov_pt'] = markov_hitting_times_ai[ei]
locallytrimmed_g.vs["label"] = df_graph['graph_node_label'].values
hitting_times = df_graph['pt'].values
if len(super_terminal_clus_revised) > 0:
self.revised_super_terminal_clusters = super_terminal_clus_revised
else:
self.revised_super_terminal_clusters = self.super_terminal_clusters
self.hitting_times = hitting_times # * 1000
self.markov_hitting_times = df_graph['markov_pt'].values
self.terminal_clusters = terminal_clus
print('terminal clusters', terminal_clus)
self.node_degree_list = node_deg_list
self.project_branch_probability_sc(bp_array)
self.dict_terminal_super_sub_pairs = dict_terminal_super_sub_pairs
hitting_times = self.markov_hitting_times
bias_weights_2_all = get_biased_weights(edgelist, edgeweights, self.markov_hitting_times, round_no=2)
row_list = []
col_list = []
for (rowi, coli) in edgelist:
row_list.append(rowi)
col_list.append(coli)
# print('shape', a_i.shape[0], a_i.shape[0], row_list)
temp_csr = csr_matrix((np.array(bias_weights_2_all), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
if self.dataset == 'toy': # 'humanCD34':#False:
visual_global_pruning_std = 0.15
max_outgoing = 4
else:
visual_global_pruning_std = 1 # 0.15#0 for human
max_outgoing = 2
# glob_std_pruning =0 and max_out = 2 for HumanCD34 to simplify structure
edgeweights_maxout_2, edgelist_maxout_2, comp_labels_2 = local_pruning_clustergraph_mst(temp_csr,
global_pruning_std=visual_global_pruning_std,
max_outgoing=max_outgoing,
preserve_disconnected=self.preserve_disconnected)
row_list = []
col_list = []
for (rowi, coli) in edgelist_maxout_2:
row_list.append(rowi)
col_list.append(coli)
temp_csr = csr_matrix((np.array(edgeweights_maxout_2), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
temp_csr = temp_csr.transpose().todense() + temp_csr.todense()
temp_csr = np.tril(temp_csr, -1) # elements along the main diagonal and above are set to zero
temp_csr = csr_matrix(temp_csr)
edgeweights_maxout_2 = temp_csr.data
scale_factor = max(edgeweights_maxout_2) - min(edgeweights_maxout_2)
edgeweights_maxout_2 = [((wi + .1) * 2.5 / scale_factor) + 0.1 for wi in edgeweights_maxout_2]
sources, targets = temp_csr.nonzero()
edgelist_maxout_2 = list(zip(sources.tolist(), targets.tolist()))
self.edgelist_maxout = edgelist_maxout_2
self.edgeweights_maxout = edgeweights_maxout_2
remove_outliers = hitting_times
threshold = np.percentile(remove_outliers, 95) # np.mean(remove_outliers) + 1* np.std(remove_outliers)
th_hitting_times = [x if x < threshold else threshold for x in hitting_times]
remove_outliers_low = hitting_times[hitting_times < (np.mean(hitting_times) - 0.3 * np.std(hitting_times))]
threshold_low = np.mean(remove_outliers_low) - 0.3 * np.std(remove_outliers_low)
threshold_low = np.percentile(remove_outliers_low, 5)
# print('thresh low', threshold_low)
th_hitting_times = [x if x > threshold_low else threshold_low for x in th_hitting_times]
scaled_hitting_times = (th_hitting_times - np.min(th_hitting_times))
scaled_hitting_times = scaled_hitting_times * (1000 / np.max(scaled_hitting_times))
self.scaled_hitting_times = scaled_hitting_times
# self.single_cell_pt = self.project_hittingtimes_sc(self.hitting_times)
# self.single_cell_pt_stationary_bias = self.project_hittingtimes_sc(self.stationary_hitting_times.flatten())
print('markov hitting times to put in single cell project', self.markov_hitting_times)
self.single_cell_pt_markov = self.project_hittingtimes_sc(self.markov_hitting_times)
print('markov hitting times to put in single cell project', self.single_cell_pt_markov)
# self.dijkstra_hitting_times = self.path_length_onbias(edgelist, biased_edgeweights)
# print('dijkstra hitting times', [(i,j) for i,j in enumerate(self.dijkstra_hitting_times)])
# self.single_cell_pt_dijkstra_bias = self.project_hittingtimes_sc(self.dijkstra_hitting_times)
# threshold = np.mean(scaled_hitting_times)+0.25*np.std(scaled_hitting_times)
threshold = int(threshold)
scaled_hitting_times = scaled_hitting_times.astype(int)
# print('scaled hitting times')
# print(scaled_hitting_times)
pal = ig.drawing.colors.AdvancedGradientPalette(['yellow', 'green', 'blue'], n=1001)
all_colors = []
# print('100 scaled hitting', scaled_hitting_times)
for i in scaled_hitting_times:
all_colors.append(pal.get(int(i))[0:3])
# print('extract all colors', zip(scaled_hitting_times,all_colors))
locallytrimmed_g.vs['hitting_times'] = scaled_hitting_times
locallytrimmed_g.vs['color'] = [pal.get(i)[0:3] for i in scaled_hitting_times]
self.group_color = [colors.to_hex(v) for v in locallytrimmed_g.vs['color']] # based on ygb scale
viridis_cmap = cm.get_cmap('viridis_r')
self.group_color_cmap = [colors.to_hex(v) for v in
viridis_cmap(scaled_hitting_times / 1000)] # based on ygb scale
self.graph_node_label = df_graph['graph_node_label'].values
self.edgeweight = [e['weight'] * 1 for e in locallytrimmed_g.es]
print('self edge weight', len(self.edgeweight), self.edgeweight)
print('self edge list', len(self.edgelist_unique), self.edgelist_unique)
self.graph_node_pos = layout.coords
f, ((ax, ax1, ax2)) = plt.subplots(1, 3, sharey=True)
self.draw_piechart_graph(ax, ax1, ax2)
plt.show()
return
def draw_piechart_graph(self, ax, ax1, ax2, type_pt='original', ):
arrow_head_w = 0.2
edgeweight_scale = 1
node_pos = self.graph_node_pos
edgelist = list(self.edgelist_maxout)
edgeweight = self.edgeweights_maxout
node_pos = np.asarray(node_pos)
graph_node_label = self.graph_node_label
if type_pt == 'original': pt = self.scaled_hitting_times
if type_pt == 'biased_stationary': pt = self.biased_hitting_times_stationary
if type_pt == 'markov': pt = self.markov_hitting_times
import matplotlib.lines as lines
n_groups = len(set(self.labels)) # node_pos.shape[0]
n_truegroups = len(set(self.true_label))
group_pop = np.zeros([n_groups, 1])
group_frac = pd.DataFrame(np.zeros([n_groups, n_truegroups]), columns=list(set(self.true_label)))
for group_i in set(self.labels):
loc_i = np.where(self.labels == group_i)[0]
group_pop[group_i] = len(loc_i) # np.sum(loc_i) / 1000 + 1
true_label_in_group_i = list(np.asarray(self.true_label)[[loc_i]])
for ii in set(true_label_in_group_i):
group_frac[ii][group_i] = true_label_in_group_i.count(ii)
group_frac = group_frac.div(group_frac.sum(axis=1), axis=0)
line_true = np.linspace(0, 1, n_truegroups)
color_true_list = [plt.cm.jet(color) for color in line_true]
sct = ax.scatter(
node_pos[:, 0], node_pos[:, 1],
c='white', edgecolors='face', s=group_pop, cmap='jet')
print('draw triangle edgelist', len(edgelist), edgelist)
for e_i, (start, end) in enumerate(edgelist):
if pt[start] > pt[end]:
temp = start
start = end
end = temp
ax.add_line(lines.Line2D([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]],
color='grey', lw=edgeweight[e_i] * edgeweight_scale, alpha=0.2))
z = np.polyfit([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]], 1)
minx = np.min(np.array([node_pos[start, 0], node_pos[end, 0]]))
if (node_pos[start, 0] < node_pos[end, 0]):
direction_arrow = 1
else:
direction_arrow = -1
maxx = np.max(np.array([node_pos[start, 0], node_pos[end, 0]]))
xp = np.linspace(minx, maxx, 500)
p = np.poly1d(z)
smooth = p(xp)
step = 1
if direction_arrow == 1:
ax.arrow(xp[250], smooth[250], xp[250 + step] - xp[250], smooth[250 + step] - smooth[250], shape='full',
lw=0,
length_includes_head=True, head_width=arrow_head_w,
color='grey')
# ax.plot(xp, smooth, linewidth=edgeweight[e_i], c='pink')
else:
ax.arrow(xp[250], smooth[250], xp[250 - step] - xp[250],
smooth[250 - step] - smooth[250], shape='full', lw=0,
length_includes_head=True, head_width=arrow_head_w, color='grey')
trans = ax.transData.transform
bbox = ax.get_position().get_points()
ax_x_min = bbox[0, 0]
ax_x_max = bbox[1, 0]
ax_y_min = bbox[0, 1]
ax_y_max = bbox[1, 1]
ax_len_x = ax_x_max - ax_x_min
ax_len_y = ax_y_max - ax_y_min
trans2 = ax.transAxes.inverted().transform
pie_axs = []
pie_size_ar = ((group_pop - np.min(group_pop)) / (np.max(group_pop) - np.min(group_pop)) + 0.5) / 10
for node_i in range(n_groups):
pie_size = pie_size_ar[node_i][0]
x1, y1 = trans(node_pos[node_i]) # data coordinates
xa, ya = trans2((x1, y1)) # axis coordinates
xa = ax_x_min + (xa - pie_size / 2) * ax_len_x
ya = ax_y_min + (ya - pie_size / 2) * ax_len_y
# clip, the fruchterman layout sometimes places below figure
# if ya < 0: ya = 0
# if xa < 0: xa = 0
rect = [xa, ya, pie_size * ax_len_x, pie_size * ax_len_y]
frac = group_frac.iloc[node_i].values
pie_axs.append(plt.axes(rect, frameon=False))
pie_axs[node_i].pie(frac, wedgeprops={'linewidth': 0.0}, colors=color_true_list)
pie_axs[node_i].set_xticks([])
pie_axs[node_i].set_yticks([])
pie_axs[node_i].set_aspect('equal')
pie_axs[node_i].text(0.5, 0.5, graph_node_label[node_i])
patches, texts = pie_axs[node_i].pie(frac, wedgeprops={'linewidth': 0.0}, colors=color_true_list)
labels = list(set(self.true_label))
plt.legend(patches, labels, loc=(-5, -5), fontsize=6)
if self.too_big_factor > 0.1:
is_sub = ' super clusters'
else:
is_sub = ' sub clusters'
ti = 'Reference Group Membership. K=' + str(self.knn) + '. ncomp = ' + str(self.ncomp) + is_sub
ax.set_title(ti)
title_list = ["PT using Markov Simulation", "PT on undirected original graph"]
for i, ax_i in enumerate([ax1, ax2]):
print("drawing axis", i)
if i == 0: pt = self.markov_hitting_times
if i == 1: pt = self.hitting_times
for e_i, (start, end) in enumerate(edgelist):
if pt[start] > pt[end]:
temp = start
start = end
end = temp
ax_i.add_line(
lines.Line2D([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]],
color='black', lw=edgeweight[e_i] * edgeweight_scale, alpha=0.5))
z = np.polyfit([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]], 1)
minx = np.min(np.array([node_pos[start, 0], node_pos[end, 0]]))
if (node_pos[start, 0] < node_pos[end, 0]):
direction_arrow = 1
else:
direction_arrow = -1
maxx = np.max(np.array([node_pos[start, 0], node_pos[end, 0]]))
xp = np.linspace(minx, maxx, 500)
p = np.poly1d(z)
smooth = p(xp)
step = 1
if direction_arrow == 1:
ax_i.arrow(xp[250], smooth[250], xp[250 + step] - xp[250], smooth[250 + step] - smooth[250],
shape='full', lw=0,
length_includes_head=True, head_width=arrow_head_w,
color='grey')
else:
ax_i.arrow(xp[250], smooth[250], xp[250 - step] - xp[250],
smooth[250 - step] - smooth[250], shape='full', lw=0,
length_includes_head=True, head_width=arrow_head_w, color='grey')
c_edge = []
l_width = []
for ei, pti in enumerate(pt):
if ei in self.terminal_clusters:
c_edge.append('red')
l_width.append(1.5)
else:
c_edge.append('gray')
l_width.append(0.0)
gp_scaling = 500 / max(group_pop)
print(gp_scaling, 'gp_scaline')
group_pop_scale = group_pop * gp_scaling
ax_i.scatter(node_pos[:, 0], node_pos[:, 1], s=group_pop_scale, c=pt, cmap='viridis_r', edgecolors=c_edge,
alpha=1, zorder=3, linewidth=l_width)
for ii in range(node_pos.shape[0]):
ax_i.text(node_pos[ii, 0] + 0.5, node_pos[ii, 1] + 0.5, 'c' + str(ii), color='black', zorder=4)
title_pt = title_list[i]
ax_i.set_title(title_pt)
def accuracy(self, onevsall=1):
true_labels = self.true_label
Index_dict = {}
PARC_labels = self.labels
N = len(PARC_labels)
n_cancer = list(true_labels).count(onevsall)
n_pbmc = N - n_cancer
for k in range(N):
Index_dict.setdefault(PARC_labels[k], []).append(true_labels[k])
num_groups = len(Index_dict)
sorted_keys = list(sorted(Index_dict.keys()))
error_count = []
pbmc_labels = []
thp1_labels = []
fp, fn, tp, tn, precision, recall, f1_score = 0, 0, 0, 0, 0, 0, 0
for kk in sorted_keys:
vals = [t for t in Index_dict[kk]]
majority_val = self.func_mode(vals)
if majority_val == onevsall: print('cluster', kk, ' has majority', onevsall, 'with population', len(vals))
if kk == -1:
len_unknown = len(vals)
print('len unknown', len_unknown)
if (majority_val == onevsall) and (kk != -1):
thp1_labels.append(kk)
fp = fp + len([e for e in vals if e != onevsall])
tp = tp + len([e for e in vals if e == onevsall])
list_error = [e for e in vals if e != majority_val]
e_count = len(list_error)
error_count.append(e_count)
elif (majority_val != onevsall) and (kk != -1):
pbmc_labels.append(kk)
tn = tn + len([e for e in vals if e != onevsall])
fn = fn + len([e for e in vals if e == onevsall])
error_count.append(len([e for e in vals if e != majority_val]))
predict_class_array = np.array(PARC_labels)
PARC_labels_array = np.array(PARC_labels)
number_clusters_for_target = len(thp1_labels)
for cancer_class in thp1_labels:
predict_class_array[PARC_labels_array == cancer_class] = 1
for benign_class in pbmc_labels:
predict_class_array[PARC_labels_array == benign_class] = 0
predict_class_array.reshape((predict_class_array.shape[0], -1))
error_rate = sum(error_count) / N
n_target = tp + fn
tnr = tn / n_pbmc
fnr = fn / n_cancer
tpr = tp / n_cancer
fpr = fp / n_pbmc
if tp != 0 or fn != 0: recall = tp / (tp + fn) # ability to find all positives
if tp != 0 or fp != 0: precision = tp / (tp + fp) # ability to not misclassify negatives as positives
if precision != 0 or recall != 0:
f1_score = precision * recall * 2 / (precision + recall)
majority_truth_labels = np.empty((len(true_labels), 1), dtype=object)
for cluster_i in set(PARC_labels):
cluster_i_loc = np.where(np.asarray(PARC_labels) == cluster_i)[0]
true_labels = np.asarray(true_labels)
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = majority_truth
majority_truth_labels = list(majority_truth_labels.flatten())
accuracy_val = [error_rate, f1_score, tnr, fnr, tpr, fpr, precision,
recall, num_groups, n_target]
return accuracy_val, predict_class_array, majority_truth_labels, number_clusters_for_target
def run_PARC(self):
print('input data has shape', self.data.shape[0], '(samples) x', self.data.shape[1], '(features)')
self.ncomp = self.data.shape[1]
pop_list = []
for item in set(list(self.true_label)):
pop_list.append([item, list(self.true_label).count(item)])
# print("population composition", pop_list)
if self.true_label is None:
self.true_label = [1] * self.data.shape[0]
list_roc = []
time_start_total = time.time()
time_start_knn = time.time()
self.knn_struct = self.make_knn_struct()
time_end_knn_struct = time.time() - time_start_knn
# Query dataset, k - number of closest elements (returns 2 numpy arrays)
self.run_subPARC()
run_time = time.time() - time_start_total
print('time elapsed {:.1f} seconds'.format(run_time))
targets = list(set(self.true_label))
N = len(list(self.true_label))
self.f1_accumulated = 0
self.f1_mean = 0
self.stats_df = pd.DataFrame({'jac_std_global': [self.jac_std_global], 'dist_std_local': [self.dist_std_local],
'runtime(s)': [run_time]})
self.majority_truth_labels = []
if len(targets) > 1:
f1_accumulated = 0
f1_acc_noweighting = 0
for onevsall_val in targets:
print('target is', onevsall_val)
vals_roc, predict_class_array, majority_truth_labels, numclusters_targetval = self.accuracy(
onevsall=onevsall_val)
f1_current = vals_roc[1]
print('target', onevsall_val, 'has f1-score of %.2f' % (f1_current * 100))
f1_accumulated = f1_accumulated + f1_current * (list(self.true_label).count(onevsall_val)) / N
f1_acc_noweighting = f1_acc_noweighting + f1_current
list_roc.append(
[self.jac_std_global, self.dist_std_local, onevsall_val] + vals_roc + [numclusters_targetval] + [
run_time])
f1_mean = f1_acc_noweighting / len(targets)
print("f1-score (unweighted) mean %.2f" % (f1_mean * 100), '%')
print('f1-score weighted (by population) %.2f' % (f1_accumulated * 100), '%')
df_accuracy = pd.DataFrame(list_roc,
columns=['jac_std_global', 'dist_std_local', 'onevsall-target', 'error rate',
'f1-score', 'tnr', 'fnr',
'tpr', 'fpr', 'precision', 'recall', 'num_groups',
'population of target', 'num clusters', 'clustering runtime'])
self.f1_accumulated = f1_accumulated
self.f1_mean = f1_mean
self.stats_df = df_accuracy
self.majority_truth_labels = majority_truth_labels
return
def run_palantir_func_human34(ad, ncomps, knn, tsne, revised_clus, start_cell='c4823'):
    """Run the Palantir pipeline on the human CD34 AnnData object and plot results.

    PCA -> diffusion maps -> multiscale space -> Palantir pseudotime/branch
    probabilities -> MAGIC imputation -> gene-trend plots for two gene panels.
    Assumes module-level `palantir`, `sc` (scanpy), `pd` and `plt` are available.
    `start_cell` is the early cell used to anchor pseudotime.
    """
    expr_df = pd.DataFrame(ad.X)
    cell_ids = ['c' + str(i) for i in expr_df.index]
    expr_df.index = cell_ids
    expr_df.columns = list(ad.var_names)

    pca_proj, _ = palantir.utils.run_pca(expr_df, n_components=ncomps)
    sc.tl.pca(ad, svd_solver='arpack')
    diff_maps = palantir.utils.run_diffusion_maps(pca_proj, n_components=ncomps, knn=knn)
    multiscale = palantir.utils.determine_multiscale_space(diff_maps)  # n_eigs determined via eigengap
    print('ms data', multiscale.shape)

    tsne.index = cell_ids
    cluster_series = pd.Series(revised_clus, index=expr_df.index)
    palantir.plot.plot_cell_clusters(tsne, cluster_series)

    pr_res = palantir.core.run_palantir(multiscale, early_cell=start_cell, num_waypoints=1200, knn=knn)
    palantir.plot.plot_palantir_results(pr_res, tsne, knn, ncomps)

    imputed = palantir.utils.run_magic_imputation(expr_df, diff_maps)
    # erythroid/megakaryocytic panel, then mono/DC panel
    for panel in (['GATA1', 'GATA2', 'ITGA2B'],
                  ['MPO', 'ITGAX', 'IRF8', 'CSF1R', 'IL3RA']):
        trends = palantir.presults.compute_gene_trends(pr_res, imputed.loc[:, panel])
        palantir.plot.plot_gene_trends(trends)
    plt.show()
def slalom_human():
    """Regress cell-cycle-related factors out of the human CD34 expression
    matrix using slalom factor analysis.

    Loads the raw h5ad, fits a slalom FA model against a custom gene-set
    annotation, regresses out proliferation-related terms, and returns the
    full expression DataFrame with the annotated genes replaced by their
    corrected values.
    """
    import os
    import slalom
    from slalom import plotFactors, plotRelevance, plotLoadings, saveFA, dumpFA

    data_dir = '/home/shobi/Trajectory/Datasets/'
    # 5780 cells x 14651 genes, Human Replicate 1
    ad = sc.read(
        '/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
    expr_df = pd.DataFrame(ad.X)
    expr_df.columns = [i for i in ad.var_names]

    annoDB = 'custom'
    annoFile = os.path.join(data_dir, 'geneset.gmt')
    loaded = slalom.utils.load_txt(df=expr_df.T, annoFiles=annoFile, annoDBs=annoDB)
    print("Loaded {:d} cells, {:d} genes".format(loaded['Y'].shape[0], loaded['Y'].shape[1]))
    print("Annotation: {:d} terms".format(len(loaded['terms'])))
    print('data terms', loaded['terms'])
    print(loaded['genes'])
    print(loaded['lab'])

    I = loaded['I']  # indicator matrix assigning genes to pathways
    Y = loaded['Y']  # log expression values
    terms = loaded['terms']  # term names
    print("terms", terms)
    gene_ids = loaded['genes']  # ids of the genes in Y
    print('gene_ids', gene_ids)
    print(I.shape, Y.shape, terms.shape)

    # Gaussian noise model with 3 dense hidden factors
    FA = slalom.initFA(Y, terms, I, gene_ids=gene_ids, noise='gauss', nHidden=3, minGenes=1)
    FA.train()
    FA.printDiagnostics()
    fig = plotRelevance(FA, madFilter=0)

    # remove proliferation/cell-cycle factors from the expression values
    corrected_data = FA.regressOut(
        terms=['M phase', 'Dna replication', 'Chromosome segregation', 'M phase of mitotic cell cycle',
               'Organelle fission'])
    print('corrected_data.shape', corrected_data.shape)

    full_matrix = expr_df.copy()
    print(full_matrix.head)
    annotated_genes = np.array(loaded['genes'])[np.sum(loaded['I'], axis=1) != 0]
    print('annotated genes', len(annotated_genes), annotated_genes)
    full_matrix[annotated_genes] = corrected_data
    print('full shape ', full_matrix)
    return full_matrix
def main_Human(ncomps=100, knn=30, p0_random_seed=4, run_palantir_func = False):
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
ncomps = ncomps# 40 ncomps and 20KNN works well
knn = knn # 30
p0_random_seed =p0_random_seed
print('ncomp =', ncomps, ' knn=', knn, ' randseed=', p0_random_seed)
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '#0b128f'
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne = pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
tsnem = ad.obsm['tsne']
revised_clus = ad.obs['clusters'].values.tolist().copy()
loc_DCs = [i for i in range(5780) if ad.obs['clusters'].values.tolist()[i] == '7']
for loc_i in loc_DCs:
if ad.obsm['palantir_branch_probs'][loc_i, 5] > ad.obsm['palantir_branch_probs'][
loc_i, 2]: # if prob that cDC > pDC, then relabel as cDC
revised_clus[loc_i] = '10'
revised_clus = [int(i) for i in revised_clus]
# magic_df = ad.obsm['MAGIC_imputed_data']
# ad.X: Filtered, normalized and log transformed count matrix
# ad.raw: Filtered raw count matrix
# print('before extra filtering' ,ad.shape)
# sc.pp.filter_genes(ad, min_cells=10)
# print('after extra filtering', ad.shape)
adata_counts = sc.AnnData(
ad.X) # slalom_human())#(ad.X) # ad.X is filtered, lognormalized,scaled// ad.raw.X is the filtered but not pre-processed
adata_counts.obs_names = ad.obs_names
adata_counts.var_names = ad.var_names
# sc.pp.recipe_zheng17(adata_counts, n_top_genes=1000, log=True) #using this or the .X scaled version is pretty much the same.
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
import colorcet as cc
if run_palantir_func == True:
run_palantir_func_human34(ad, ncomps, knn, tsne, revised_clus, start_cell='c4823')
# tsnem = TSNE().fit_transform(adata_counts.obsm['X_pca'])
'''
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
line = np.linspace(0, 1, len(set(revised_clus)))
for color, group in zip(line, set(revised_clus)):
where = np.where(np.array(revised_clus) == group)[0]
ax1.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend()
ax1.set_title('Palantir Phenograph Labels')
import colorcet as cc
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
line_nover = np.linspace(0, 1, len(set(nover_labels)))
col_i = 0
for color, group in zip(line_nover, set(nover_labels)):
where = np.where(np.array(nover_labels) == group)[0]
marker_x = marker[random.randint(0, 5)]
# ax2.scatter(tsnem[where, 0],tsnem[where, 1], label=group, c=plt.cm.nipy_spectral(color), marker = marker_x, alpha=0.5)
ax2.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax2.legend(fontsize=6)
ax2.set_title('Novershtern Corr. Labels')
line = np.linspace(0, 1, len(set(parclabels_all)))
col_i = 0
for color, group in zip(line, set(parclabels_all)):
where = np.where(np.array(parclabels_all) == group)[0]
ax3.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], alpha=0.5)
col_i = col_i + 1
ax3.legend()
ax3.set_title('Parc53 Nover Labels')
# plt.show()
'''
'''
plt.figure(figsize=[5, 5])
plt.title('palantir, ncomps = ' + str(ncomps) + ' knn' + str(knn))
for group in set(revised_clus):
loc_group = np.where(np.asarray(revised_clus) == group)[0]
plt.scatter(tsnem[loc_group, 0], tsnem[loc_group, 1], s=5, color=colors[group], label=group)
ax = plt.gca()
ax.set_axis_off()
ax.legend(fontsize=6)
'''
gene_list = ['ITGAX']#['GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA', 'ITGAX', 'IGHD',
#'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
for gene_name in gene_list:# 'GATA2',
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
print('gene name', gene_name, loc_gata)
#print('xpca',norm_df['X_pca'])
true_label = nover_labels # revised_clus
print('p0 random seed', p0_random_seed)
p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.4,
pseudotime=True, path="/home/shobi/Trajectory/Datasets/HumanCD34/", root=1,
root_user=4823, dataset='humanCD34', preserve_disconnected=True, random_seed=p0_random_seed) # *.4
p0.run_PARC()
super_labels = p0.labels
print('super labels', set(super_labels))
ad.obs['parc0_label'] = [str(i) for i in super_labels]
magic_ad = ad.obsm['MAGIC_imputed_data']
magic_ad = sc.AnnData(magic_ad)
magic_ad.obs_names = ad.obs_names
magic_ad.var_names = ad.var_names
magic_ad.obs['parc0_label'] = [str(i) for i in super_labels]
marker_genes = {"ERY": ['GATA1', 'GATA2', 'ITGA2B'], "BCell": ['IGHD', 'CD22'],
"DC": ['IRF8', 'IL3RA', 'IRF4', 'CSF2RA','ITGAX'],
"MONO": ['CD14', 'SPI1', 'MPO', 'IL12RB1', 'IL13RA1', 'C3AR1', 'FCGR3A'], 'HSC': ['CD34']}
print('make the p0 matrix plot')
sc.pl.matrixplot(magic_ad, marker_genes, groupby='parc0_label')
'''
sc.tl.rank_genes_groups(ad, groupby='parc0_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.rank_genes_groups_heatmap(ad, n_genes=10, groupby="parc0_label", show_gene_labels=True, use_raw=False)
sc.pl.rank_genes_groups_tracksplot(ad, groupby='parc0_label', n_genes = 3) # plot the result
print('show the matrix plot')
'''
super_edges = p0.edgelist_maxout # p0.edgelist
super_pt = p0.scaled_hitting_times # pseudotime pt
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in p0.terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
print(labelsq[0])
tsi_list.append(labelsq[0][0])
p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05,
path="/home/shobi/Trajectory/Datasets/HumanCD34/", pseudotime=True, root=1,
super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
super_terminal_cells=tsi_list, root_user=4823,
x_lazy=0.99, alpha_teleport=0.99, dataset='humanCD34', preserve_disconnected=True,
super_terminal_clusters=p0.terminal_clusters) # *.4super_terminal_cells = tsi_list
p1.run_PARC()
labels = p1.labels
ad.obs['parc1_label'] = [str(i) for i in labels]
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in p1.revised_super_terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
print(labelsq[0])
tsi_list.append(labelsq[0][0])
'''
sc.tl.rank_genes_groups(ad, groupby='parc1_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.matrixplot(ad, marker_genes, groupby='parc1_label', use_raw=False)
sc.pl.rank_genes_groups_heatmap(ad, n_genes=3, groupby="parc1_label", show_gene_labels=True, use_raw=False)
'''
label_df = pd.DataFrame(labels, columns=['parc'])
# label_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels.csv', index=False)
gene_ids = adata_counts.var_names
obs = ad.raw.X.toarray()
print('shape obs', obs.shape)
obs = pd.DataFrame(obs, columns=gene_ids)
# obs['parc']=p1.labels
obs['louvain'] = revised_clus
# obs_average = obs.groupby('parc', as_index=True).mean()
obs_average = obs.groupby('louvain', as_index=True).mean()
print(obs_average.head())
# obs_average.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.csv', index=False)
ad_obs = sc.AnnData(obs_average)
ad_obs.var_names = gene_ids
ad_obs.obs['parc'] = [i for i in range(len(set(revised_clus)))] # p1.labels instaed of revised_clus
# sc.write('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.h5ad',ad_obs)
# fig_0, ax_0 = plt.subplots()
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
loaded_magic_df.head()
for gene_name in ['ITGA2B','IL3RA','ITGAX','IRF8']:#['GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','IRF8','SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
#DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO':'MPO (Mono)', 'CD79B':'CD79B (B)','IRF8':'IRF8 (DC)', 'SPI1':'PU.1','CD34': 'CD34','CSF1R':'CSF1R (pDC. Up then Down in cDC)','IL3RA':'CD123 (pDC)','IRF4': 'IRF4 (pDC)', 'ITGAX':'ITGAX (cDCs)','CSF2RA':'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
#magic_ad=loaded_magic_df[gene_name]
p1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
print('start tsne')
n_downsample = 4000
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=4000)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=5780, replace=False, p=None)
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
print('labels p1', len(labels), set(labels))
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
embedding = tsnem[idx, :] # TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:20])
print('size of downsampled embedding', embedding.shape)
else:
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:15])
# print('tsne input size', adata_counts.obsm['X_pca'].shape)
embedding = tsnem # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:,0:20])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace( p0, p1, idx)
draw_trajectory_gams(embedding,super_clus_ds_PCA_loc, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p1.revised_super_terminal_clusters,
sub_terminal_clusters=p1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
# final_super_terminal=p0.terminal clusters
'''
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p0.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
'''
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
lineP0 = np.linspace(0, 1, len(set(p0.labels)))
lineP1 = np.linspace(0, 1, len(set(p1.labels)))
# find the single-cell which is nearest to the average-location of a terminal cluster - for just the sub-set of downsampled points in the corresponding PCA-space
new_tsi_list = []
# find the single-cell which is nearest to the average-location of a terminal cluster
# TODO make a knn in the downsampled PCA-space
X_ds = adata_counts.obsm['X_pca'][:, 0:ncomps][idx]
p_ds = hnswlib.Index(space='l2', dim=ncomps)
p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
p_ds.add_items(X_ds)
p_ds.set_ef(50)
for tsi_item in tsi_list:
labelsq, distances = p_ds.knn_query(adata_counts.obsm['X_pca'][:, 0:ncomps][tsi_item, :], k=1)
new_tsi_list.append(labelsq[0][0])
# for old_tsi_i in tsi_list:
# temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
# labelsq, distances = p1.knn_struct.query(.knn_query(temp, k=1)
# print(labelsq[0])
# tsi_list.append(labelsq[0][0])
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
ff, (ax11, ax22) = plt.subplots(1, 2, sharey=True)
col_i = 0
for color, group in zip(line, set(true_label)):
marker_x = marker[random.randint(0, 5)]
where = np.where(np.asarray(true_label) == group)[0]
# ax1.scatter(embedding[where, 0], embedding[where, 1], label=group, c=plt.cm.jet(color))
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax1.legend(fontsize=6)
ax1.set_title('true labels')
for color, group in zip(lineP0, set(p0.labels)):
where = np.where(super_labels == group)[0]
ax11.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax11.legend(fontsize=6)
ax11.set_title('p0 labels')
for color, group in zip(lineP1, set(p1.labels)):
where = np.where(labels == group)[0]
ax22.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax22.legend(fontsize=6)
ax22.set_title('p1 labels')
ax3.set_title("Markov Sim PT ncomps:" + str(ncomps) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
ax2.set_title("terminal clus from P0 super clus:" + str(ncomps) + '. knn:' + str(knn)+ 'randseed' +str( p0_random_seed))
ax2.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
jj = 0
for ti in p1.revised_super_terminal_clusters: # p0.terminal_clusters:
loc_i = np.where(super_labels == ti)[0]
val_pt = [sc_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 0) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labelsq, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
x = embedding[labelsq[0], 0]
y = embedding[labelsq[0], 1]
# ax2.scatter(np.mean(x), np.mean(y), label='ts' + str(ti)+'M'+str(maj), c='red', s=15)
# ax2.scatter(x, y, label='TS' + str(ti), c='red', s=10)
# ax3.scatter(x, y, label='TS' + str(ti), c='red', s=10)
ax2.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1], label='TS' + str(ti), c='pink', s=18) # PCs HNSW
# ax3.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1], label='TS' + str(p1.labels[tsi_list[jj]]), c='pink',s=18)
ax2.text(embedding[new_tsi_list[jj], 0]+0.05, embedding[new_tsi_list[jj], 1]+ 0.05, 'TS' + str(ti), color='black', zorder=3)
# ax3.text(np.mean(x) + 0.05, np.mean(y) + 0.05, 'TS' + str(ti), color='black', zorder=3)
ax2.legend(fontsize=6)
jj = jj + 1
jj = 0
print('')
for ti in p1.terminal_clusters:
print('terminal ti', ti)
loc_i = np.where(np.asarray(labels) == ti)[0]
#print(np.where(labels == ti), np.where(np.asarray(labels) == ti) ,loc_i)
val_pt = [sc_pt_markov[i] for i in loc_i]
print(val_pt)
th_pt = np.percentile(val_pt, 0) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labelsq, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
x = embedding[labelsq[0], 0]
y = embedding[labelsq[0], 1]
# ax2.scatter(np.mean(x), np.mean(y), label='ts' + str(ti)+'M'+str(maj), c='red', s=15)
# ax2.scatter(x, y, label='TS' + str(ti), c='red', s=10)
# ax3.scatter(x, y, label='TS' + str(ti), c='red', s=10)
ax3.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1],
label='TS' + str(ti), c='pink', s=18)
ax3.text(embedding[new_tsi_list[jj], 0]+0.05, embedding[new_tsi_list[jj], 1] + 0.05, 'TS' + str(ti), color='black', zorder=3)
jj = jj + 1
draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p1.full_graph_shortpath, idx, adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def mainToy():
    """Run the two-pass PARC trajectory pipeline on a simulated dataset.

    Loads the dataset selected by the hard-coded ``dataset`` variable
    (Toy1-4 / ToyCyclic, or the GermlineLine / Paul15 real datasets) from
    fixed paths under /home/shobi/Trajectory/, runs a coarse PARC pass (p0),
    seeds a fine pass (p1) with p0's terminal clusters, and plots the
    inferred trajectories and Markov pseudotime on a t-SNE embedding.

    Side effects only (plots via matplotlib, prints); returns None.
    """
    dataset = "Toy3"  # ""Toy1" # GermlineLi #Toy1
    ## Dataset Germline Li https://zenodo.org/record/1443566#.XZlhEkEzZ5y
    if dataset == "GermlineLine":
        # FIX: removed the stray 'rt' positional argument (a file-mode string)
        # previously passed to pd.read_csv; it landed on `sep` and conflicts
        # with the `delimiter` keyword (ValueError in current pandas).
        df_expression_ids = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li.csv",
                                        delimiter=",")
        print(df_expression_ids.shape)
        # print(df_expression_ids[['cell_id',"week","ACTG2","STK31"]])[10:12]
        df_counts = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li_filteredcounts.csv",
                                delimiter=",")
        df_ids = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li_labels.csv",
                             delimiter=",")
        # print(df_counts.shape, df_counts.head() ,df_ids.shape)
        # X_counts = df_counts.values
        # print(X_counts.shape)
        # varnames = pd.Categorical(list(df_counts.columns))
        adata_counts = sc.AnnData(df_counts, obs=df_ids)
        print(adata_counts.obs)
        sc.pp.filter_cells(adata_counts, min_counts=1)
        print(adata_counts.n_obs)
        sc.pp.filter_genes(adata_counts, min_counts=1)  # only consider genes with more than 1 count
        print(adata_counts.X.shape)
        sc.pp.normalize_per_cell(  # normalize with total UMI count per cell
            adata_counts, key_n_counts='n_counts_all')
        print(adata_counts.X.shape, len(list(adata_counts.var_names)))
        filter_result = sc.pp.filter_genes_dispersion(  # select highly-variable genes
            adata_counts.X, flavor='cell_ranger', n_top_genes=1000, log=False)
        print(adata_counts.X.shape, len(list(adata_counts.var_names)))  # , list(adata_counts.var_names))
        adata_counts = adata_counts[:, filter_result.gene_subset]
        print(adata_counts.X.shape, len(list(adata_counts.var_names)))  # ,list(adata_counts.var_names))
        # subset the genes
        sc.pp.normalize_per_cell(adata_counts)  # renormalize after filtering
        sc.pp.log1p(adata_counts)  # log transform: adata_counts.X = log(adata_counts.X + 1)
        sc.pp.scale(adata_counts)  # scale to unit variance and shift to zero mean
        sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=20)
        true_label = list(adata_counts.obs['week'])
        sc.pp.neighbors(adata_counts, n_neighbors=10, n_pcs=20)
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='gender_week', legend_loc='right margin', palette='jet')
    ## Dataset Paul15 https://scanpy-tutorials.readthedocs.io/en/latest/paga-paul15.html
    if dataset == 'Paul15':
        root_user = "8Mk"
        adata_counts = sc.datasets.paul15()
        sc.pp.recipe_zheng17(adata_counts)
        sc.tl.pca(adata_counts, svd_solver='arpack')
        true_label = list(adata_counts.obs['paul15_clusters'])  # PAUL
        adata_counts.obs['group_id'] = true_label
        # sc.pp.neighbors(adata_counts, n_neighbors=10)
        # sc.tl.draw_graph(adata_counts)
        # sc.pl.draw_graph(adata_counts, color=['paul15_clusters', 'Cma1'], legend_loc='on data')
    if dataset.startswith('Toy'):
        root_user = 'M1'  # "T1_M1", "T2_M1"] #"T1_M1"
        # FIX (all branches below): dropped the stray 'rt' positional argument
        # that conflicted with `delimiter=","` in pd.read_csv.
        if dataset == "Toy1":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy1/toy_bifurcating_M4_n2000d1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy1/toy_bifurcating_M4_n2000d1000_ids.csv",
                                 delimiter=",")
        if dataset == "Toy2":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy2/toy_multifurcating_n1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy2/toy_multifurcating_n1000_ids.csv",
                                 delimiter=",")
        if dataset == "Toy3":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000_ids.csv",
                                 delimiter=",")
        if dataset == "ToyCyclic":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000_ids.csv",
                                 delimiter=",")
        if dataset == "Toy4":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000_ids.csv",
                                 delimiter=",")
        # numeric cell index from ids like "c12" so rows can be sorted back
        # into counts order before attaching them as AnnData obs
        df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
        print("shape", df_counts.shape, df_ids.shape)
        # FIX: keyword form; the positional `axis` argument to DataFrame.drop
        # was removed in pandas 2.0.
        df_counts = df_counts.drop(columns=['Unnamed: 0'])
        df_ids = df_ids.sort_values(by=['cell_id_num'])
        df_ids = df_ids.reset_index(drop=True)
        true_label = df_ids['group_id']
        adata_counts = sc.AnnData(df_counts, obs=df_ids)
        # sc.pp.recipe_zheng17(adata_counts, n_top_genes=20) not helpful for toy data
    ncomps = 50
    knn = 30
    sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
    '''
    print(np.flatnonzero(adata_counts.obs['group_id'] == 'T1_M1')[0])
    adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == 'T1_M1')[0]
    sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps)#4
    sc.tl.draw_graph(adata_counts)
    sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') #force-directed layout
    start_dfmap = time.time()
    sc.tl.diffmap(adata_counts, n_comps=ncomps)
    print('time taken to get diffmap given knn', time.time() - start_dfmap)
    sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap')#4
    sc.tl.draw_graph(adata_counts)
    sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
    sc.tl.leiden(adata_counts, resolution=1.0)
    sc.tl.paga(adata_counts, groups='leiden')
    #sc.pl.paga(adata_counts, color=['louvain','group_id'])
    sc.tl.dpt(adata_counts, n_dcs=ncomps)
    sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'], title=['leiden (knn:'+str(knn)+' ncomps:'+str(ncomps)+')', 'group_id (ncomps:'+str(ncomps)+')','pseudotime (ncomps:'+str(ncomps)+')'])
    #X = df_counts.values
    print(palantir.__file__) #location of palantir source code
    #counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv")
    counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv")
    #counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000.csv")
    print('counts',counts)
    str_true_label = true_label.tolist()
    str_true_label = [(i[1:]) for i in str_true_label]
    str_true_label = pd.Series(str_true_label, index=counts.index)
    norm_df = counts#palantir.preprocess.normalize_counts(counts)
    pca_projections, _ = palantir.utils.run_pca(norm_df, n_components=ncomps)
    dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
    ms_data = palantir.utils.determine_multiscale_space(dm_res) #n_eigs is determined using eigengap
    tsne = palantir.utils.run_tsne(ms_data)
    palantir.plot.plot_cell_clusters(tsne, str_true_label)
    start_cell = 'C108'#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
    print('ms data', ms_data)
    pr_res = palantir.core.run_palantir(ms_data, start_cell, num_waypoints=500,knn=knn)
    palantir.plot.plot_palantir_results(pr_res, tsne)
    plt.show()
    '''
    # clusters = palantir.utils.determine_cell_clusters(pca_projections)
    from sklearn.decomposition import PCA
    pca = PCA(n_components=ncomps)
    pc = pca.fit_transform(df_counts)
    # Coarse PARC pass: finds super-clusters and their terminal states.
    p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
              too_big_factor=0.3,
              pseudotime=True, path="/home/shobi/Trajectory/Datasets/" + dataset + "/", root=2,
              root_user=root_user, preserve_disconnected=True, dataset='toy')  # *.4
    p0.run_PARC()
    super_labels = p0.labels
    super_edges = p0.edgelist
    super_pt = p0.scaled_hitting_times  # pseudotime pt
    # 0.05 for p1 toobig
    # HNSW index over the PCA space: used to snap each terminal-cluster
    # centroid to its nearest actual cell.
    p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
    p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
    p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
    p.set_ef(50)
    tsi_list = []  # find the single-cell which is nearest to the average-location of a terminal cluster in PCA space (
    for tsi in p0.terminal_clusters:
        loc_i = np.where(np.asarray(p0.labels) == tsi)[0]
        val_pt = [p0.single_cell_pt_markov[i] for i in loc_i]
        th_pt = np.percentile(val_pt, 50)  # 50
        # keep only the later-pseudotime half of the terminal cluster's cells
        loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
        temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
        labelsq, distances = p.knn_query(temp, k=1)
        print(labelsq[0])
        tsi_list.append(labelsq[0][0])
    # Fine PARC pass seeded with the coarse pass's terminal cells/clusters.
    p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=1, dist_std_local=0.15, knn=knn,
              too_big_factor=0.05,
              path="/home/shobi/Trajectory/Datasets/" + dataset + "/", pseudotime=True, root=1,
              super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
              super_terminal_cells=tsi_list, root_user=root_user,
              x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
              super_terminal_clusters=p0.terminal_clusters)
    # in the case of TOY DATA: P1 WORKS MUCH BETTER WHEN ONLY USING SUPER_TERMINAL_CLUS... O/W need to omit pruning
    p1.run_PARC()
    labels = p1.labels
    # p1 = PARC(adata_counts.obsm['X_pca'], true_label, jac_std_global=1, knn=5, too_big_factor=0.05, anndata= adata_counts, small_pop=2)
    # p1.run_PARC()
    # labels = p1.labels
    print('start tsne')
    n_downsample = 500
    if len(labels) > n_downsample:
        # idx = np.random.randint(len(labels), size=900)
        np.random.seed(2357)
        idx = np.random.choice(a=np.arange(0, len(labels)), size=900, replace=False, p=None)
        super_labels = np.asarray(super_labels)[idx]
        labels = list(np.asarray(labels)[idx])
        # FIX: convert to an ndarray *before* fancy indexing; `true_label`
        # and `single_cell_pt_markov` may be plain Python lists, and
        # list[ndarray] raises TypeError (matches the form used elsewhere
        # in this file).
        true_label = list(np.asarray(true_label)[idx])
        sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
        embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
        print('tsne downsampled size', embedding.shape)
    else:
        embedding = TSNE().fit_transform(pc)  # (adata_counts.obsm['X_pca'])
        print('tsne input size', adata_counts.obsm['X_pca'].shape)
        # embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
        idx = np.random.randint(len(labels), size=len(labels))
        # FIX: sc_pt_markov was never set on this branch but is used below
        # (NameError); mirror the assignment made in main_Bcell's else-branch.
        sc_pt_markov = p1.single_cell_pt_markov
    print('end tsne')
    knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
    draw_trajectory_gams(embedding, ci_list, labels, super_labels, super_edges,
                         p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                         final_super_terminal=p0.terminal_clusters, sub_terminal_clusters=p1.terminal_clusters,
                         title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
    plt.show()
    draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
                           p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                           final_super_terminal=p0.terminal_clusters,
                           title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
    plt.show()
    # Side-by-side scatter: ground-truth groups vs. Markov pseudotime.
    num_group = len(set(true_label))
    line = np.linspace(0, 1, num_group)
    f, (ax1, ax3) = plt.subplots(1, 2, sharey=True)
    for color, group in zip(line, set(true_label)):
        where = np.where(np.asarray(true_label) == group)[0]
        ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
                    c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
    ax1.legend(fontsize=6)
    ax1.set_title('true labels')
    ax3.set_title("Markov Sim PT ncomps:" + str(pc.shape[1]) + '. knn:' + str(knn))
    ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
    plt.show()
    # draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p1.full_graph_shortpath, idx, adata_counts.obsm['X_pca'][:, 0:ncomps])
    plt.show()
def main_Bcell():
    """Run the two-pass PARC trajectory pipeline on the B-cell time-course data.

    Reads the B-cell count/attribute tables from fixed paths, derives time
    labels and replicate IDs from the column names, preprocesses with a
    Zheng17-style recipe, runs a coarse PARC pass (p0) followed by a fine
    pass (p1) seeded with p0's terminal clusters, and plots gene-expression
    trends, trajectories and Markov pseudotime. Side effects only.
    """

    def run_zheng(adata, min_counts=3, n_top_genes=500, do_log=True):
        # Zheng17-style preprocessing: filter, normalise, select HVGs, scale.
        sc.pp.filter_genes(adata, min_counts=min_counts)
        # sc.pp.filter_genes(adata, min_cells=3)# only consider genes with more than 1 count
        sc.pp.normalize_per_cell(  # normalize with total UMI count per cell
            adata, key_n_counts='n_counts_all'
        )
        filter_result = sc.pp.filter_genes_dispersion(  # select highly-variable genes
            adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False
        )
        adata = adata[:, filter_result.gene_subset]  # subset the genes
        sc.pp.normalize_per_cell(adata)  # renormalize after filtering
        if do_log: sc.pp.log1p(adata)  # log transform: adata.X = log(adata.X + 1)
        sc.pp.scale(adata)  # scale to unit variance and shift to zero mean
        return adata

    def run_paga_func_Bcell(adata_counts1, ncomps, knn, embedding):
        # PAGA/DPT baseline for comparison; plots layouts and pseudotime.
        # print('npwhere',np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0])
        adata_counts = adata_counts1.copy()
        sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
        adata_counts.uns['iroot'] = 33  # np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0]
        sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps)  # 4
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')  # force-directed layout
        start_dfmap = time.time()
        sc.tl.diffmap(adata_counts, n_comps=ncomps)
        print('time taken to get diffmap given knn', time.time() - start_dfmap)
        sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap')  # 4
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
        sc.tl.leiden(adata_counts, resolution=1.0)
        sc.tl.paga(adata_counts, groups='leiden')
        # sc.pl.paga(adata_counts, color=['louvain','group_id'])
        sc.tl.dpt(adata_counts, n_dcs=ncomps)
        sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
                   title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
                          'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
        sc.pl.draw_graph(adata_counts, color='dpt_pseudotime', legend_loc='on data')
        print('dpt format', adata_counts.obs['dpt_pseudotime'])
        plt.scatter(embedding[:, 0], embedding[:, 1], c=adata_counts.obs['dpt_pseudotime'].values, cmap='viridis')
        plt.title('PAGA DPT')
        plt.show()

    def run_palantir_func_Bcell(ad1, ncomps, knn, tsne_X, true_label):
        # Palantir baseline for comparison; plots clusters and Palantir results.
        ad = ad1.copy()
        tsne = pd.DataFrame(tsne_X, index=ad.obs_names, columns=['x', 'y'])
        norm_df_pal = pd.DataFrame(ad.X)
        # print('norm df', norm_df_pal)
        new = ['c' + str(i) for i in norm_df_pal.index]
        norm_df_pal.index = new
        pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
        sc.tl.pca(ad, svd_solver='arpack')
        dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
        ms_data = palantir.utils.determine_multiscale_space(dm_res)  # n_eigs is determined using eigengap
        print('ms data shape: determined using eigengap', ms_data.shape)
        # tsne = pd.DataFrame(tsnem)#palantir.utils.run_tsne(ms_data)
        tsne.index = new
        # print(type(tsne))
        str_true_label = pd.Series(true_label, index=norm_df_pal.index)
        palantir.plot.plot_cell_clusters(tsne, str_true_label)
        start_cell = 'c23'  # '#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
        pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
        palantir.plot.plot_palantir_results(pr_res, tsne, ncomps, knn)
        plt.show()

    def find_time(s):
        # Integer hour parsed from between the "Ik" prefix and the "h" marker
        # of a sample-column name.
        start = s.find("Ik") + len("Ik")
        end = s.find("h")
        return int(s[start:end])

    def find_cellID(s):
        # Replicate/cell identifier between the "h" marker and the first "_".
        start = s.find("h") + len("h")
        end = s.find("_")
        return s[start:end]

    Bcell = pd.read_csv('/home/shobi/Trajectory/Datasets/Bcell/genes_count_table.txt', sep='\t')
    gene_name = pd.read_csv('/home/shobi/Trajectory/Datasets/Bcell/genes_attr_table.txt', sep='\t')
    Bcell_columns = [i for i in Bcell.columns]
    # counts table is genes x samples; transpose to cells x genes for AnnData
    adata_counts = sc.AnnData(Bcell.values[:, 1:].T)
    Bcell_columns.remove('tracking_id')
    print(gene_name.shape, gene_name.columns)
    Bcell['gene_short_name'] = gene_name['gene_short_name']
    adata_counts.var_names = gene_name['gene_short_name']
    adata_counts.obs['TimeCellID'] = Bcell_columns
    # for i in Bcell_columns:
    # print(i)
    # adata_counts.var_names_make_unique()
    time_list = [find_time(s) for s in Bcell_columns]
    ID_list = [find_cellID(s) for s in Bcell_columns]
    adata_counts.obs['group_id'] = [str(i) for i in time_list]
    ID_dict = {}
    color_dict = {}
    for j, i in enumerate(list(set(ID_list))):
        ID_dict.update({i: j})
    for j, i in enumerate(list(set(time_list))):
        color_dict.update({i: j})
    print('shape of raw data', adata_counts.shape)
    # sc.pp.filter_genes(adata_counts, min_counts=3)
    # keep an unfiltered copy so marker-gene expression can be plotted later
    adata_counts_unfiltered = adata_counts.copy()
    Bcell_marker_gene_list = ['Myc', 'Igll1', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
    # NOTE(review): this loop only prints the marker names and resolves their
    # column indices; `loc_gata` is unused here — presumably a sanity check
    # that all markers exist before filtering.
    for gene_name in Bcell_marker_gene_list:
        print('gene name', gene_name)
        loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
    adata_counts = run_zheng(adata_counts, n_top_genes=1000, min_counts=10, do_log=True)
    print('adata counts shape', adata_counts.shape)
    # sc.pp.recipe_zheng17(adata_counts)
    ncomps = 100  # (ncomp=50, knn=20 gives nice results. use 10PCs for visualizing)
    knn = 20
    random_seed = 1
    sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
    jet = cm.get_cmap('viridis', len(set(time_list)))
    cmap_ = jet(range(len(set(time_list))))
    jet2 = cm.get_cmap('jet', len(set(ID_list)))
    cmap2_ = jet2(range(len(set(ID_list))))
    # color_dict = {"0": [0], "2": [1], "6": [2], "12": [3], "18": [4], "24": [5]}
    embedding = umap.UMAP(random_state=42, n_neighbors=12, init='random').fit_transform(
        adata_counts.obsm['X_pca'][:, 0:5])
    '''
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    for i in list(set(time_list)):
        loc = np.where(np.asarray(time_list) == i)
        ax1.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap_[color_dict[i]], alpha=1, label=str(i))
    ax1.set_title('true labels')
    ax1.legend()
    for i in range(embedding.shape[0]):
        ax2.scatter(embedding[i, 0], embedding[i, 1], c='blue', alpha=0.5)
        ax2.text(embedding[i, 0], embedding[i, 1], str(i))
    '''
    '''
    for i, j in enumerate(list(set(ID_list))):
        loc = np.where(np.asarray(ID_list) == j)
        if 'r'in j: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j), edgecolors = 'black' )
        else: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j))
    '''
    # plt.show()
    true_label = time_list
    # run_paga_func_Bcell(adata_counts, ncomps, knn, embedding)
    # run_palantir_func_Bcell(adata_counts, ncomps, knn, embedding, true_label)
    print('input has shape', adata_counts.obsm['X_pca'].shape)
    # Coarse PARC pass: finds super-clusters and their terminal states.
    p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
              too_big_factor=0.3, dataset='bcell',
              pseudotime=True, path="/home/shobi/Trajectory/Datasets/" + 'bcell' + "/", root=2,
              root_user=0, preserve_disconnected=True, random_seed=random_seed)  # *.4#root_user = 34
    p0.run_PARC()
    super_labels = p0.labels
    '''
    umap_init_ = p0.graph_node_pos
    umap_init_ = np.asarray(umap_init_)
    umap_init = np.random.rand(len(super_labels),2)
    for clus_i in range(umap_init_.shape[0]):
        loc_clus_i = np.where(np.asarray(super_labels) == clus_i)[0]
        umap_init[loc_clus_i,0]=umap_init_[clus_i,0]
        umap_init[loc_clus_i, 1] = umap_init_[clus_i, 1]
    '''
    # HNSW index over the PCA space: snaps each terminal-cluster centroid to
    # its nearest actual cell.
    p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
    p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=100, M=16)
    p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
    p.set_ef(30)
    tsi_list = []  # find the single-cell which is nearest to the average-location of a terminal cluster in PCA space (
    for tsi in p0.terminal_clusters:
        loc_i = np.where(np.asarray(p0.labels) == tsi)[0]
        val_pt = [p0.single_cell_pt_markov[i] for i in loc_i]
        th_pt = np.percentile(val_pt, 50)  # 50
        # keep only the later-pseudotime half of the terminal cluster's cells
        loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
        temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
        labelsq, distances = p.knn_query(temp, k=1)
        print(labelsq[0])
        tsi_list.append(labelsq[0][0])
    # Fine PARC pass seeded with the coarse pass's terminal cells/clusters.
    p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=1, dist_std_local=0.15, knn=knn,
              too_big_factor=0.05,
              path="/home/shobi/Trajectory/Datasets/" + "bcell/", pseudotime=True, root=1,
              super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
              super_terminal_cells=tsi_list, root_user=0,
              x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='bcell',
              super_terminal_clusters=p0.terminal_clusters, random_seed=random_seed)
    # in the case of TOY DATA: P1 WORKS MUCH BETTER WHEN ONLY USING SUPER_TERMINAL_CLUS... O/W need to omit pruning
    p1.run_PARC()
    labels = p1.labels
    super_edges = p0.edgelist
    print('p1 markov times', p1.markov_hitting_times)
    print('p1 markov times', p1.single_cell_pt_markov)
    # plot gene expression vs. pseudotime
    Bcell_marker_gene_list = ['Igll1', 'Myc', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
    for gene_name in Bcell_marker_gene_list:
        loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
        print('loc gata', loc_gata)
        magic_ad = adata_counts_unfiltered.X[:, loc_gata]
        p1.get_gene_expression(magic_ad, gene_name)
    n_downsample = 500
    if len(labels) > n_downsample:
        # idx = np.random.randint(len(labels), size=900)
        np.random.seed(2357)
        idx = np.random.choice(a=np.arange(0, len(labels)), size=900, replace=False, p=None)
        super_labels = np.asarray(super_labels)[idx]
        labels = list(np.asarray(labels)[idx])
        # FIX: convert to an ndarray *before* fancy indexing; `true_label`
        # (= time_list) and `single_cell_pt_markov` are plain Python lists,
        # and list[ndarray] raises TypeError (matches the form used in the
        # rest of this file).
        true_label = list(np.asarray(true_label)[idx])
        sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
        embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
        print('tsne downsampled size', embedding.shape)
    else:
        # embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:5]) # (adata_counts.obsm['X_pca'])
        print('tsne input size', adata_counts.obsm['X_pca'].shape)
        # embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
        idx = np.arange(0, len(labels))  # np.random.randint(len(labels), size=len(labels))
        sc_pt_markov = p1.single_cell_pt_markov
    # embedding = umap.UMAP(random_state=42, n_neighbors=15, init=umap_init).fit_transform( adata_counts.obsm['X_pca'][:, 0:5])
    knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
    draw_trajectory_gams(embedding, ci_list, labels, super_labels, super_edges,
                         p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                         final_super_terminal=p1.revised_super_terminal_clusters,
                         sub_terminal_clusters=p1.terminal_clusters,
                         title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
    plt.show()
    '''
    draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
                           p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                           final_super_terminal=p0.terminal_clusters,
                           title_str='Markov Hitting Times (polyfit)', ncomp=ncomps)
    plt.show()
    '''
    draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p1.full_graph_shortpath, idx,
                                          adata_counts.obsm['X_pca'][:, 0:ncomps])
    plt.show()
def main():
    """Entry point: dispatch to the driver for the hard-coded dataset choice."""
    dataset = 'Human'  # alternatives: 'bcell', 'Toy'
    if dataset == 'bcell':
        main_Bcell()
    elif dataset == 'Human':
        main_Human(ncomps=100, knn=30, p0_random_seed=4, run_palantir_func=False)
    else:
        mainToy()
# Script entry point: run the selected dataset pipeline only when this file
# is executed directly (not when imported as a module).
if __name__ == '__main__':
    main()
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, csgraph
import scipy
import igraph as ig
import leidenalg
import time
import hnswlib
import matplotlib.pyplot as plt
import matplotlib
import math
import multiprocessing
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy import sparse
from sklearn.metrics.pairwise import euclidean_distances
import umap
import scanpy as sc
from MulticoreTSNE import MulticoreTSNE as TSNE
import random
from scipy.sparse.csgraph import connected_components
import pygam as pg
import matplotlib.colors as colors
import matplotlib.cm as cm
import palantir
def plot_sc_pb(ax, embedding, prob, ti):
    """Scatter single cells on a 2-D embedding, coloured by branch probability.

    Cells are coloured with viridis scaled to [0, max(prob)]; the alpha
    channel is stepped so low-probability cells fade into the background
    (<=0.3 -> 0.2, <=0.5 -> 0.5, >0.5 -> 0.8).

    ax        -- matplotlib Axes to draw on.
    embedding -- array of 2-D coordinates, one row per cell.
    prob      -- per-cell probability of reaching terminal state `ti`.
    ti        -- terminal-state identifier, used only in the title.
    """
    # FIX: the colormap/normalisation lines were truncated to
    # "alize(vmin=...)" in this copy; reconstructed using the module's
    # `cm` / `colors` matplotlib aliases.
    cmap = cm.get_cmap('viridis')
    norm = colors.Normalize(vmin=0, vmax=np.max(prob))
    prob = np.asarray(prob)
    c = cmap(norm(prob))
    c = c.reshape(-1, 4)
    # Step the alpha channel by probability band so weak cells fade out.
    loc_c = np.where(prob <= 0.3)[0]
    c[loc_c, 3] = 0.2
    loc_c = np.where((prob > 0.3) & (prob <= 0.5))[0]
    c[loc_c, 3] = 0.5
    loc_c = np.where((prob > 0.5) & (prob <= 0.7))[0]
    c[loc_c, 3] = 0.8
    loc_c = np.where((prob > 0.7))[0]
    c[loc_c, 3] = 0.8
    ax.scatter(embedding[:, 0], embedding[:, 1], c=c, s=10, cmap='viridis',
               edgecolors='none')
    ax.set_title('Target: ' + str(ti))
def simulate_multinomial(vmultinomial):
    """Sample one state index from the categorical distribution `vmultinomial`.

    A single uniform draw r is located within the cumulative distribution
    (with a leading 0 prepended): the returned index i is the largest one
    whose cumulative mass is still below r.
    """
    draw = np.random.uniform(0.0, 1.0)
    cumulative = np.insert(np.cumsum(vmultinomial), 0, 0)
    below = np.flatnonzero(cumulative < draw)
    nextState = below[-1]
    return nextState
def sc_loc_ofsuperCluster_PCAspace(p0, p1,idx):
    """Map each p0 super-cluster to a representative cell in the downsampled set.

    For every super-cluster label in ``p0.labels`` a representative sample is
    found in PCA space (terminal clusters use the paired p1 sub-cluster,
    roots use low-pseudotime cells, all others use the cluster centroid);
    each representative is then re-located within the downsampled subset
    ``idx`` via a fresh HNSW index over the downsampled points.

    p0, p1 -- PARC result objects (coarse and fine pass respectively).
    idx    -- indices of the downsampled cells.
    Returns a list with one downsampled-space index per super-cluster.
    """
    print("dict of terminal state pairs, Super: sub: ", p1.dict_terminal_super_sub_pairs)
    p0_labels = np.asarray(p0.labels)
    p1_labels = np.asarray(p1.labels)
    p1_sc_markov_pt = p1.single_cell_pt_markov
    ci_list = []  # one representative full-dataset index per super-cluster
    for ci in list(set(p0.labels)):
        if ci in p1.revised_super_terminal_clusters:
            # Terminal super-cluster: use the cells of its paired p1 sub-cluster.
            loc_i = np.where(p1_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
            val_pt = [p1_sc_markov_pt[i] for i in loc_i]
            # percentile 0 keeps every cell; kept for symmetry with the root branch
            th_pt = np.percentile(val_pt, 0)
            loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
            temp = np.mean(p0.data[loc_i], axis=0)  # cluster centroid in PCA space
            labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
            ci_list.append(labelsq[0][0])
        elif ci in p0.root:
            # Root super-cluster: restrict to the earliest 20% of pseudotime.
            loc_root = np.where(np.asarray(p0.root) == ci)[0][0]
            print('loc root', loc_root)
            p1_root_label = p1.root[loc_root]
            loc_i = np.where(np.asarray(p1_labels) == p1_root_label)[0]
            val_pt = [p1_sc_markov_pt[i] for i in loc_i]
            th_pt = np.percentile(val_pt, 20)
            loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] <= th_pt]
            temp = np.mean(p0.data[loc_i], axis=0)
            labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
            ci_list.append(labelsq[0][0])
        else:
            # Ordinary cluster: nearest neighbour of the cluster centroid.
            loc_i = np.where(p0_labels == ci)[0]
            temp = np.mean(p0.data[loc_i], axis=0)
            labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
            ci_list.append(labelsq[0][0])
    # Re-express each representative as an index into the downsampled subset by
    # querying an HNSW index built over the downsampled points only.
    X_ds = p0.data[idx]
    p_ds = hnswlib.Index(space='l2', dim=p0.data.shape[1])
    p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
    p_ds.add_items(X_ds)
    p_ds.set_ef(50)
    new_superclust_index_ds = []
    for item in ci_list:
        labelsq, distances = p_ds.knn_query(p0.data[item, :], k=1)
        new_superclust_index_ds.append(labelsq[0][0])
    return new_superclust_index_ds
def sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx):
    """For each coarse (p0) cluster, find a representative single cell in the
    2-D embedded space and return both the hnsw index built on the embedding
    and the list of representative cell indices.

    embedding : 2-D array of embedded coordinates (one row per downsampled cell).
    p0, p1 : coarse/fine results objects (labels, pseudotime, root and
             terminal-cluster attributes are read).
    idx : indices of the downsampled samples.
    """
    knn_hnsw = hnswlib.Index(space='l2', dim=embedding.shape[1])
    knn_hnsw.init_index(max_elements=embedding.shape[0], ef_construction=200, M=16)
    knn_hnsw.add_items(embedding)
    knn_hnsw.set_ef(50)
    p0_labels = np.asarray(p0.labels)[idx]
    p1_labels = np.asarray(p1.labels)[idx]
    p1_sc_markov_pt = list(np.asarray(p1.single_cell_pt_markov)[idx])
    ci_list = []
    for ci in list(set(p0.labels)):
        if ci in p1.revised_super_terminal_clusters:
            # terminal cluster: keep only the latest 20% of cells by pseudotime
            loc_i = np.where(p1_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
            val_pt = [p1_sc_markov_pt[i] for i in loc_i]
            th_pt = np.percentile(val_pt, 80)
            loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
            x = [embedding[xi, 0] for xi in loc_i]
            y = [embedding[yi, 1] for yi in loc_i]
        elif ci in p0.root:
            # root cluster: keep only the earliest 20% of cells by pseudotime
            loc_root = np.where(np.asarray(p0.root) == ci)[0][0]
            print('loc root', loc_root)
            p1_root_label = p1.root[loc_root]
            loc_i = np.where(np.asarray(p1_labels) == p1_root_label)[0]
            val_pt = [p1_sc_markov_pt[i] for i in loc_i]
            th_pt = np.percentile(val_pt, 20)
            loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] <= th_pt]
            x = [embedding[xi, 0] for xi in loc_i]
            y = [embedding[yi, 1] for yi in loc_i]
        else:
            loc_i = np.where(p0_labels == ci)[0]
            x = [embedding[xi, 0] for xi in loc_i]
            y = [embedding[yi, 1] for yi in loc_i]
        # representative cell = nearest embedded neighbor of the mean location
        labelsq, distancesq = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
        ci_list.append(labelsq[0][0])
    return knn_hnsw, ci_list
def draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, G, idx, X_data):
    """Plot, for every terminal cluster, the single-cell branch probabilities and
    overlay a smoothed shortest path (Dijkstra on the single-cell knn graph G)
    from the lineage root cell to the terminal cluster's representative cell.

    p1 : fine-grained results object; reads .single_cell_bp, .labels,
         .single_cell_pt_markov, .connected_comp_labels, .root,
         .terminal_clusters and .knn_struct.
    embedding : 2-D coordinates of the downsampled cells.
    knn_hnsw : hnsw index built on `embedding` (used for location queries).
    G : igraph knn graph over ALL cells in full PCA space (edge attr 'weight').
    idx : indices of the downsampled cells into X_data.
    X_data : full PCA-space data matrix.
    One matplotlib figure is created per terminal cluster.
    """
    # knn_hnsw is the knn made in the embedded space used for query
    # X_data is the PCA space with all samples
    # idx is the selected indices of the downsampled samples
    y_root = []
    x_root = []
    root1_list = []
    p1_sc_bp = p1.single_cell_bp[idx, :]
    p1_labels = np.asarray(p1.labels)[idx]
    p1_sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
    p1_cc = p1.connected_comp_labels
    X_ds = X_data[idx, :]
    # hnsw over the downsampled PCA space, used to project path points back
    # into downsampled-index space
    p_ds = hnswlib.Index(space='l2', dim=X_ds.shape[1])
    p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
    p_ds.add_items(X_ds)
    p_ds.set_ef(50)
    # locate one representative root cell per connected component
    for ii, r_i in enumerate(p1.root):
        loc_i = np.where(p1_labels == p1.root[ii])[0]
        x = [embedding[xi, 0] for xi in loc_i]
        y = [embedding[yi, 1] for yi in loc_i]
        labels_root, distances_root = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
                                                         k=1)  # sc location in embedded space of root cell
        x_root.append(embedding[labels_root, 0][0])
        y_root.append(embedding[labels_root, 1][0])
        labelsroot1, distances1 = p1.knn_struct.knn_query(X_ds[labels_root[0][0], :],
                                                          k=1)  # index of sc-root-cell in the full-PCA space. Need for path
        root1_list.append(labelsroot1[0][0])
    # single-cell branch probability evolution probability
    for i, ti in enumerate(p1.terminal_clusters):
        print('i, ti, p1.root, p1.connected', i, ti, p1.root, p1_cc)
        print('root1list', root1_list)
        root_i = p1.root[p1_cc[ti]]
        xx_root = x_root[p1_cc[ti]]
        yy_root = y_root[p1_cc[ti]]
        fig, ax = plt.subplots()
        plot_sc_pb(ax, embedding, p1_sc_bp[:, i], ti)
        # representative terminal cell: mean location of the later half
        # (by pseudotime) of the terminal cluster's cells
        loc_i = np.where(p1_labels == ti)[0]
        val_pt = [p1_sc_pt_markov[i] for i in loc_i]
        th_pt = np.percentile(val_pt, 50)  # 50
        loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
        x = [embedding[xi, 0] for xi in
             loc_i]  # location of sc nearest to average location of terminal clus in the EMBEDDED space
        y = [embedding[yi, 1] for yi in loc_i]
        labels, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
                                               k=1)  # knn_hnsw is knn of embedded space
        x_sc = embedding[labels[0], 0]  # terminal sc location in the embedded space
        y_sc = embedding[labels[0], 1]
        start_time = time.time()
        labelsq1, distances1 = p1.knn_struct.knn_query(X_ds[labels[0][0], :],
                                                       k=1)  # find the nearest neighbor in the PCA-space full graph
        print('labels root and labels[0]', root1_list[p1_cc[ti]], labels[0])
        ## path = G.get_shortest_paths(labels_root[0][0], to=labels[0][0], weights='weight') #G is the knn of all sc points
        # path = G.get_shortest_paths(labelsroot1[0][0], to=labelsq1[0][0], weights='weight') # G is the knn of all sc points
        path = G.get_shortest_paths(root1_list[p1_cc[ti]], to=labelsq1[0][0],
                                    weights='weight')  # G is the knn of all sc points
        path_idx = []  # find the single-cell which is nearest to the average-location of a terminal cluster
        # get the nearest-neighbor in this downsampled PCA-space graph. These will make the new path-way points
        for pii in path[0]:
            labelsq, distances = p_ds.knn_query(X_data[pii, :], k=1)
            # print('location of pathway point in idx-space', labelsq[0][0])
            path_idx.append(labelsq[0][0])
        print(f"get_shortest_paths time: {time.time()-start_time}")
        print('path', path)
        print('new path indices', path_idx)
        path = path_idx
        # orange_m holds (x, y, scaled-pseudotime) per path point
        n_orange = len(path)
        orange_m = np.zeros((n_orange, 3))
        for enum_point, point in enumerate(path):
            #ax.text(embedding[point, 0], embedding[point, 1], 'D ' + str(enum_point), color='blue', fontsize=8)
            orange_m[enum_point, 0] = embedding[point, 0]
            orange_m[enum_point, 1] = embedding[point, 1]
            orange_m[enum_point, 2] = p1_sc_pt_markov[ point]
        from sklearn.neighbors import NearestNeighbors
        k_orange = 3  # increasing can smoothen in simple trajectories (Toy)
        nbrs = NearestNeighbors(n_neighbors=k_orange, algorithm='ball_tree').fit(orange_m[:, 0:])
        distances, indices = nbrs.kneighbors(orange_m[:, 0:])
        row_list = []
        col_list = []
        dist_list = []
        for i_or in range(n_orange):
            for j_or in range(1, k_orange):
                row_list.append(i_or)
                col_list.append(indices[i_or, j_or])
                dist_list.append(distances[i_or, j_or])
        print('target number ' + str(ti))
        orange_adjacency_knn = csr_matrix((np.array(dist_list), (np.array(row_list), np.array(col_list))),
                                          shape=(n_orange, n_orange))
        print('orange adj knn shape', orange_adjacency_knn.shape)
        n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False, return_labels=True)
        # rescale pseudotime quadratically before reconnecting components
        for enum_point, point in enumerate(path):  # [0]):
            orange_m[enum_point, 2] = p1_sc_pt_markov[point] * p1_sc_pt_markov[
                point] * 2  # p1.single_cell_pt_markov[point] * p1.single_cell_pt_markov[point]*2
        # greedily connect disconnected path components by their closest pair
        # of points until the knn graph over the path is a single component
        while n_mst > 1:
            comp_root = comp_labels_mst[0]
            # print('comp-root', comp_root)
            min_ed = 9999999
            loc_comp_i = np.where(comp_labels_mst == comp_root)[0]
            loc_comp_noti = np.where(comp_labels_mst != comp_root)[0]
            # print('compi', loc_comp_i)
            # print('comp_noti', loc_comp_noti)
            orange_pt_val = [orange_m[cc, 2] for cc in loc_comp_i]
            loc_comp_i_revised = [loc_comp_i[cc] for cc in range(len(orange_pt_val)) if
                                  orange_pt_val[cc] >= np.percentile(orange_pt_val, 70)]
            for nn_i in loc_comp_i_revised:
                ed = euclidean_distances(orange_m[nn_i, :].reshape(1, -1), orange_m[loc_comp_noti])
                if np.min(ed) < min_ed:
                    ed_where_min = np.where(ed[0] == np.min(ed))[0][0]
                    # print('ed where min', ed_where_min, np.where(ed[0] == np.min(ed)))
                    min_ed = np.min(ed)
                    ed_loc_end = loc_comp_noti[ed_where_min]
                    ed_loc_start = nn_i
            # print('min ed', min_ed)
            print('Connecting components before sc-bp-GAM: the closest pair of points', ed_loc_start, ed_loc_end)
            orange_adjacency_knn[ed_loc_start, ed_loc_end] = min_ed
            n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False,
                                                          return_labels=True)
        if n_mst == 1:  # if no disconnected components in the graph
            # shortest path through the path-point graph, then a GAM fit per
            # segment to draw a smooth curve with direction arrows
            (orange_sources, orange_targets) = orange_adjacency_knn.nonzero()
            orange_edgelist = list(zip(orange_sources.tolist(), orange_targets.tolist()))
            G_orange = ig.Graph(n=orange_adjacency_knn.shape[0], edges=orange_edgelist,
                                edge_attrs={'weight': orange_adjacency_knn.data.tolist()}, )
            path_orange = G_orange.get_shortest_paths(0, to=orange_adjacency_knn.shape[0] - 1, weights='weight')[0]
            print('path orange', path_orange)
            len_path_orange = len(path_orange)
            for path_i in range(len_path_orange - 1):
                path_x_start = orange_m[path_orange[path_i], 0]
                path_x_end = orange_m[path_orange[path_i + 1], 0]
                orange_x = [orange_m[path_orange[path_i], 0], orange_m[path_orange[path_i + 1], 0]]
                orange_minx = min(orange_x)
                orange_maxx = max(orange_x)
                orange_y = [orange_m[path_orange[path_i], 1], orange_m[path_orange[path_i + 1], 1]]
                orange_miny = min(orange_y)
                orange_maxy = max(orange_y)
                orange_embedding_sub = embedding[
                    ((embedding[:, 0] <= orange_maxx) & (embedding[:, 0] >= orange_minx)) & (
                            (embedding[:, 1] <= orange_maxy) & ((embedding[:, 1] >= orange_miny)))]
                print('orange sub size', orange_embedding_sub.shape)
                if (orange_maxy - orange_miny > 5) | (orange_maxx - orange_minx > 5):
                    orange_n_reps = 150
                else:
                    orange_n_reps = 100
                # repeat the segment endpoints so the GAM is anchored to them
                or_reps = np.repeat(np.array([[orange_x[0], orange_y[0]]]), orange_n_reps, axis=0)
                orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
                or_reps = np.repeat(np.array([[orange_x[1], orange_y[1]]]), orange_n_reps, axis=0)
                orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
                orangeGam = pg.LinearGAM(n_splines=8, spline_order=3, lam=10).fit(orange_embedding_sub[:, 0],
                                                                                  orange_embedding_sub[:, 1])
                nx_spacing = 100
                orange_GAM_xval = np.linspace(orange_minx, orange_maxx, nx_spacing * 2)
                yg_orange = orangeGam.predict(X=orange_GAM_xval)
                ax.plot(orange_GAM_xval, yg_orange, color='dimgrey', linewidth=2, zorder=3, linestyle=(0, (5, 2, 1, 2)),
                        dash_capstyle='round')
                cur_x1 = orange_GAM_xval[-1]
                cur_y1 = yg_orange[-1]
                cur_x2 = orange_GAM_xval[0]
                cur_y2 = yg_orange[0]
                if path_i >= 1:
                    # join this segment to the previous one at the closest pair
                    # of segment endpoints (prev_* set at end of prior iteration)
                    for mmddi in range(2):
                        xy11 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
                                                   np.array([prev_x1, prev_y1]).reshape(1, -1))
                        xy12 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
                                                   np.array([prev_x2, prev_y2]).reshape(1, -1))
                        xy21 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
                                                   np.array([prev_x1, prev_y1]).reshape(1, -1))
                        xy22 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
                                                   np.array([prev_x2, prev_y2]).reshape(1, -1))
                        mmdd_temp_array = np.asarray([xy11, xy12, xy21, xy22])
                        mmdd_loc = np.where(mmdd_temp_array == np.min(mmdd_temp_array))[0][0]
                        if mmdd_loc == 0:
                            ax.plot([cur_x1, prev_x1], [cur_y1, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
                                    dash_capstyle='round')
                        if mmdd_loc == 1:
                            ax.plot([cur_x1, prev_x2], [cur_y1, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
                                    dash_capstyle='round')
                        if mmdd_loc == 2:
                            ax.plot([cur_x2, prev_x1], [cur_y2, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
                                    dash_capstyle='round')
                        if mmdd_loc == 3:
                            ax.plot([cur_x2, prev_x2], [cur_y2, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
                                    dash_capstyle='round')
                if (path_x_start > path_x_end): direction_arrow_orange = -1  # going LEFT
                if (path_x_start <= path_x_end): direction_arrow_orange = 1  # going RIGHT
                if (abs(
                        path_x_start - path_x_end) > 2.5):  # |(abs(orange_m[path_i, 2] - orange_m[path_i + 1, 1]) > 1)):
                    if (direction_arrow_orange == -1):  # & :
                        ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
                                 orange_GAM_xval[nx_spacing - 1] - orange_GAM_xval[nx_spacing],
                                 yg_orange[nx_spacing - 1] - yg_orange[nx_spacing], shape='full', lw=0,
                                 length_includes_head=True,
                                 head_width=0.5, color='dimgray', zorder=3)
                    if (direction_arrow_orange == 1):  # &(abs(orange_m[path_i,0]-orange_m[path_i+1,0])>0.5):
                        ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
                                 orange_GAM_xval[nx_spacing + 1] - orange_GAM_xval[nx_spacing],
                                 yg_orange[nx_spacing + 1] - yg_orange[nx_spacing], shape='full', lw=0,
                                 length_includes_head=True,
                                 head_width=0.5,
                                 color='dimgray', zorder=3)
                prev_x1 = cur_x1
                prev_y1 = cur_y1
                prev_x2 = cur_x2
                prev_y2 = cur_y2
        ax.scatter(x_sc, y_sc, color='pink', zorder=3, label=str(ti), s=22)
        ax.text(x_sc + 0.5, y_sc + 0.5, 'TS ' + str(ti), color='black')
    return
def get_biased_weights(edgelist, weights, pt, round_no=1):
    """Bias the edge weights of a cluster graph along the pseudotime direction.

    Each weight is scaled by a generalised logistic function of the pseudotime
    difference across the edge, so edges running "forward" in pseudotime keep
    most of their weight while backward edges are damped.
    https://en.wikipedia.org/wiki/Generalised_logistic_function

    Parameters
    ----------
    edgelist : list of (start, end) node-index tuples.
    weights : array-like of edge weights aligned with `edgelist`. Not modified
        (a copy is taken internally).
    pt : per-node pseudotime values, indexable by the node indices in edgelist.
    round_no : biasing round; round 1 uses a mild slope (b=1), later rounds a
        much steeper one (b=20).

    Returns
    -------
    list of biased edge weights, in the same order as `edgelist`.
    """
    # small nu means less biasing (0.5 is quite mild)
    # larger nu (in our case 1/nu) means more aggressive biasing
    print(len(edgelist), len(weights))
    # Work on a float copy: the original implementation assigned into the
    # caller's array in place, silently mutating it.
    weights = np.array(weights, dtype=float)
    bias_weight = []
    if round_no == 1:
        b = 1  # mild slope on the first round
    else:
        b = 20  # steep slope; 20 is used for all the CD34 Human cells
    # generalised-logistic parameters (K/(C+exp(b*(t+c)))**nu)
    K = 1
    c = 0
    C = 1
    nu = 1
    high_weights_th = np.mean(weights)
    high_pt_th = np.percentile(np.asarray(pt), 80)
    loc_high_weights = np.where(weights > high_weights_th)[0]
    loc_high_pt = np.where(np.asarray(pt) > high_pt_th)[0]
    print('weight hi th', high_weights_th)
    print('loc hi pt', loc_high_pt)
    print('edges of high weight', [edgelist[i] for i in loc_high_weights])
    # damp unusually strong edges that touch a late-pseudotime node
    for i in loc_high_weights:
        start = edgelist[i][0]
        end = edgelist[i][1]
        if (start in loc_high_pt) | (end in loc_high_pt):
            weights[i] = 0.5 * np.mean(weights)
    # clip extreme weights to the 10th-90th percentile band
    upper_lim = np.percentile(weights, 90)  # 80
    lower_lim = np.percentile(weights, 10)  # 20
    weights = [i if i <= upper_lim else upper_lim for i in weights]
    weights = [i if i >= lower_lim else lower_lim for i in weights]
    for i, (start, end) in enumerate(edgelist):
        Pt_a = pt[start]
        Pt_b = pt[end]
        P_ab = weights[i]
        t_ab = Pt_a - Pt_b
        # close to 1 for forward (t_ab < 0) edges, -> 0 for backward edges
        Bias_ab = K / ((C + math.exp(b * (t_ab + c)))) ** nu
        new_weight = (Bias_ab * P_ab)
        bias_weight.append(new_weight)
    print('original weights', len(weights), list(enumerate(zip(edgelist, weights))))
    print('bias weights', list(enumerate(zip(edgelist, bias_weight))))
    print('length bias weights', len(bias_weight))
    return list(bias_weight)
def expected_num_steps(start_i, N):
    """Expected number of steps before absorption, starting from state start_i.

    N is the fundamental matrix of an absorbing Markov chain; the row sums of
    N give the expected step count for each transient starting state.
    """
    ones_vec = np.ones(N.shape[0])
    steps_per_state = np.dot(N, ones_vec)
    return steps_per_state[start_i]
def absorption_probability(N, R, absorption_state_j):
    """Absorption probabilities of an absorbing Markov chain.

    Computes M = N @ R (fundamental matrix times the transient-to-absorbing
    block) and returns both M and its column for absorption_state_j, i.e. the
    probability of ending up in that absorbing state from each transient state.
    """
    absorb_probs = np.dot(N, R)
    prob_end_in_j = absorb_probs[:, absorption_state_j]
    return absorb_probs, prob_end_in_j
def most_likely_path(P_transition_absorbing_markov, start_i, end_i):
    """Intended to return the most likely path between two states of an
    absorbing Markov chain.

    NOTE(review): non-functional stub. ``graph_absorbing_markov`` is the
    integer 0 (the inline comment suggests an igraph with log-transformed
    weights was intended), so the ``shortest_path`` call below raises
    AttributeError if this function is ever executed.
    """
    graph_absorbing_markov = 0  # placeholder: ig() graph with log weights was intended
    shortest_path = graph_absorbing_markov.shortest_path(start_i, end_i)
    print('the shortest path beginning at ', start_i, 'and ending in ', end_i, 'is:')
    return shortest_path
def draw_trajectory_gams(X_dimred, sc_supercluster_nn, cluster_labels, super_cluster_labels, super_edgelist, x_lazy,
                         alpha_teleport,
                         projected_sc_pt, true_label, knn, ncomp, final_super_terminal, sub_terminal_clusters,
                         title_str="hitting times", ):
    """Two-panel trajectory figure: true labels (left) and pseudotime with
    GAM-smoothed, arrow-annotated super-cluster edges (right).

    X_dimred : 2-D embedding, one row per cell.
    sc_supercluster_nn : per-super-cluster representative cell indices into X_dimred.
    cluster_labels / super_cluster_labels : per-cell cluster assignments.
    super_edgelist : (start, end) super-cluster edges to draw.
    x_lazy, alpha_teleport : walk parameters, used only in the panel title.
    projected_sc_pt : per-cell pseudotime used for coloring and edge direction.
    final_super_terminal / sub_terminal_clusters : terminal super clusters and
        their paired sub-cluster ids (used for the 'TS' markers).
    """
    x = X_dimred[:, 0]
    y = X_dimred[:, 1]
    df = pd.DataFrame({'x': x, 'y': y, 'cluster': cluster_labels, 'super_cluster': super_cluster_labels,
                       'projected_sc_pt': projected_sc_pt},
                      columns=['x', 'y', 'cluster', 'super_cluster', 'projected_sc_pt'])
    df_mean = df.groupby('cluster', as_index=False).mean()
    sub_cluster_isin_supercluster = df_mean[['cluster', 'super_cluster']]
    print('sub_cluster_isin_supercluster', sub_cluster_isin_supercluster)
    sub_cluster_isin_supercluster = sub_cluster_isin_supercluster.sort_values(by='cluster')
    sub_cluster_isin_supercluster['int_supercluster'] = sub_cluster_isin_supercluster['super_cluster'].round(0).astype(
        int)
    print('sub_cluster_isin_supercluster', sub_cluster_isin_supercluster)
    print('final_super_terminal', final_super_terminal)
    df_super_mean = df.groupby('super_cluster', as_index=False).mean()
    pt = df_super_mean['projected_sc_pt'].values
    pt_int = [int(i) for i in pt]
    pt_str = [str(i) for i in pt_int]
    pt_sub = [str(int(i)) for i in df_mean['projected_sc_pt'].values]
    print('pt sub', pt_sub[0:20])
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    # left panel: ground-truth labels
    num_parc_group = len(set(true_label))
    line = np.linspace(0, 1, num_parc_group)
    for color, group in zip(line, set(true_label)):
        where = np.where(np.array(true_label) == group)[0]
        ax1.scatter(X_dimred[where, 0], X_dimred[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
    ax1.legend(fontsize=6)
    ax1.set_title('true labels, ncomps:' + str(ncomp) + '. knn:' + str(knn))
    # right panel: one GAM-smoothed curve per super-cluster edge
    for e_i, (start, end) in enumerate(super_edgelist):
        # orient each edge from earlier to later pseudotime
        if pt[start] >= pt[end]:
            temp = end
            end = start
            start = temp
        x_i_start = df[df['super_cluster'] == start]['x'].values  # groupby('cluster').mean()['x'].values
        y_i_start = df[df['super_cluster'] == start]['y'].values  # .groupby('cluster').mean()['y'].values
        x_i_end = df[df['super_cluster'] == end]['x'].values  # .groupby('cluster').mean()['x'].values
        y_i_end = df[df['super_cluster'] == end]['y'].values  # groupby('cluster').mean()['y'].values
        direction_arrow = 1
        super_start_x = X_dimred[sc_supercluster_nn[start], 0]  # df[df['super_cluster'] == start].mean()['x']
        super_end_x = X_dimred[sc_supercluster_nn[end], 0]  # df[df['super_cluster'] == end].mean()['x']
        super_start_y = X_dimred[sc_supercluster_nn[start], 1]  # df[df['super_cluster'] == start].mean()['y']
        super_end_y = X_dimred[sc_supercluster_nn[end], 1]  # df[df['super_cluster'] == end].mean()['y']
        if super_start_x > super_end_x: direction_arrow = -1
        ext_maxx = False
        minx = min(super_start_x, super_end_x)
        maxx = max(super_start_x, super_end_x)
        miny = min(super_start_y, super_end_y)
        maxy = max(super_start_y, super_end_y)
        x_val = np.concatenate([x_i_start, x_i_end])
        y_val = np.concatenate([y_i_start, y_i_end])
        # keep only the member cells inside the bounding box of the two endpoints
        idx_keep = np.where((x_val <= maxx) & (x_val >= minx))[
            0]  # np.where((X_dimred[:,0]<=maxx) & (X_dimred[:,0]>=minx))#
        idy_keep = np.where((y_val <= maxy) & (y_val >= miny))[
            0]  # np.where((X_dimred[:,1]<=maxy) & (X_dimred[:,1]>=miny))#
        idx_keep = np.intersect1d(idy_keep, idx_keep)
        x_val = x_val[idx_keep]  # X_dimred[idx_keep,0]#
        y_val = y_val[idx_keep]  # X_dimred[idx_keep,1]# y_val[idx_keep]
        print('start and end', start, '', end)
        super_mid_x = (super_start_x + super_end_x) / 2
        super_mid_y = (super_start_y + super_end_y) / 2
        from scipy.spatial import distance
        # anchor the GAM: replicate the endpoints (plus a midpoint for near-vertical edges)
        very_straight = False
        if abs(minx - maxx) <= 1:
            very_straight = True
            straight_level = 10
            noise = 0.01
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise, super_mid_x])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise, super_mid_y])
        else:
            straight_level = 3
            noise = 0.1  # 0.05
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise])
        for i in range(straight_level):  # DO THE SAME FOR A MIDPOINT TOO
            y_super = np.concatenate([y_super, y_super])
            x_super = np.concatenate([x_super, x_super])
        list_selected_clus = list(zip(x_val, y_val))
        if (len(list_selected_clus) >= 1) & (very_straight == True):
            dist = distance.cdist([(super_mid_x, super_mid_y)], list_selected_clus, 'euclidean')
            print('dist', dist)
            if len(list_selected_clus) >= 2:
                k = 2
            else:
                k = 1
            midpoint_loc = dist[0].argsort()[:k]  # np.where(dist[0]==np.min(dist[0]))[0][0]
            print('midpoint loc', midpoint_loc)
            midpoint_xy = []
            for i in range(k):
                midpoint_xy.append(list_selected_clus[midpoint_loc[i]])
            noise = 0.05
            print(midpoint_xy, 'is the midpoint between clus', pt[start], 'and ', pt[end])
            if k == 1:
                mid_x = np.array([midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][
                    0] - noise])  # ,midpoint_xy[1][0], midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array([midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][
                    1] - noise])  # ,midpoint_xy[1][1], midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            if k == 2:
                mid_x = np.array(
                    [midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][0] - noise, midpoint_xy[1][0],
                     midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array(
                    [midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][1] - noise, midpoint_xy[1][1],
                     midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            for i in range(3):
                mid_x = np.concatenate([mid_x, mid_x])
                mid_y = np.concatenate([mid_y, mid_y])
            x_super = np.concatenate([x_super, mid_x])
            y_super = np.concatenate([y_super, mid_y])
        x_val = np.concatenate([x_val, x_super])
        y_val = np.concatenate([y_val, y_super])
        x_val = x_val.reshape((len(x_val), -1))
        y_val = y_val.reshape((len(y_val), -1))
        xp = np.linspace(minx, maxx, 500)
        gam50 = pg.LinearGAM(n_splines=4, spline_order=3, lam=10).gridsearch(x_val, y_val)
        XX = gam50.generate_X_grid(term=0, n=500)
        preds = gam50.predict(XX)
        if ext_maxx == False:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # minx+3
        else:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # maxx-3
        # cc = ['black', 'red', 'blue', 'yellow', 'pink'][random.randint(0, 4)]
        ax2.plot(XX, preds, linewidth=1, c='dimgray')
        # med_loc = np.where(xp == np.median(xp[idx_keep]))[0]
        # NOTE(review): the arrow below indexes `preds` (predicted on XX, which
        # spans the range of x_val incl. noise) with an index derived from `xp`
        # (linspace over minx..maxx) -- the two grids differ; confirm intended.
        mean_temp = np.mean(xp[idx_keep])
        closest_val = xp[idx_keep][0]
        closest_loc = idx_keep[0]
        for i, xp_val in enumerate(xp[idx_keep]):
            if abs(xp_val - mean_temp) < abs(closest_val - mean_temp):
                closest_val = xp_val
                closest_loc = idx_keep[i]
        step = 1
        if direction_arrow == 1:  # smooth instead of preds
            ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc + step] - xp[closest_loc],
                      preds[closest_loc + step] - preds[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=.2, color='dimgray')  # , head_starts_at_zero = direction_arrow )
        else:
            ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc - step] - xp[closest_loc],
                      preds[closest_loc - step] - preds[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=.2, color='dimgray')
    x_cluster = df_mean['x']
    y_cluster = df_mean['y']
    num_parc_group = len(set(cluster_labels))
    # per-super-cluster marker styling: terminal clusters get a yellow edge and 'TS' label
    c_edge = []
    width_edge = []
    pen_color = []
    super_cluster_label = []
    terminal_count_ = 0
    dot_size = []
    for i in range(len(set(super_cluster_labels))):
        if i in final_super_terminal:
            print('super cluster', i, 'is a super terminal with sub_terminal cluster',
                  sub_terminal_clusters[terminal_count_])
            width_edge.append(2)
            c_edge.append('yellow')
            pen_color.append('black')
            super_cluster_label.append('TS' + str(sub_terminal_clusters[terminal_count_]))
            dot_size.append(60)
            terminal_count_ = terminal_count_ + 1
        else:
            width_edge.append(0)
            c_edge.append('black')
            pen_color.append('grey')
            super_cluster_label.append('')
            dot_size.append(40)
    # ax2.scatter(x_cluster, y_cluster, c='red') #doesnt visualize as well to just take the embedding cluster-mean x,y values
    # text annotations for the super cluster locations
    # for i, type in enumerate(pt_str):
    #    ax2.text(df_super_mean['x'][i], df_super_mean['y'][i], 'C' + str(i), weight='bold')
    # for i in range(len(x_cluster)):
    #    ax2.text(x_cluster[i], y_cluster[i], 'c' + str(i))
    ax2.set_title('lazy:' + str(x_lazy) + ' teleport' + str(alpha_teleport) + 'super_knn:' + str(knn))
    # ax2.set_title('super_knn:' + str(knn) )
    ax2.scatter(X_dimred[:, 0], X_dimred[:, 1], c=projected_sc_pt, cmap='viridis_r', alpha=0.5)
    # ax2.scatter(df_super_mean['x'], df_super_mean['y'], c='black', s=60, edgecolors = c_edge, linewidth = width_edge)
    count_ = 0
    for i, c, w, pc, dsz in zip(sc_supercluster_nn, c_edge, width_edge, pen_color, dot_size):
        ax2.scatter(X_dimred[i, 0], X_dimred[i, 1], c='black', s=dsz, edgecolors=c, linewidth=w)
        ax2.text(X_dimred[i, 0] + 0.5, X_dimred[i, 1] + 0.5, super_cluster_label[count_],
                 color=pc)  # using the SC_NN location is good
        count_ = count_ + 1
    plt.title(title_str)
    return
def draw_trajectory_dimred(X_dimred, sc_supercluster_nn, cluster_labels, super_cluster_labels, super_edgelist, x_lazy,
                           alpha_teleport,
                           projected_sc_pt, true_label, knn, ncomp, final_super_terminal,
                           title_str="hitting times", ):
    """Two-panel trajectory figure like draw_trajectory_gams, but edge curves
    are fitted with a quadratic polynomial (np.polyfit) instead of a GAM.

    Parameters mirror draw_trajectory_gams; this variant also labels cluster
    and super-cluster centroids with their ids and (sub-cluster) pseudotime.
    """
    x = X_dimred[:, 0]
    y = X_dimred[:, 1]
    df = pd.DataFrame({'x': x, 'y': y, 'cluster': cluster_labels, 'super_cluster': super_cluster_labels,
                       'projected_sc_pt': projected_sc_pt},
                      columns=['x', 'y', 'cluster', 'super_cluster', 'projected_sc_pt'])
    df_mean = df.groupby('cluster', as_index=False).mean()
    sub_cluster_isin_supercluster = df_mean[['cluster', 'super_cluster']]
    sub_cluster_isin_supercluster = sub_cluster_isin_supercluster.sort_values(by='cluster')
    sub_cluster_isin_supercluster['int_supercluster'] = sub_cluster_isin_supercluster['super_cluster'].round(1).astype(
        int)
    df_super_mean = df.groupby('super_cluster', as_index=False).mean()
    pt = df_super_mean['projected_sc_pt'].values
    pt_int = [int(i) for i in pt]
    pt_str = [str(i) for i in pt_int]
    pt_sub = [str(int(i)) for i in df_mean['projected_sc_pt'].values]
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    # left panel: ground-truth labels
    num_parc_group = len(set(true_label))
    line = np.linspace(0, 1, num_parc_group)
    for color, group in zip(line, set(true_label)):
        where = np.where(np.array(true_label) == group)[0]
        ax1.scatter(X_dimred[where, 0], X_dimred[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
    ax1.legend(fontsize=6)
    ax1.set_title('true labels, ncomps:' + str(ncomp) + '. knn:' + str(knn))
    # right panel: quadratic-fit curve per super-cluster edge
    for e_i, (start, end) in enumerate(super_edgelist):
        # orient each edge from earlier to later pseudotime
        if pt[start] >= pt[end]:
            temp = end
            end = start
            start = temp
        x_i_start = df[df['super_cluster'] == start].groupby('cluster').mean()['x'].values
        y_i_start = df[df['super_cluster'] == start].groupby('cluster').mean()['y'].values
        x_i_end = df[df['super_cluster'] == end].groupby('cluster').mean()['x'].values
        y_i_end = df[df['super_cluster'] == end].groupby('cluster').mean()['y'].values
        direction_arrow = 1
        super_start_x = X_dimred[sc_supercluster_nn[start], 0]  # df[df['super_cluster'] == start].mean()['x']
        super_end_x = X_dimred[sc_supercluster_nn[end], 0]  # df[df['super_cluster'] == end].mean()['x']
        super_start_y = X_dimred[sc_supercluster_nn[start], 1]  # df[df['super_cluster'] == start].mean()['y']
        super_end_y = X_dimred[sc_supercluster_nn[end], 1]  # df[df['super_cluster'] == end].mean()['y']
        if super_start_x > super_end_x: direction_arrow = -1
        ext_maxx = False
        minx = min(super_start_x, super_end_x)
        maxx = max(super_start_x, super_end_x)
        miny = min(super_start_y, super_end_y)
        maxy = max(super_start_y, super_end_y)
        x_val = np.concatenate([x_i_start, x_i_end])
        y_val = np.concatenate([y_i_start, y_i_end])
        # keep only sub-cluster centroids inside the endpoints' bounding box
        idx_keep = np.where((x_val <= maxx) & (x_val >= minx))[0]
        idy_keep = np.where((y_val <= maxy) & (y_val >= miny))[0]
        print('len x-val before intersect', len(x_val))
        idx_keep = np.intersect1d(idy_keep, idx_keep)
        x_val = x_val[idx_keep]
        y_val = y_val[idx_keep]
        super_mid_x = (super_start_x + super_end_x) / 2
        super_mid_y = (super_start_y + super_end_y) / 2
        from scipy.spatial import distance
        # anchor the fit by replicating the endpoints (plus midpoint when near-vertical)
        very_straight = False
        if abs(minx - maxx) <= 1:
            very_straight = True
            straight_level = 10
            noise = 0.01
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise, super_mid_x])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise, super_mid_y])
        else:
            straight_level = 3
            noise = 0.1  # 0.05
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise])
        for i in range(straight_level):  # DO THE SAME FOR A MIDPOINT TOO
            y_super = np.concatenate([y_super, y_super])
            x_super = np.concatenate([x_super, x_super])
        list_selected_clus = list(zip(x_val, y_val))
        if (len(list_selected_clus) >= 1) & (very_straight == True):
            dist = distance.cdist([(super_mid_x, super_mid_y)], list_selected_clus, 'euclidean')
            print('dist', dist)
            if len(list_selected_clus) >= 2:
                k = 2
            else:
                k = 1
            midpoint_loc = dist[0].argsort()[:k]  # np.where(dist[0]==np.min(dist[0]))[0][0]
            print('midpoint loc', midpoint_loc)
            midpoint_xy = []
            for i in range(k):
                midpoint_xy.append(list_selected_clus[midpoint_loc[i]])
            # midpoint_xy = list_selected_clus[midpoint_loc]
            noise = 0.05
            print(midpoint_xy, 'is the midpoint between clus', pt[start], 'and ', pt[end])
            if k == 1:
                mid_x = np.array([midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][
                    0] - noise])  # ,midpoint_xy[1][0], midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array([midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][
                    1] - noise])  # ,midpoint_xy[1][1], midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            if k == 2:
                mid_x = np.array(
                    [midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][0] - noise, midpoint_xy[1][0],
                     midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array(
                    [midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][1] - noise, midpoint_xy[1][1],
                     midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            for i in range(3):
                mid_x = np.concatenate([mid_x, mid_x])
                mid_y = np.concatenate([mid_y, mid_y])
            x_super = np.concatenate([x_super, mid_x])
            y_super = np.concatenate([y_super, mid_y])
        x_val = np.concatenate([x_val, x_super])
        y_val = np.concatenate([y_val, y_super])
        # quadratic fit through member centroids + anchor points
        z = np.polyfit(x_val, y_val, 2)
        xp = np.linspace(minx, maxx, 500)
        p = np.poly1d(z)
        smooth = p(xp)
        if ext_maxx == False:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # minx+3
        else:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # maxx-3
        ax2.plot(xp[idx_keep], smooth[idx_keep], linewidth=3, c='dimgrey')
        # med_loc = np.where(xp == np.median(xp[idx_keep]))[0]
        # arrow is placed at the grid point nearest the mean x of the curve
        mean_temp = np.mean(xp[idx_keep])
        closest_val = xp[idx_keep][0]
        closest_loc = idx_keep[0]
        for i, xp_val in enumerate(xp[idx_keep]):
            if abs(xp_val - mean_temp) < abs(closest_val - mean_temp):
                closest_val = xp_val
                closest_loc = idx_keep[i]
        step = 1
        if direction_arrow == 1:  # smooth instead of preds
            ax2.arrow(xp[closest_loc], smooth[closest_loc], xp[closest_loc + step] - xp[closest_loc],
                      smooth[closest_loc + step] - smooth[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=1, color='dimgrey')  # , head_starts_at_zero = direction_arrow )
        else:
            ax2.arrow(xp[closest_loc], smooth[closest_loc], xp[closest_loc - step] - xp[closest_loc],
                      smooth[closest_loc - step] - smooth[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=1, color='dimgrey')
    x_cluster = df_mean['x']
    y_cluster = df_mean['y']
    num_parc_group = len(set(cluster_labels))
    # terminal clusters are highlighted with a yellow marker edge
    c_edge = []
    width_edge = []
    for i in range(num_parc_group):
        if i in final_super_terminal:
            width_edge.append(2.5)
            c_edge.append('yellow')
        else:
            width_edge.append(0)
            c_edge.append('black')
    ax2.scatter(x_cluster, y_cluster, c='red')
    for i, type in enumerate(pt_str):
        ax2.text(df_super_mean['x'][i], df_super_mean['y'][i], 'C' + str(i), weight='bold')
    for i in range(len(x_cluster)):
        ax2.text(x_cluster[i], y_cluster[i], pt_sub[i] + 'c' + str(i))
    ax2.set_title('lazy:' + str(x_lazy) + ' teleport' + str(alpha_teleport) + 'super_knn:' + str(knn))
    ax2.scatter(X_dimred[:, 0], X_dimred[:, 1], c=projected_sc_pt, cmap='viridis_r', alpha=0.5)
    ax2.scatter(df_super_mean['x'], df_super_mean['y'], c='black', s=60, edgecolors=c_edge, linewidth=width_edge)
    plt.title(title_str)
    return
def csr_mst(adjacency_matrix):
    """Return a symmetrized minimum spanning tree of a csr adjacency matrix.

    Edge weights are inverted before the MST is computed (new = max - old + 1),
    so the spanning tree retains the strongest links of the original graph.
    The result is symmetrized as (T + T.T) * 0.5.
    """
    Tcsr = adjacency_matrix.copy()
    n_components_mst, comp_labels_mst = connected_components(csgraph=Tcsr, directed=False, return_labels=True)
    print('number of components before mst', n_components_mst)
    print('len Tcsr data', len(Tcsr.data))
    # flip the weights: large original weight -> small cost for the MST,
    # shifted so the minimum cost is exactly 1
    flipped = -1 * Tcsr.data
    Tcsr.data = (flipped - np.min(flipped)) + 1
    print('len Tcsr data', len(Tcsr.data))
    Tcsr = minimum_spanning_tree(Tcsr)  # adjacency_matrix)
    n_components_mst, comp_labels_mst = connected_components(csgraph=Tcsr, directed=False, return_labels=True)
    print('number of components after mst', n_components_mst)
    Tcsr = (Tcsr + Tcsr.T) * 0.5  # make symmetric
    print('number of components after symmetric mst', n_components_mst)
    print('len Tcsr data', len(Tcsr.data))
    return Tcsr
def connect_all_components(MSTcsr, cluster_graph_csr, adjacency_matrix):
    """Reconnect a fragmented cluster graph (csr) using the cheapest MST links.

    Repeatedly finds the weakest MST edge leaving component 0 and copies the
    corresponding adjacency weight into cluster_graph_csr until one component
    remains. Returns the (mutated) cluster_graph_csr.
    """
    n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
    while n_components > 1:
        # MST links from component 0 to every other component
        cross_links = MSTcsr[comp_labels == 0, :][:, comp_labels != 0]
        weakest = np.min(cross_links.data)
        print('minimum value of link connecting components', weakest)
        rows, cols, _vals = scipy.sparse.find(MSTcsr == weakest)
        for src, dst in zip(rows, cols):
            if (comp_labels[src] == 0) & (comp_labels[dst] != 0):
                cluster_graph_csr[src, dst] = adjacency_matrix[src, dst]
        n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
        print('number of connected componnents after reconnecting ', n_components)
    return cluster_graph_csr
def local_pruning_clustergraph_mst(adjacency_matrix, global_pruning_std=1, max_outgoing=30, preserve_disconnected=True):
    """Prune the cluster graph locally (cap each node's out-degree) and globally
    (drop edges below mean - global_pruning_std * std), then, if pruning split
    components that were originally connected, restore connectivity with the
    cheapest MST links.

    Parameters
    ----------
    adjacency_matrix : csr_matrix
        Weighted cluster-graph adjacency.
    global_pruning_std : float
        Larger values mean less pruning.
    max_outgoing : int
        Maximum number of outgoing edges kept per node.
    preserve_disconnected : bool
        If True, components that existed before pruning are preserved (not merged,
        not further split).

    Returns
    -------
    (edgeweights, edgelist, comp_labels) : normalized edge weights, list of
        (source, target) pairs, and per-node component labels after pruning.
    """
    # the MST is only used to reconnect components that become disconnected due to pruning
    Tcsr = csr_mst(adjacency_matrix)
    initial_links_n = len(adjacency_matrix.data)

    n_components_0, comp_labels_0 = connected_components(csgraph=adjacency_matrix, directed=False, return_labels=True)
    print('number of components before pruning', n_components_0, comp_labels_0)
    adjacency_matrix = scipy.sparse.csr_matrix.todense(adjacency_matrix)
    row_list = []
    col_list = []
    weight_list = []
    neighbor_array = adjacency_matrix  # not listed in any order of proximity
    n_cells = neighbor_array.shape[0]
    rowi = 0
    # local pruning: keep only the max_outgoing strongest outgoing edges per node
    for i in range(neighbor_array.shape[0]):
        row = np.asarray(neighbor_array[i, :]).flatten()
        n_nonz = np.sum(row > 0)
        n_nonz = min(n_nonz, max_outgoing)
        to_keep_index = np.argsort(row)[::-1][0:n_nonz]
        updated_nn_weights = list(row[to_keep_index])
        for ik in range(len(to_keep_index)):
            row_list.append(rowi)
            col_list.append(to_keep_index[ik])
            dist = updated_nn_weights[ik]
            weight_list.append(dist)
        rowi = rowi + 1
    final_links_n = len(weight_list)
    print('final links n', final_links_n)

    cluster_graph_csr = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
                                   shape=(n_cells, n_cells))

    sources, targets = cluster_graph_csr.nonzero()
    mask = np.zeros(len(sources), dtype=bool)

    cluster_graph_csr.data = cluster_graph_csr.data / (np.std(cluster_graph_csr.data))  # normalize
    threshold_global = np.mean(cluster_graph_csr.data) - global_pruning_std * np.std(cluster_graph_csr.data)
    mask |= (cluster_graph_csr.data < (threshold_global))  # smaller Jaccard weight means weaker edge
    cluster_graph_csr.data[mask] = 0
    cluster_graph_csr.eliminate_zeros()
    print('shape of cluster graph', cluster_graph_csr.shape)

    n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
    print('number of connected components after pruning', n_components)
    if (preserve_disconnected == True) & (n_components > n_components_0):  # preserve initial disconnected components
        Td = Tcsr.todense()
        Td[Td == 0] = 999.999  # sentinel so absent MST edges never win the min search
        n_components_ = n_components
        while n_components_ > n_components_0:
            for comp_i in range(n_components_0):
                loc_x = np.where(comp_labels_0 == comp_i)[0]
                len_i = len(set(comp_labels[loc_x]))
                print('locx', loc_x, len_i)
                # keep reconnecting until this original component is whole again
                while len_i > 1:
                    s = list(set(comp_labels[loc_x]))
                    loc_notxx = np.intersect1d(loc_x, np.where((comp_labels != s[0]))[0])
                    loc_xx = np.intersect1d(loc_x, np.where((comp_labels == s[0]))[0])
                    sub_td = Td[loc_xx, :][:, loc_notxx]
                    locxy = np.where(Td == np.min(sub_td))
                    # BUG FIX: this inner index previously reused the name `i`,
                    # clobbering the outer component index used below
                    for ei in range(len(locxy[0])):
                        if (comp_labels[locxy[0][ei]] != comp_labels[locxy[1][ei]]):
                            x = locxy[0][ei]
                            y = locxy[1][ei]
                            minval = adjacency_matrix[x, y]
                            print('inside reconnecting components while preserving original ', x, y, minval)
                            cluster_graph_csr[x, y] = minval
                    n_components_, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False,
                                                                      return_labels=True)
                    loc_x = np.where(comp_labels_0 == comp_i)[0]
                    len_i = len(set(comp_labels[loc_x]))
        print('number of connected componnents after reconnecting ', n_components_)
    sources, targets = cluster_graph_csr.nonzero()
    edgelist = list(zip(sources, targets))
    edgeweights = cluster_graph_csr.data / (np.std(cluster_graph_csr.data))

    trimmed_n = (initial_links_n - final_links_n) * 100 / initial_links_n
    trimmed_n_glob = (initial_links_n - len(edgeweights)) / initial_links_n
    if global_pruning_std < 0.5:
        print("percentage links trimmed from local pruning relative to start", trimmed_n)
        print("percentage links trimmed from global pruning relative to start", trimmed_n_glob)
    return edgeweights, edgelist, comp_labels
def get_sparse_from_igraph(graph, weight_attr=None):
    """Convert an igraph graph into a scipy csr adjacency matrix.

    Uses edge weights from `weight_attr` when given, otherwise weight 1 per
    edge; undirected graphs are symmetrized by mirroring every edge.
    """
    edges = graph.get_edgelist()
    if weight_attr is None:
        weights = [1] * len(edges)
    else:
        weights = graph.es[weight_attr]
    if not graph.is_directed():
        # mirror each (u, v) as (v, u) and duplicate the weights to match
        edges.extend([(v, u) for u, v in edges])
        weights.extend(weights)
    n_vertices = graph.vcount()
    if len(edges) > 0:
        return csr_matrix((weights, zip(*edges)), shape=(n_vertices, n_vertices))
    return csr_matrix((n_vertices, n_vertices))
class PARC:
def __init__(self, data, true_label=None, anndata=None, dist_std_local=2, jac_std_global='median',
keep_all_local_dist='auto',
too_big_factor=0.4, small_pop=10, jac_weighted_edges=True, knn=30, n_iter_leiden=5, random_seed=42,
num_threads=-1, distance='l2', time_smallpop=15, pseudotime=False,
root=0, path='/home/shobi/Trajectory/', super_cluster_labels=False,
super_node_degree_list=False, super_terminal_cells=False, x_lazy=0.95, alpha_teleport=0.99,
root_user="root_cluster", preserve_disconnected=True, dataset="humanCD34", super_terminal_clusters=[], do_magic=False):
# higher dist_std_local means more edges are kept
# highter jac_std_global means more edges are kept
if keep_all_local_dist == 'auto':
if data.shape[0] > 300000:
keep_all_local_dist = True # skips local pruning to increase speed
else:
keep_all_local_dist = False
self.data = data
self.true_label = true_label
self.anndata = anndata
self.dist_std_local = dist_std_local
self.jac_std_global = jac_std_global ##0.15 is also a recommended value performing empirically similar to 'median'
self.keep_all_local_dist = keep_all_local_dist
self.too_big_factor = too_big_factor ##if a cluster exceeds this share of the entire cell population, then the PARC will be run on the large cluster. at 0.4 it does not come into play
self.small_pop = small_pop # smallest cluster population to be considered a community
self.jac_weighted_edges = jac_weighted_edges
self.knn = knn
self.n_iter_leiden = n_iter_leiden
self.random_seed = random_seed # enable reproducible Leiden clustering
self.num_threads = num_threads # number of threads used in KNN search/construction
self.distance = distance # Euclidean distance 'l2' by default; other options 'ip' and 'cosine'
self.time_smallpop = time_smallpop
self.pseudotime = pseudotime
self.root = root
self.path = path
self.super_cluster_labels = super_cluster_labels
self.super_node_degree_list = super_node_degree_list
self.super_terminal_cells = super_terminal_cells
self.x_lazy = x_lazy # 1-x = probability of staying in same node
self.alpha_teleport = alpha_teleport # 1-alpha is probability of jumping
self.root_user = root_user
self.preserve_disconnected = preserve_disconnected
self.dataset = dataset
self.super_terminal_clusters = super_terminal_clusters
self.do_magic = do_magic
    def get_terminal_clusters(self, A, markov_pt, root_ai):
        """Shortlist terminal (end-state) clusters of the trajectory.

        Candidates are clusters that score low on population-weighted
        closeness/betweenness centrality and/or out-degree, gated by high
        pseudotime; candidates adjacent to the root or to 3+ other terminals
        are then removed.

        Parameters: A (cluster-level adjacency matrix), markov_pt (cluster
        pseudotimes), root_ai (index of the root cluster).
        Returns a list of terminal cluster indices.
        """
        n_ = A.shape[0]
        # outlier cut-off (in std-devs) for centrality scores shrinks as the graph grows
        if n_ <= 10: n_outlier_std = 3
        if (n_ <= 40) & (n_ > 10):n_outlier_std = 2
        if n_>=40: n_outlier_std = 1
        pop_list = []
        print('get terminal', set(self.labels), np.where(self.labels == 0))
        for i in list(set(self.labels)):
            pop_list.append(len(np.where(self.labels == i)[0]))
        # we weight the out-degree based on the population of clusters to avoid allowing small clusters to become the terminals based on population alone
        A_new = A.copy()
        for i in range(A.shape[0]):
            for j in range(A.shape[0]):
                A_new[i, j] = A[i, j] * (pop_list[i] + pop_list[j]) / (pop_list[i] * pop_list[j])
        # make an igraph graph to compute the centrality scores
        g_dis = ig.Graph.Adjacency((A_new > 0).tolist())  # need to manually add the weights as igraph treats A>0 as boolean
        g_dis.es['weights'] = 1/A_new[A_new.nonzero()] # we want "distances" not weights for closeness and betweenness
        betweenness_score = g_dis.betweenness(weights = 'weights')
        betweenness_score_array = np.asarray(betweenness_score)
        # drop extreme outliers before thresholding so a single hub cannot skew the mean
        betweenness_score_takeout_outlier = betweenness_score_array[betweenness_score_array<(np.mean(betweenness_score_array)+n_outlier_std*np.std(betweenness_score_array))]
        betweenness_list = [ i for i, score in enumerate(betweenness_score) if score < (np.mean(betweenness_score_takeout_outlier) - 0 * np.std(betweenness_score_takeout_outlier))]
        closeness_score = g_dis.closeness( mode='ALL', cutoff=None, weights='weights', normalized=True)
        closeness_score_array = np.asarray( closeness_score)
        closeness_score_takeout_outlier = closeness_score_array[closeness_score_array < (np.mean( closeness_score_array) + n_outlier_std * np.std( closeness_score_array))]
        closeness_list = [i for i, score in enumerate(closeness_score) if
                          score < (np.mean(closeness_score_takeout_outlier) - 0 * np.std(closeness_score_takeout_outlier))]
        print('closeness_score ', [(i, score) for i, score in enumerate(closeness_score)])
        print('closeness_score shortlist', closeness_list)
        print('betweeness_score ', [(i,score) for i, score in enumerate(betweenness_score)])
        print('betweeness_score shortlist', betweenness_list)
        out_deg = A_new.sum(axis=1)
        in_deg = A_new.sum(axis=0)
        out_deg = np.asarray(out_deg)
        print('number of clusters', n_)
        # degree/pseudotime percentile thresholds depend on graph size
        if n_ <= 10:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]
            print('low deg super', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 60))[
                0]  # 60 Ttoy #10 for human but not sure ever in play
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
            print('high pt super', loc_pt)
        if (n_ <= 40) & (n_ > 10):
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[
                0]  # 30 for Toy # was 50 for Human
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 20))[0]
            print('low deg super', loc_deg)
            print('low in-deg super', loc_deg_in)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 10))[0]  # 60 Toy
            print('high pt super', loc_pt)
        if n_ > 40:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]  # 15 Toy
            print('low deg', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 30))[0]  # 60Toy
            print('high pt', loc_pt)
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
        # combine: (close & between) | (close & low out-deg) | (between & low out-deg), gated by high pseudotime
        terminal_clusters_1 = list(set(closeness_list)&set(betweenness_list))
        terminal_clusters_2 = list(set(closeness_list) & set(loc_deg))
        terminal_clusters_3 = list(set(betweenness_list) & set(loc_deg))
        terminal_clusters = list(set(terminal_clusters_1)|set(terminal_clusters_2))
        terminal_clusters = list(set(terminal_clusters)|set(terminal_clusters_3))
        terminal_clusters = list(set(terminal_clusters) & set(loc_pt))
        terminal_org = terminal_clusters.copy()
        print('original terminal clusters', terminal_org)
        # filter: drop terminals that neighbour the root or 3+ other terminals
        for terminal_i in terminal_org:
            removed_terminal_i = False
            count_nn = 0
            neigh_terminal = np.where(A[:, terminal_i] > 0)[0]
            if neigh_terminal.size > 0:
                for item in neigh_terminal:
                    if item in terminal_clusters:
                        print('item and terminal',
                              item, terminal_clusters)
                        count_nn = count_nn + 1
                    if item == root_ai:  # if the terminal state is a neighbor of the root, drop it
                        terminal_clusters.remove(terminal_i)
                        print('we removed cluster', terminal_i, 'from the shortlist of terminal states ')
                        removed_terminal_i = True
                if count_nn >= 3:
                    if removed_terminal_i == False: terminal_clusters.remove(terminal_i)
                    print('TS', terminal_i, 'had 3 or more neighboring terminal states')
        print('terminal_clusters', terminal_clusters)
        return terminal_clusters
    def get_terminal_clusters_old(self, A, markov_pt, root_ai):
        """Legacy terminal-cluster selection, kept for reference.

        Uses only degree and pseudotime percentile thresholds (no centrality
        scores); superseded by get_terminal_clusters.
        """
        pop_list = []
        print('get terminal', set(self.labels), np.where(self.labels == 0))
        for i in list(set(self.labels)):
            pop_list.append(len(np.where(self.labels == i)[0]))
        # we weight the out-degree based on the population of clusters to avoid allowing small clusters to become the terminals based on population alone
        A_new = A.copy()
        for i in range(A.shape[0]):
            for j in range(A.shape[0]):
                A_new[i, j] = A[i, j] * (pop_list[i] + pop_list[j]) / (pop_list[i] * pop_list[j])
        out_deg = A_new.sum(axis=1)
        in_deg = A_new.sum(axis=0)
        out_deg = np.asarray(out_deg)
        print('out deg', out_deg)
        n_ = A.shape[0]
        print('number of clusters', n_)
        # degree/pseudotime percentile thresholds depend on graph size
        if n_ <= 10:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]
            print('low deg super', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 60))[
                0]  # 60 Ttoy #10 for human but not sure ever in play
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
            print('high pt super', loc_pt)
        if (n_ <= 40) & (n_ > 10):
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[
                0]  # 30 for Toy # was 50 for Human
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 20))[0]
            print('low deg super', loc_deg)
            print('low in-deg super', loc_deg_in)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 30))[0]  # 60 Toy
            print('high pt super', loc_pt)
        if n_ > 40:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 30))[0]  # 15 Toy
            print('low deg', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 40))[0]  # 60Toy
            print('high pt', loc_pt)
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
        # candidates: low out-degree OR low in-degree, gated by high pseudotime
        terminal_clusters = list(set(loc_deg) | set(loc_deg_in))
        terminal_clusters = list(set(terminal_clusters) & set(loc_pt))
        terminal_org = terminal_clusters.copy()
        print('original terminal clusters', terminal_org)
        # filter: drop terminals that neighbour the root or 3+ other terminals
        for terminal_i in terminal_org:
            removed_terminal_i = False
            count_nn = 0
            neigh_terminal = np.where(A[:, terminal_i] > 0)[0]
            if neigh_terminal.size > 0:
                for item in neigh_terminal:
                    if item in terminal_clusters:
                        print('item and terminal',
                              item, terminal_clusters)
                        count_nn = count_nn + 1
                    if item == root_ai:  # if the terminal state is a neighbor of the root, drop it
                        terminal_clusters.remove(terminal_i)
                        print('we removed cluster', terminal_i, 'from the shortlist of terminal states ')
                        removed_terminal_i = True
                if count_nn >= 3:
                    if removed_terminal_i == False: terminal_clusters.remove(terminal_i)
                    print('TS', terminal_i, 'had 4 or more neighboring terminal states')
        print('terminal_clusters', terminal_clusters)
        return terminal_clusters
def compute_hitting_time(self, sparse_graph, root, x_lazy, alpha_teleport, number_eig=0):
# 1- alpha is the probabilty of teleporting
# 1- x_lazy is the probability of staying in current state (be lazy)
beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
N = sparse_graph.shape[0]
# print('adjacency in compute hitting', sparse_graph)
# sparse_graph = scipy.sparse.csr_matrix(sparse_graph)
print('start compute hitting')
A = scipy.sparse.csr_matrix.todense(sparse_graph) # A is the adjacency matrix
print('is graph symmetric', (A.transpose() == A).all())
lap = csgraph.laplacian(sparse_graph,
normed=False) # compute regular laplacian (normed = False) to infer the degree matrix where D = L+A
# see example and definition in the SciPy ref https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.laplacian.html
A = scipy.sparse.csr_matrix.todense(lap)
print('is laplacian symmetric', (A.transpose() == A).all())
deg = sparse_graph + lap # Recall that L=D-A (modified for weighted where D_ii is sum of edge weights and A_ij is the weight of particular edge)
deg.data = 1 / np.sqrt(deg.data) ##inv sqrt of degree matrix
deg[deg == np.inf] = 0
norm_lap = csgraph.laplacian(sparse_graph, normed=True) # returns symmetric normalized D^-.5 xL x D^-.5
Id = np.zeros((N, N), float)
np.fill_diagonal(Id, 1)
norm_lap = scipy.sparse.csr_matrix.todense(norm_lap)
eig_val, eig_vec = np.linalg.eig(
norm_lap) # eig_vec[:,i] is eigenvector for eigenvalue eig_val[i] not eigh as this is only for symmetric. the eig vecs are not in decsending order
# print('eig val', eig_val.shape, eig_val)
if number_eig == 0: number_eig = eig_vec.shape[1]
# print('number of eig vec', number_eig)
Greens_matrix = np.zeros((N, N), float)
beta_norm_lap = np.zeros((N, N), float)
Xu = np.zeros((N, N))
Xu[:, root] = 1
Id_Xv = np.zeros((N, N), int)
np.fill_diagonal(Id_Xv, 1)
Xv_Xu = Id_Xv - Xu
start_ = 0
if alpha_teleport == 1:
start_ = 1 # if there are no jumps (alph_teleport ==1), then the first term in beta-normalized Green's function will have 0 in denominator (first eigenvalue==0)
for i in range(start_, number_eig):
vec_i = eig_vec[:, i]
factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
vec_i = np.reshape(vec_i, (-1, 1))
eigen_vec_mult = vec_i.dot(vec_i.T)
Greens_matrix = Greens_matrix + (
eigen_vec_mult / factor)
beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor)
deg = scipy.sparse.csr_matrix.todense(deg)
temp = Greens_matrix.dot(deg)
temp = deg.dot(temp) * beta_teleport
hitting_matrix = np.zeros((N, N), float)
diag_row = np.diagonal(temp)
for i in range(N):
hitting_matrix[i, :] = diag_row - temp[i, :]
roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
temp = Xv_Xu.dot(temp)
final_hitting_times = np.diagonal(
temp) al_hitting_times), roundtrip_times
def pagerank_compute(self, P_bias, max_iterations=200):
x_lazy = self.x_lazy
alpha_teleport = self.alpha_teleport
n = P_bias.shape[0]
P_bias = x_lazy * P_bias + (1 - x_lazy) * np.identity(n)
P_bias = alpha_teleport * P_bias + ((1 - alpha_teleport) * (1 / n) * (np.ones((n, n)) - np.identity(n)))
p0 = 1.0 / float(n)
nes((n, 1)) * p0
p0 = p0.T
for iteration in range(max_iterations):
p0 = p0.dot(P_bias)
p0 = p0[0] / np.sum(p0[0])
upperlim = np.percentile(p0, 90)
lowerlim = np.percentile(p0, 10)
if self.too_big_factor < 0.3:
p0 = np.array([d if d <= upperlim else upperlim for d in p0])
p0 = p0 / np.sum(p0)
print('final stationary', [(i, pp0) for i, pp0 in enumerate(p0)])
return p0
    def prob_reaching_terminal_state1(self, terminal_state, all_terminal_states, A, root, pt, num_sim,q,cumstateChangeHist, cumstateChangeHist_all,seed):
        """Monte-Carlo worker: estimate how often each cluster lies on a random
        walk from `root` that reaches `terminal_state`.

        Runs `num_sim` walks of at most 2 * n_states steps on the row-normalized
        version of A, and appends [visits-on-successful-walks,
        visits-on-all-walks] (each 1 x n_states) to the shared list `q`; the
        caller aggregates results across workers.
        """
        np.random.seed(seed)  # per-worker seed so parallel walks differ but stay reproducible
        print('root', root)
        print('terminal state target', terminal_state)
        n_states = A.shape[0]
        n_components, labels = connected_components(csgraph=csr_matrix(A), directed=False)
        A = A / (np.max(A))
        jj = 0
        # give absorbing (all-zero) rows a self-loop so row-normalization is well defined
        for row in A:
            if np.all(row == 0): A[jj, jj] = 1
            jj = jj + 1
        P = A / A.sum(axis=1).reshape((n_states, 1))
        n_steps = int(2* n_states)
        currentState = root
        state = np.zeros((1, n_states))
        state[0, currentState] = 1
        currentState = root
        state = np.zeros((1, n_states))
        state[0, currentState] = 1
        state_root = state.copy()
        # terminal states that are "later" in pseudotime or not adjacent to the target
        neigh_terminal = np.where(A[:, terminal_state] > 0)[0]
        non_nn_terminal_state = []
        for ts_i in all_terminal_states:
            if pt[ts_i] > pt[terminal_state]: non_nn_terminal_state.append(ts_i)
        for ts_i in all_terminal_states:
            if np.all(neigh_terminal != ts_i): non_nn_terminal_state.append(ts_i)
        count_reach_terminal_state = 0
        count_r = 0
        for i in range(num_sim):
            stateChangeHist = np.zeros((n_states, n_states))
            stateChangeHist[root, root] = 1
            state = state_root
            currentState = root
            stateHist = state
            terminal_state_found = False
            non_neighbor_terminal_state_reached = False
            x = 0
            # walk until the target terminal is reached or the step budget runs out
            while (x < n_steps) & (
                    (terminal_state_found == False)):
                currentRow = np.ma.masked_values((P[currentState]), 0.0)
                nextState = simulate_multinomial(currentRow)
                if nextState == terminal_state:
                    terminal_state_found = True
                    count_r = count_r+1
                stateChangeHist[currentState, nextState] += 1
                state = np.zeros((1, n_states))
                state[0, nextState] = 1.0
                stateHist = np.append(stateHist, state, axis=0)
                currentState = nextState
                x = x + 1
            if (terminal_state_found == True):
                # record which states were visited on this *successful* walk
                cumstateChangeHist = cumstateChangeHist + np.any(
                    stateChangeHist > 0, axis=0)
                count_reach_terminal_state = count_reach_terminal_state + 1
            # record which states were visited on *every* walk
            cumstateChangeHist_all = cumstateChangeHist_all + np.any(
                stateChangeHist > 0, axis=0)
        cumstateChangeHist_all[cumstateChangeHist_all == 0] = 1
        prob_ = cumstateChangeHist / cumstateChangeHist_all
        np.set_printoptions(precision=3)
        q.append([cumstateChangeHist, cumstateChangeHist_all])
    def simulate_markov_sub(self, A, num_sim, hitting_array, q, root):
        """Monte-Carlo worker: simulate `num_sim` random walks from `root` and
        record, per state, the (sigmoid-transformed) cumulative path distance at
        first arrival.

        Appends the (n_states x num_sim) matrix of first-hit distances to the
        shared list `q`; states never reached in a walk are marked n_steps + 1.
        """
        n_states = A.shape[0]
        P = A / A.sum(axis=1).reshape((n_states, 1))  # row-normalized transition matrix
        hitting_array_temp = np.zeros((P.shape[0], 1)).astype('float64')
        n_steps = int(2 * n_states)
        hitting_array_final = np.zeros((1, n_states))
        currentState = root
        print('root is', root)
        state = np.zeros((1, n_states))
        state[0, currentState] = 1
        state_root = state.copy()
        for i in range(num_sim):
            dist_list = []
            state = state_root
            currentState = root
            stateHist = state
            for x in range(n_steps):
                currentRow = np.ma.masked_values((P[currentState]), 0.0)
                nextState = simulate_multinomial(currentRow)
                dist = A[currentState, nextState]
                # squash the edge weight into (0, 1): larger weight -> smaller "distance"
                dist = (1 / ((1 + math.exp((dist - 1)))))
                dist_list.append(dist)
                state = np.zeros((1, n_states))
                state[0, nextState] = 1.0
                currentState = nextState
                stateHist = np.append(stateHist, state, axis=0)
            # accumulate per-state distance up to the first visit in this walk
            for state_i in range(P.shape[0]):
                first_time_at_statei = np.where(stateHist[:, state_i] == 1)[0]
                if len(first_time_at_statei) == 0:
                    hitting_array_temp[state_i, 0] = n_steps + 1  # never visited in this walk
                else:
                    total_dist = 0
                    for ff in range(first_time_at_statei[0]):
                        total_dist = dist_list[ff] + total_dist
                    hitting_array_temp[state_i, 0] = total_dist
            hitting_array = np.append(hitting_array, hitting_array_temp, axis=1)
        hitting_array = hitting_array[:, 1:]  # drop the seed column passed in by the caller
        q.append(hitting_array)
def simulate_branch_probability(self, terminal_state, all_terminal_states, A, root, pt, num_sim=300 ):
n_states = A.shape[0]
ncpu = multiprocessing.cpu_count()
if (ncpu == 1) | (ncpu == 2):
n_jobs = 1
elif ncpu > 2:
n_jobs = min(ncpu - 1, 5)
print('njobs', n_jobs)
num_sim_pp = int(num_sim / n_jobs)
print('num_sim_pp', num_sim_pp)
jobs = []
manager = multiprocessing.Manager()
q = manager.list()
seed_list = list(range(n_jobs))
for i in range(n_jobs):
cumstateChangeHist = np.zeros((1, n_states))
cumstateChangeHist_all = np.zeros((1, n_states))
process = multiprocessing.Process(target=self.prob_reaching_terminal_state1,args=(terminal_state, all_terminal_states, A, root, pt, num_sim_pp,q, cumstateChangeHist, cumstateChangeHist_all, seed_list[i]))
jobs.append(process)
for j in jobs:
j.start()
for j in jobs:
j.join()
cumhistory_vec = q[0][0]
cumhistory_vec_all = q[0][1]
count_reached= cumhistory_vec_all[0,terminal_state]
print('length of q', len(q))
for i in range(1,len(q)):
cumhistory_vec = cumhistory_vec + q[i][0]
cumhistory_vec_all = cumhistory_vec_all+ q[i][1]
t_reached = count_reached+ q[i][1][0,terminal_state]
print('accumulated number of times Terminal state',terminal_state, 'is found:',count_reached)
print('cumhistory_vec', cumhistory_vec)
print('cumhistory_vec_all', cumhistory_vec_all)
cumhistory_vec_all[cumhistory_vec_all == 0] = 1
prob_ = cumhistory_vec /cumhistory_vec_all
np.set_printoptions(precision=3)
print('prob', prob_)
if count_reached == 0:
prob_[:, terminal_state] = 0
print('never reached state', terminal_state)
else:
loc_1 = np.where(prob_ == 1)
print('loc_1', loc_1)
loc_1 = loc_1[1]
print('loc_1', loc_1)
0
prob_ = prob_ / min(1,1.1 * np.max(prob_))
prob_[0, loc_1] = 1
print('np.max', np.max(prob_))
print('scaled prob', prob_)
return list(prob_)[0]
def simulate_markov(self, A, root):
n_states = A.shape[0]
P = A / A.sum(axis=1).reshape((n_states, 1))
x_lazy = self.x_lazy
alpha_teleport = self.alpha_teleport
currentState = root
state = np.zeros((1, n_states))
state[0, currentState] = 1
state_root = state.copy()
stateHist = state
dfStateHist = pd.DataFrame(state)
distr_hist = np.zeros([1, n_states])
num_sim = 1300 ncpu = multiprocessing.cpu_count()
if (ncpu == 1) | (ncpu == 2):
n_jobs = 1
elif ncpu > 2:
n_jobs = min(ncpu - 1, 5)
print('njobs', n_jobs)
num_sim_pp = int(num_sim / n_jobs)
print('num_sim_pp', num_sim_pp)
n_steps = int(2 * n_states)
jobs = []
manager = multiprocessing.Manager()
q = manager.list()
for i in range(n_jobs):
hitting_array = np.ones((P.shape[0], 1)) * 1000
process = multiprocessing.Process(target=self.simulate_markov_sub,
args=(P, num_sim_pp, hitting_array, q, root))
jobs.append(process)
for j in jobs:
j.start()
for j in jobs:
j.join()
print('ended all multiprocesses, will retrieve and reshape')
hitting_array = q[0]
for qi in q[1:]:
hitting_array = np.append(hitting_array, qi, axis=1)
print('finished getting from queue', hitting_array.shape)
hitting_array_final = np.zeros((1, n_states))
no_times_state_reached_array = np.zeros((1, n_states))
for i in range(n_states):
rowtemp = hitting_array[i, :]
no_times_state_reached_array[0, i] = np.sum(rowtemp != (n_steps + 1))
lower_quart = np.percentile(no_times_state_reached_array, 25)
for i in range(n_states):
rowtemp = hitting_array[i, :]
no_times_state_reached = np.sum(rowtemp != (n_steps + 1))
if no_times_state_reached != 0:
perc = np.percentile(rowtemp[rowtemp != n_steps + 1], 15) + 0.001
hitting_array_final[0, i] = np.mean(rowtemp[rowtemp <= perc])
else:
hitting_array_final[0, i] = (n_steps + 1)
print('hitting from sim markov', [(i, val) for i, val in enumerate(hitting_array_final.flatten())])
return hitting_array_final[0]
def compute_hitting_time_onbias(self, laplacian, inv_sqr_deg, root, x_lazy, alpha_teleport, number_eig=0):
beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
N = laplacian.shape[0]
print('is laplacian of biased symmetric', (laplacian.transpose() == laplacian).all())
Id = np.zeros((N, N), float)
np.fill_diagonal(Id, 1)
eig_val, eig_vec = np.linalg.eig(
laplacian)
print('eig val', eig_val.shape)
if number_eig == 0: number_eig = eig_vec.shape[1]
print('number of eig vec', number_eig)
Greens_matrix = np.zeros((N, N), float)
beta_norm_lap = np.zeros((N, N), float)
Xu = np.zeros((N, N))
Xu[:, root] = 1
Id_Xv = np.zeros((N, N), int)
np.fill_diagonal(Id_Xv, 1)
Xv_Xu = Id_Xv - Xu
start_ = 0
if alpha_teleport == 1:
start_ = 1
for i in range(start_, number_eig): # 0 instead of 1th eg
# print(i, 'th eigenvalue is', eig_val[i])
vec_i = eig_vec[:, i]
factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
# print('factor', 1 / factor)
vec_i = np.reshape(vec_i, (-1, 1))
eigen_vec_mult = vec_i.dot(vec_i.T)
Greens_matrix = Greens_matrix + (
eigen_vec_mult / factor) # Greens function is the inverse of the beta-normalized laplacian
beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor) # beta-normalized laplacian
temp = Greens_matrix.dot(inv_sqr_deg)
temp = inv_sqr_deg.dot(temp) * beta_teleport
hitting_matrix = np.zeros((N, N), float)
diag_row = np.diagonal(temp)
for i in range(N):
hitting_matrix[i, :] = diag_row - temp[i, :]
roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
temp = Xv_Xu.dot(temp)
final_hitting_times = np.diagonal(
temp) ## number_eig x 1 vector of hitting times from root (u) to number_eig of other nodes
roundtrip_times = roundtrip_commute_matrix[root, :]
return abs(final_hitting_times), roundtrip_times
def project_hittingtimes_sc(self, pt):
if self.data.shape[0] > 1000:
knn_sc = 30
else:
knn_sc = 10
neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
print('shape of neighbor in project onto sc', neighbor_array.shape)
labels = np.asarray(self.labels)
sc_pt = np.zeros((len(self.labels),))
i = 0
for row in neighbor_array:
mean_weight = 0
# print('row in neighbor array of cells', row, labels.shape)
neighboring_clus = labels[row]
# print('neighbor clusters labels', neighboring_clus)
for clus_i in set(list(neighboring_clus)):
hitting_time_clus_i = pt[clus_i]
num_clus_i = np.sum(neighboring_clus == clus_i)
#if clus_i == self.root[0]: print('root is a neighbor', pt[clus_i], 'num NN cells beloning to root', num_clus_i)
# print('hitting and num_clus for Clusi', hitting_time_clus_i, num_clus_i)
mean_weight = mean_weight + hitting_time_clus_i * num_clus_i / knn_sc
# print('mean weight',mean_weight)
sc_pt[i] = mean_weight
#if self.root[0] in set(list(neighboring_clus)): print('the mean sc time for root neighbor is', mean_weight)
i = i + 1
return sc_pt
    def project_branch_probability_sc(self, bp_array_clus):
        """Project cluster-level branch probabilities onto single cells.

        Each cell's probability of reaching a terminal cluster is a kNN-weighted
        mix of its neighbours' cluster probabilities, rescaled per terminal
        cluster (column). The result is stored in self.single_cell_bp.
        """
        if self.data.shape[0] > 1000:
            knn_sc = 10  # 30
        else:
            knn_sc = 10
        neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
        print('shape of neighbor in project onto sc', neighbor_array.shape)
        labels = np.asarray(self.labels)
        # weight_array[cell, cluster] = fraction of the cell's kNN in that cluster
        weight_array = np.zeros((len(self.labels), len(list(set(self.labels)))))
        for irow, row in enumerate(neighbor_array):
            mean_weight = 0
            neighboring_clus = labels[row]
            print('neighbor clusters labels', neighboring_clus)
            for clus_i in set(list(neighboring_clus)):
                num_clus_i = np.sum(neighboring_clus == clus_i)
                wi = num_clus_i / knn_sc
                weight_array[irow, clus_i] = wi
        print(weight_array)
        bp_array_sc = weight_array.dot(bp_array_clus)
        bp_array_sc = bp_array_sc * 1. / np.max(bp_array_sc, axis=0)  # divide cell by max value in that column
        print('column max:',np.max(bp_array_sc, axis=0))
        # boost cells inside each terminal cluster above the rest of its column
        for i, label_ts in enumerate(list(self.terminal_clusters)):
            print('set labels', set(labels))
            print('set terminal clus' ,set(self.terminal_clusters))
            loc_i = np.where(np.asarray(self.labels) == label_ts)[0]
            loc_noti = np.where(np.asarray(self.labels) != label_ts)[0]
            if np.max(bp_array_sc[loc_noti,i])==1: bp_array_sc[loc_i,i]=1.2
            print('terminal cluster', label_ts, len(loc_i), loc_i)
        print('sc bp array', bp_array_sc)
        self.single_cell_bp = bp_array_sc
        return
def make_knn_struct(self, too_big=False, big_cluster=None):
if self.knn > 190: print('please provide a lower K_in for KNN graph construction')
ef_query = max(100, self.knn + 1) # ef always should be >K. higher ef, more accuate query
if too_big == False:
num_dims = self.data.shape[1]
n_elements = self.data.shape[0]
p = hnswlib.Index(space=self.distance, dim=num_dims) # default to Euclidean distance
p.set_num_threads(self.num_threads) # allow user to set threads used in KNN construction
if n_elements < 10000:
ef_param_const = min(n_elements - 10, 500)
ef_query = ef_param_const
print('setting ef_construction to', )
else:
ef_param_const = 200
if num_dims > 30:
p.init_index(max_elements=n_elements, ef_construction=ef_param_const,
M=48) ## good for scRNA seq where dimensionality is high
else:
p.init_index(max_elements=n_elements, ef_construction=200, M=30, )
p.add_items(self.data)
if too_big == True:
num_dims = big_cluster.shape[1]
n_elements = big_cluster.shape[0]
p = hnswlib.Index(space='l2', dim=num_dims)
p.init_index(max_elements=n_elements, ef_construction=200, M=30)
p.add_items(big_cluster)
p.set_ef(ef_query) # ef should always be > k
return p
def make_csrmatrix_noselfloop(self, neighbor_array, distance_array):
local_pruning_bool = not (self.keep_all_local_dist)
if local_pruning_bool == True: print('commencing local pruning based on minkowski metric at',
self.dist_std_local, 's.dev above mean')
row_list = []
col_list = []
weight_list = []
neighbor_array = neighbor_array # not listed in in any order of proximity
# print('size neighbor array', neighbor_array.shape)
num_neigh = neighbor_array.shape[1]
distance_array = distance_array
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
rowi = 0
count_0dist = 0
discard_count = 0
if local_pruning_bool == True: # do some local pruning based on distance
for row in neighbor_array:
distlist = distance_array[rowi, :]
to_keep = np.where(distlist <= np.mean(distlist) + self.dist_std_local * np.std(distlist))[0] # 0*std
updated_nn_ind = row[np.ix_(to_keep)]
updated_nn_weights = distlist[np.ix_(to_keep)]
discard_count = discard_count + (num_neigh - len(to_keep))
for ik in range(len(updated_nn_ind)):
if rowi != row[ik]: # remove self-loops
row_list.append(rowi)
col_list.append(updated_nn_ind[ik])
dist = np.sqrt(updated_nn_weights[ik])
if dist == 0:
count_0dist = count_0dist + 1
weight_list.append(dist)
rowi = rowi + 1
if local_pruning_bool == False: # dont prune based on distance
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (1. / (distance_array.flatten() + 0.1)).tolist()
# if local_pruning_bool == True: print('share of neighbors discarded in local distance pruning %.1f' % (discard_count / neighbor_array.size))
csr_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
return csr_graph
def func_mode(self, ll): # return MODE of list
# If multiple items are maximal, the function returns the first one encountered.
return max(set(ll), key=ll.count)
    def run_toobig_subPARC(self, X_data, jac_std_toobig=1,
                           jac_weighted_edges=True):
        """Re-cluster an oversized Leiden cluster on its own data matrix.

        Builds a fresh KNN graph over `X_data`, prunes weak edges globally,
        re-weights edges by Jaccard similarity, runs Leiden community
        detection, then merges tiny clusters into their neighbors.

        Parameters
        ----------
        X_data : ndarray (n_cells_in_cluster, n_dims)
            Rows of the original data belonging to the too-big cluster.
        jac_std_toobig : float or 'median'
            Global Jaccard pruning threshold: keep edges with similarity above
            mean - jac_std_toobig * std (or above the median when 'median').
        jac_weighted_edges : bool
            When True, Leiden uses the Jaccard similarities as edge weights.

        Returns
        -------
        ndarray of relabelled (0..n-1) cluster assignments, one per row of X_data.
        Side effect: overwrites self.labels with these labels.
        """
        n_elements = X_data.shape[0]
        hnsw = self.make_knn_struct(too_big=True, big_cluster=X_data)
        # cap k well below the sub-population size so the query stays meaningful
        if self.knn >= 0.8 * n_elements:
            k = int(0.5 * n_elements)
        else:
            k = self.knn
        neighbor_array, distance_array = hnsw.knn_query(X_data, k=k)
        # print('shapes of neigh and dist array', neighbor_array.shape, distance_array.shape)
        csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
        sources, targets = csr_array.nonzero()
        # zero-out extreme outlier edges (weight > mean + 5*std) before graph build
        mask = np.zeros(len(sources), dtype=bool)
        mask |= (csr_array.data > (
                np.mean(csr_array.data) + np.std(csr_array.data) * 5))  # smaller distance means stronger edge
        csr_array.data[mask] = 0
        csr_array.eliminate_zeros()
        sources, targets = csr_array.nonzero()
        edgelist = list(zip(sources.tolist(), targets.tolist()))
        edgelist_copy = edgelist.copy()
        G = ig.Graph(edgelist, edge_attrs={'weight': csr_array.data.tolist()})
        sim_list = G.similarity_jaccard(pairs=edgelist_copy)  # list of jaccard weights
        new_edgelist = []
        sim_list_array = np.asarray(sim_list)
        # global pruning: keep only edges whose Jaccard similarity is above threshold
        if jac_std_toobig == 'median':
            threshold = np.median(sim_list)
        else:
            threshold = np.mean(sim_list) - jac_std_toobig * np.std(sim_list)
        strong_locs = np.where(sim_list_array > threshold)[0]
        for ii in strong_locs: new_edgelist.append(edgelist_copy[ii])
        sim_list_new = list(sim_list_array[strong_locs])
        if jac_weighted_edges == True:
            G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
        else:
            G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist))
        G_sim.simplify(combine_edges='sum')
        resolution_parameter = 1
        if jac_weighted_edges == True:
            partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
                                                 n_iterations=self.n_iter_leiden, seed=self.random_seed)
        else:
            partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
                                                 n_iterations=self.n_iter_leiden, seed=self.random_seed)
        # print('Q= %.2f' % partition.quality())
        PARC_labels_leiden = np.asarray(partition.membership)
        PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
        small_pop_list = []
        small_cluster_list = []
        small_pop_exist = False
        # relabel to consecutive integers 0..n-1
        dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
        # first pass: find clusters with fewer than 5 members
        for cluster in set(PARC_labels_leiden):
            population = len(np.where(PARC_labels_leiden == cluster)[0])
            if population < 5:  # <10
                small_pop_exist = True
                small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
                small_cluster_list.append(cluster)
        # reassign each member of a tiny cluster to the most common label among
        # its KNN neighbors, excluding other tiny clusters when possible
        for small_cluster in small_pop_list:
            for single_cell in small_cluster:
                old_neighbors = neighbor_array[single_cell, :]
                group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
                group_of_old_neighbors = list(group_of_old_neighbors.flatten())
                available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
                if len(available_neighbours) > 0:
                    available_neighbours_list = [value for value in group_of_old_neighbors if
                                                 value in list(available_neighbours)]
                    best_group = max(available_neighbours_list, key=available_neighbours_list.count)
                    PARC_labels_leiden[single_cell] = best_group
        # second pass: keep absorbing clusters below 10 members until none remain
        # (or a 5-second wall-clock budget is exhausted)
        do_while_time = time.time()
        while (small_pop_exist == True) & (time.time() - do_while_time < 5):
            small_pop_list = []
            small_pop_exist = False
            for cluster in set(list(PARC_labels_leiden.flatten())):
                population = len(np.where(PARC_labels_leiden == cluster)[0])
                if population < 10:
                    small_pop_exist = True
                    # print(cluster, ' has small population of', population, )
                    small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
            for small_cluster in small_pop_list:
                for single_cell in small_cluster:
                    old_neighbors = neighbor_array[single_cell, :]
                    group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
                    group_of_old_neighbors = list(group_of_old_neighbors.flatten())
                    best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
                    PARC_labels_leiden[single_cell] = best_group
        dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
        self.labels = PARC_labels_leiden
        print('finished labels')
        # self.anndata.obs['parc_label'] = self.labels
        # cma1_cluster = self.anndata.obs.groupby('parc_label').mean('Cma1')
        return PARC_labels_leiden
def recompute_weights(self, clustergraph_ig, pop_list_raw):
sparse_clustergraph = get_sparse_from_igraph(clustergraph_ig, weight_attr='weight')
n = sparse_clustergraph.shape[0]
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
weights = sparse_clustergraph.data
# print('edgelist of combined clustergraph', edgelist)
# print('edge weights of combined clustergraph', weights)
new_weights = []
i = 0
for s, t in edgelist:
pop_s = pop_list_raw[s]
pop_t = pop_list_raw[t]
w = weights[i]
nw = w * (pop_s + pop_t) / (pop_s * pop_t) # *
new_weights.append(nw)
# print('old and new', w, nw)
i = i + 1
scale_factor = max(new_weights) - min(new_weights)
wmin = min(new_weights)
# wmax = max(new_weights)
# print('weights before scaling', new_weights)
new_weights = [(wi + wmin) / scale_factor for wi in new_weights]
# print('weights after scaling', new_weights)
sparse_clustergraph = csr_matrix((np.array(new_weights), (sources, targets)),
shape=(n, n))
# print('new weights', new_weights)
# print(sparse_clustergraph)
# print('reweighted sparse clustergraph')
# print(sparse_clustergraph)
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
return sparse_clustergraph, edgelist
def find_root_HumanCD34(self, graph_dense, PARC_labels_leiden, root_idx, true_labels):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
# print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_idx]
return graph_node_label, majority_truth_labels, deg_list, root
def find_root_bcell(self, graph_dense, PARC_labels_leiden, root_user, true_labels):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
# print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_user]
return graph_node_label, majority_truth_labels, deg_list, root
def find_root(self, graph_dense, PARC_labels_leiden, root_user, true_labels, super_cluster_labels_sub,
super_node_degree_list):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
min_deg = 1000
super_min_deg = 1000
found_super_and_sub_root = False
found_any_root = False
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
print('deg list', deg_list) # locallytrimmed_g.degree()
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
# print('cluster', cluster_i, 'has majority', majority_truth, 'with degree list', deg_list)
if self.super_cluster_labels != False:
super_majority_cluster = self.func_mode(list(np.asarray(super_cluster_labels_sub)[cluster_i_loc]))
super_majority_cluster_loc = np.where(np.asarray(super_cluster_labels_sub) == super_majority_cluster)[0]
super_majority_truth = self.func_mode(list(true_labels[super_majority_cluster_loc]))
# print('spr node degree list sub',super_node_degree_list, super_majority_cluster)
super_node_degree = super_node_degree_list[super_majority_cluster]
if (str(root_user) in majority_truth) & (str(root_user) in str(super_majority_truth)):
if super_node_degree < super_min_deg:
# if deg_list[cluster_i] < min_deg:
found_super_and_sub_root = True
root = cluster_i
found_any_root = True
min_deg = deg_list[ci]
super_min_deg = super_node_degree
print('new root is', root, ' with degree', min_deg, 'and super node degree',
super_min_deg)
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
if (self.super_cluster_labels == False) | (found_super_and_sub_root == False):
print('self.super_cluster_labels', super_cluster_labels_sub, ' foundsuper_cluster_sub and super root',
found_super_and_sub_root)
for ic, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
print('cluster', cluster_i, 'set true labels', set(true_labels))
true_labels = np.asarray(true_labels)
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
print('cluster', cluster_i, 'has majority', majority_truth, 'with degree list', deg_list)
if (str(root_user) in str(majority_truth)):
print('did not find a super and sub cluster with majority ', root_user)
if deg_list[ic] < min_deg:
root = cluster_i
found_any_root = True
min_deg = deg_list[ic]
print('new root is', root, ' with degree', min_deg)
# print('len graph node label', graph_node_label)
if found_any_root == False:
print('setting arbitrary root', cluster_i)
self.root = cluster_i
return graph_node_label, majority_truth_labels, deg_list, root
def full_graph_paths(self, X_data, n_components_original=1):
# make igraph object of low-K KNN using the knn_struct PCA-dimension space made in PARC.
# This is later used by find_shortest_path for sc_bp visual
# neighbor array is not listed in in any order of proximity
print('number of components in the original full graph', n_components_original)
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=3)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
k_0 = 3
if n_components_original == 1:
while (n_comp > 1):
k_0 = k_0 + 1
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
if n_components_original > 1:
while (k_0 <= 5) & (n_comp > n_components_original):
k_0 = k_0 + 1
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
row_list = []
print('size neighbor array in low-KNN in pca-space for visualization', neighbor_array.shape)
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (distance_array.flatten()).tolist()
csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
sources, targets = csr_full_graph.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
Gr = ig.Graph(edgelist, edge_attrs={'weight': csr_full_graph.data.tolist()})
Gr.simplify(combine_edges='sum')
return Gr
def get_gene_expression(self, gene_exp, title_gene=""):
fig_0, ax = plt.subplots()
sc_pt = self.single_cell_pt_markov
sc_bp_original = self.single_cell_bp
n_terminal_states = sc_bp_original.shape[1]
jet = cm.get_cmap('jet', n_terminal_states)
cmap_ = jet(range(n_terminal_states))
# print('cmap', cmap_)
for i in range(n_terminal_states):
sc_bp = sc_bp_original.copy()
loc_terminal_i = np.where(np.asarray(self.labels) == self.terminal_clusters[i])[0]
sc_bp[loc_terminal_i,:] = 1.4
loc_i = np.where(sc_bp[:, i] > 0.8)[0]
val_pt = [sc_pt[pt_i] for pt_i in loc_i] # TODO, replace with array to speed up
# max_val_pt = np.percentile(np.asarray(val_pt),90)
max_val_pt = max(val_pt)
#print('gene exp max pt', max_val_pt)
loc_i_bp = np.where(sc_bp[:, i] > 0.000)[0] #0.001
loc_i_sc = np.where(np.asarray(sc_pt) <= max_val_pt)[0]
# print('loc i bp', loc_i_bp)
# print('loc i sc', loc_i_sc)
loc_ = np.intersect1d(loc_i_bp, loc_i_sc)
# print('loc_', loc_.shape)
gam_in = np.asarray(sc_pt)[loc_]
x = gam_in.reshape(-1, 1)
y = np.asarray(gene_exp)[loc_].reshape(-1, 1)
# print('Gene Expression:', gam_in.shape)
weights = np.asarray(sc_bp[:, i])[loc_].reshape(-1, 1)
# np.asarray(sc_bp[:, i])[loc_].reshape(-1, 1)
# print('weights',weights)
# print('weights ==0', np.sum(weights == 0))
# print('Gene Expression: setting up subplot number',i)
if len(loc_)>1:
#geneGAM = pg.LinearGAM(n_splines=20, spline_order=5, lam=10).fit(x, y, weights=weights)
geneGAM = pg.LinearGAM(n_splines=10, spline_order=4, lam=10).fit(x, y, weights=weights)
nx_spacing = 100
xval = np.linspace(min(sc_pt), max_val_pt, nx_spacing * 2)
yg = geneGAM.predict(X=xval)
else: print('loc_ has length zero')
ax.plot(xval, yg, color=cmap_[i], linewidth=2, zorder=3, linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round', label='TS:' + str(self.terminal_clusters[i]))
plt.legend()
plt.title('Gene Expression ' + title_gene)
return
def run_subPARC(self):
root_user = self.root_user
X_data = self.data
too_big_factor = self.too_big_factor
small_pop = self.small_pop
jac_std_global = self.jac_std_global
jac_weighted_edges = self.jac_weighted_edges
n_elements = X_data.shape[0]
# if n_elements < 2000: self.knn = 10
n_elements = X_data.shape[0]
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=self.knn)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
#### construct full graph
row_list = []
neighbor_array = neighbor_array # not listed in in any order of proximity
print('size neighbor array', neighbor_array.shape)
num_neigh = neighbor_array.shape[1]
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (1. / (distance_array.flatten() + 0.05)).tolist()
# if local_pruning_bool == True: print('share of neighbors discarded in local distance pruning %.1f' % (discard_count / neighbor_array.size))
csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
#DO MAGIC IMPUTATION#
if self.do_magic == True:
from sklearn.preprocessing import normalize
magic_steps = 3
Transition_full_graph = normalize(csr_full_graph, norm='l1', axis=1) ** magic_steps
imputed_data = pd.DataFrame(np.dot(Transition_full_graph.todense(), data), index=data.index, columns=data.columns )
n_original_comp, n_original_comp_labels = connected_components(csr_full_graph, directed=False)
sources, targets = csr_full_graph.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
G = ig.Graph(edgelist, edge_attrs={'weight': csr_full_graph.data.tolist()})
sim_list = G.similarity_jaccard(pairs=edgelist) # list of jaccard weights
ig_fullgraph = ig.Graph(list(edgelist), edge_attrs={'weight': sim_list})
ig_fullgraph.simplify(combine_edges='sum')
inv_simlist = [1 - i for i in sim_list]
# full_graph_shortpath = ig.Graph(list(edgelist), edge_attrs={'weight': inv_simlist}) #the weights reflect distances
# full_graph_shortpath.simplify(combine_edges='sum')
# self.full_graph_shortpath = full_graph_shortpath
self.full_graph_shortpath = self.full_graph_paths(X_data, n_original_comp)
####
sources, targets = csr_array.nonzero()
edgelist = list(zip(sources, targets))
edgelist_copy = edgelist.copy()
G = ig.Graph(edgelist, edge_attrs={'weight': csr_array.data.tolist()})
# print('average degree of prejacard graph is %.1f'% (np.mean(G.degree())))
# print('computing Jaccard metric')
sim_list = G.similarity_jaccard(pairs=edgelist_copy)
print('commencing global pruning')
sim_list_array = np.asarray(sim_list)
edge_list_copy_array = np.asarray(edgelist_copy)
if jac_std_global == 'median':
threshold = np.median(sim_list)
else:
threshold = np.mean(sim_list) - jac_std_global * np.std(sim_list)
strong_locs = np.where(sim_list_array > threshold)[0]
print('Share of edges kept after Global Pruning %.2f' % (len(strong_locs) / len(sim_list)), '%')
new_edgelist = list(edge_list_copy_array[strong_locs])
sim_list_new = list(sim_list_array[strong_locs])
G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
# print('average degree of graph is %.1f' % (np.mean(G_sim.degree())))
G_sim.simplify(combine_edges='sum') # "first"
# print('average degree of SIMPLE graph is %.1f' % (np.mean(G_sim.degree())))
print('commencing community detection')
if jac_weighted_edges == True:
start_leiden = time.time()
# print('call leiden on weighted graph for ', self.n_iter_leiden, 'iterations')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
n_iterations=self.n_iter_leiden, seed=self.random_seed)
print(time.time() - start_leiden)
else:
start_leiden = time.time()
# print('call leiden on unweighted graph', self.n_iter_leiden, 'iterations')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
n_iterations=self.n_iter_leiden, seed=self.random_seed)
print(time.time() - start_leiden)
time_end_PARC = time.time()
# print('Q= %.1f' % (partition.quality()))
PARC_labels_leiden = np.asarray(partition.membership)
PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
pop_list_1 = []
for item in set(list(PARC_labels_leiden.flatten())):
pop_list_1.append([item, list(PARC_labels_leiden.flatten()).count(item)])
print(pop_list_1)
too_big = False
# print('labels found after Leiden', set(list(PARC_labels_leiden.T)[0])) will have some outlier clusters that need to be added to a cluster if a cluster has members that are KNN
cluster_i_loc = np.where(PARC_labels_leiden == 0)[
0] # the 0th cluster is the largest one. so if cluster 0 is not too big, then the others wont be too big either
pop_i = len(cluster_i_loc)
print('largest cluster population', pop_i, too_big_factor, n_elements)
if pop_i > too_big_factor * n_elements: # 0.4
too_big = True
print('too big is', too_big)
cluster_big_loc = cluster_i_loc
list_pop_too_bigs = [pop_i]
cluster_too_big = 0
while too_big == True:
X_data_big = X_data[cluster_big_loc, :]
print(X_data_big.shape)
PARC_labels_leiden_big = self.run_toobig_subPARC(X_data_big)
# print('set of new big labels ', set(PARC_labels_leiden_big.flatten()))
PARC_labels_leiden_big = PARC_labels_leiden_big + 1000
# print('set of new big labels +1000 ', set(list(PARC_labels_leiden_big.flatten())))
pop_list = []
for item in set(list(PARC_labels_leiden_big.flatten())):
pop_list.append([item, list(PARC_labels_leiden_big.flatten()).count(item)])
# print('pop of new big labels', pop_list)
jj = 0
print('shape PARC_labels_leiden', PARC_labels_leiden.shape)
for j in cluster_big_loc:
PARC_labels_leiden[j] = PARC_labels_leiden_big[jj]
jj = jj + 1
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
print('new set of labels ')
pop_list_1 = []
for item in set(list(PARC_labels_leiden.flatten())):
pop_list_1.append([item, list(PARC_labels_leiden.flatten()).count(item)])
print(pop_list_1, set(PARC_labels_leiden))
too_big = False
set_PARC_labels_leiden = set(PARC_labels_leiden)
PARC_labels_leiden = np.asarray(PARC_labels_leiden)
for cluster_ii in set_PARC_labels_leiden:
cluster_ii_loc = np.where(PARC_labels_leiden == cluster_ii)[0]
pop_ii = len(cluster_ii_loc)
not_yet_expanded = pop_ii not in list_pop_too_bigs
if pop_ii > too_big_factor * n_elements and not_yet_expanded == True:
too_big = True
print('cluster', cluster_ii, 'is too big and has population', pop_ii)
cluster_big_loc = cluster_ii_loc
cluster_big = cluster_ii
big_pop = pop_ii
if too_big == True:
list_pop_too_bigs.append(big_pop)
print('cluster', cluster_big, 'is too big with population', big_pop, '. It will be expanded')
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
small_pop_list = []
small_cluster_list = []
small_pop_exist = False
for cluster in set(PARC_labels_leiden):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop: # 10
small_pop_exist = True
small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
small_cluster_list.append(cluster)
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
if len(available_neighbours) > 0:
available_neighbours_list = [value for value in group_of_old_neighbors if
value in list(available_neighbours)]
best_group = max(available_neighbours_list, key=available_neighbours_list.count)
PARC_labels_leiden[single_cell] = best_group
time_smallpop = time.time()
while (small_pop_exist) == True & (time.time() - time_smallpop < 15):
small_pop_list = []
small_pop_exist = False
for cluster in set(list(PARC_labels_leiden.flatten())):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop:
small_pop_exist = True
# print(cluster, ' has small population of', population, )
small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
PARC_labels_leiden[single_cell] = best_group
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
PARC_labels_leiden = list(PARC_labels_leiden.flatten())
# print('final labels allocation', set(PARC_labels_leiden))
pop_list = []
pop_list_raw = []
for item in range(len(set(PARC_labels_leiden))):
pop_item = PARC_labels_leiden.count(item)
pop_list.append((item, pop_item))
pop_list_raw.append(pop_item)
print('list of cluster labels and populations', len(pop_list), pop_list)
self.labels = PARC_labels_leiden # list
n_clus = len(set(self.labels))
##determine majority truth
if self.pseudotime == True:
## Make cluster-graph (1)
vc_graph = ig.VertexClustering(ig_fullgraph,
membership=PARC_labels_leiden) # jaccard weights, bigger is better
vc_graph_old = ig.VertexClustering(G_sim, membership=PARC_labels_leiden)
# print('vc graph G_sim', vc_graph)
vc_graph = vc_graph.cluster_graph(combine_edges='sum')
vc_graph_old = vc_graph_old.cluster_graph(combine_edges='sum')
# print('vc graph G_sim', vc_graph)
# print('vc graph G_sim old', vc_graph_old)
reweighted_sparse_vc, edgelist = self.recompute_weights(vc_graph, pop_list_raw)
print('len old edge list', edgelist) # 0.15 for CD34
if self.dataset == 'toy': # ''humanCD34':
global_pruning_std = 2
print('Toy: global cluster graph pruning level', global_pruning_std)
elif self.dataset == 'bcell':
global_pruning_std = 0.15
print('Bcell: global cluster graph pruning level', global_pruning_std)
else:
global_pruning_std = 0.15
print('Humancd34: global cluster graph pruning level', global_pruning_std)
edgeweights, edgelist, comp_labels = local_pruning_clustergraph_mst(reweighted_sparse_vc,
global_pruning_std=global_pruning_std,
preserve_disconnected=self.preserve_disconnected) self.connected_comp_labels = comp_labels
print('final comp labels set', set(comp_labels))
print('len new edge list', edgelist)
locallytrimmed_g = ig.Graph(edgelist, edge_attrs={'weight': edgeweights.tolist()})
locallytrimmed_g = locallytrimmed_g.simplify(combine_edges='sum')
locallytrimmed_sparse_vc = get_sparse_from_igraph(locallytrimmed_g, weight_attr='weight')
layout = locallytrimmed_g.layout_fruchterman_reingold(
weights='weight') rgets = locallytrimmed_sparse_vc.nonzero()
edgelist_simple = list(zip(sources.tolist(), targets.tolist()))
edgelist_unique = set(tuple(sorted(l)) for l in edgelist_simple)
self.edgelist_unique = edgelist_unique
self.edgelist = edgelist
x_lazy = self.x_lazy
alpha_teleport = self.alpha_teleport
graph_dict = {}
n_components, labels = connected_components(csgraph=locallytrimmed_sparse_vc, directed=False,
return_labels=True)
print('there are ', n_components, 'components in the graph')
df_graph = pd.DataFrame(locallytrimmed_sparse_vc.todense())
df_graph['cc'] = labels
df_graph['pt'] = float('NaN')
df_graph['markov_pt'] = float('NaN')
df_graph['majority_truth'] = 'maj truth'
df_graph['graph_node_label'] = 'node label'
set_parc_labels = list(set(PARC_labels_leiden))
set_parc_labels.sort()
print('parc labels', set_parc_labels)
terminal_clus = []
node_deg_list = []
super_terminal_clus_revised = []
pd_columnnames_terminal = []
dict_terminal_super_sub_pairs = {}
self.root = []
for comp_i in range(n_components):
loc_compi = np.where(labels == comp_i)[0]
print('loc_compi', loc_compi)
a_i = df_graph.iloc[loc_compi][loc_compi].values
a_i = csr_matrix(a_i, (a_i.shape[0], a_i.shape[0]))
cluster_labels_subi = [x for x in loc_compi]
sc_labels_subi = [PARC_labels_leiden[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
sc_truelabels_subi = [self.true_label[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
if self.dataset == 'toy':
if self.super_cluster_labels != False:
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
print('super node degree', self.super_node_degree_list)
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
else:
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
[], [])
elif self.dataset == 'humanCD34':
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_HumanCD34(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi)
elif self.dataset == 'bcell':
if self.super_cluster_labels != False:
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
'''
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_bcell(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi)
'''
else:
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
[], [])
self.root.append(root_i)
for item in node_deg_list_i:
node_deg_list.append(item)
print('a_i shape, true labels shape', a_i.shape, len(sc_truelabels_subi), len(sc_labels_subi))
new_root_index_found = False
for ii, llabel in enumerate(cluster_labels_subi):
if root_i == llabel:
new_root_index = ii
new_root_index_found = True
print('new root index', new_root_index)
if new_root_index_found == False:
print('cannot find the new root index')
new_root_index = 0
hitting_times, roundtrip_times = self.compute_hitting_time(a_i, root=new_root_index,
x_lazy=x_lazy, alpha_teleport=alpha_teleport)
very_high = np.mean(hitting_times) + 1.5 * np.std(hitting_times)
without_very_high_pt = [iii for iii in hitting_times if iii < very_high]
new_very_high = np.mean(without_very_high_pt) + np.std(without_very_high_pt)
print('very high, and new very high', very_high, new_very_high)
new_hitting_times = [x if x < very_high else very_high for x in hitting_times]
hitting_times = np.asarray(new_hitting_times)
scaling_fac = 10 / max(hitting_times)
hitting_times = hitting_times * scaling_fac
s_ai, t_ai = a_i.nonzero()
edgelist_ai = list(zip(s_ai, t_ai))
edgeweights_ai = a_i.data
biased_edgeweights_ai = get_biased_weights(edgelist_ai, edgeweights_ai, hitting_times)
adjacency_matrix_ai = np.zeros((a_i.shape[0], a_i.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix_ai[start, end] = biased_edgeweights_ai[i]
markov_hitting_times_ai = self.simulate_markov(adjacency_matrix_ai,
new_root_index)
print('markov_hitting times ')
for eee, ttt in enumerate(markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
very_high = np.mean(markov_hitting_times_ai) + 1.5 * np.std(markov_hitting_times_ai)
very_high = min(very_high, max(markov_hitting_times_ai))
without_very_high_pt = [iii for iii in markov_hitting_times_ai if iii < very_high]
new_very_high = min(np.mean(without_very_high_pt) + np.std(without_very_high_pt), very_high)
print('very high, and new very high', very_high, new_very_high)
new_markov_hitting_times_ai = [x if x < very_high else very_high for x in markov_hitting_times_ai]
for eee, ttt in enumerate(new_markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
markov_hitting_times_ai = np.asarray(new_markov_hitting_times_ai)
scaling_fac = 10 / max(markov_hitting_times_ai)
markov_hitting_times_ai = markov_hitting_times_ai * scaling_fac
for eee, ttt in enumerate(markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
print('markov hitting times', [(i, j) for i, j in enumerate(markov_hitting_times_ai)])
print('hitting times', [(i, j) for i, j in enumerate(hitting_times)])
markov_hitting_times_ai = (markov_hitting_times_ai ) adjacency_matrix_csr_ai = sparse.csr_matrix(adjacency_matrix_ai)
(sources, targets) = adjacency_matrix_csr_ai.nonzero()
edgelist_ai = list(zip(sources, targets))
weights_ai = adjacency_matrix_csr_ai.data
bias_weights_2_ai = get_biased_weights(edgelist_ai, weights_ai, markov_hitting_times_ai, round_no=2)
adjacency_matrix2_ai = np.zeros((adjacency_matrix_ai.shape[0], adjacency_matrix_ai.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix2_ai[start, end] = bias_weights_2_ai[i]
if self.super_terminal_cells == False:
terminal_clus_ai = self.get_terminal_clusters(adjacency_matrix2_ai, markov_hitting_times_ai,
new_root_index)
for i in terminal_clus_ai:
terminal_clus.append(cluster_labels_subi[i])
elif len(self.super_terminal_clusters) > 0:
sub_terminal_clus_temp_ = []
terminal_clus_ai = []
for i in self.super_terminal_clusters:
print('super cluster terminal label', i)
sub_terminal_clus_temp_loc = np.where(np.asarray(self.super_cluster_labels) == i)[0]
temp_set = set(list(np.asarray(self.labels)[sub_terminal_clus_temp_loc]))
temp_max_pt = 0
most_likely_sub_terminal = False
count_frequency_super_in_sub = 0
for j in temp_set:
super_cluster_composition_loc = np.where(np.asarray(self.labels) == j)[0]
super_cluster_composition = self.func_mode(
list(np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]))
if (markov_hitting_times_ai[j] > temp_max_pt) & (super_cluster_composition == i):
temp_max_pt = markov_hitting_times_ai[j]
print('super, j and temp max pt', i, j, temp_max_pt)
most_likely_sub_terminal = j
if most_likely_sub_terminal == False:
print('no sub cluster has majority made of super-cluster ', i)
for j in temp_set:
count_frequency_super_in_sub_temp = list(
np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]).count(j)
if (markov_hitting_times_ai[j] > temp_max_pt) & (
count_frequency_super_in_sub_temp > count_frequency_super_in_sub):
count_frequency_super_in_sub = count_frequency_super_in_sub_temp
temp_max_pt = markov_hitting_times_ai[j]
most_likely_sub_terminal = j
sub_terminal_clus_temp_.append(most_likely_sub_terminal)
if (markov_hitting_times_ai[most_likely_sub_terminal] > np.percentile(
np.asarray(markov_hitting_times_ai), 30)):
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
super_terminal_clus_revised.append(i)
terminal_clus.append(most_likely_sub_terminal)
terminal_clus_ai.append(
np.where(np.asarray(cluster_labels_subi) == most_likely_sub_terminal)[0][0])
print('the sub terminal cluster that best captures the super terminal', i, 'is',
most_likely_sub_terminal)
else:
print('the sub terminal cluster that best captures the super terminal', i, 'is',
most_likely_sub_terminal, 'but the pseudotime is too low')
else:
print('super terminal cells', self.super_terminal_cells)
print([self.labels[ti] for ti in
self.super_terminal_cells])
temp = [self.labels[ti] for ti in self.super_terminal_cells if
self.labels[ti] in cluster_labels_subi]
terminal_clus_ai = []
for i in temp:
terminal_clus_ai.append(np.where(np.asarray(cluster_labels_subi) == i)[0][0])
terminal_clus.append(i)
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
print('terminal clus in this a_i', terminal_clus_ai)
print('final terminal clus', terminal_clus)
for target_terminal in terminal_clus_ai:
prob_ai = self.simulate_branch_probability(target_terminal, terminal_clus_ai, adjacency_matrix2_ai,
new_root_index, pt=markov_hitting_times_ai, num_sim=500)
df_graph['terminal_clus' + str(cluster_labels_subi[target_terminal])] = 0.0000000
pd_columnnames_terminal.append('terminal_clus' + str(cluster_labels_subi[target_terminal]))
print('prob ai for target terminal', target_terminal, prob_ai)
for k, prob_ii in enumerate(prob_ai):
df_graph.at[cluster_labels_subi[k], 'terminal_clus' + str(
cluster_labels_subi[target_terminal])] = prob_ii
bp_array = df_graph[pd_columnnames_terminal].values
bp_array[np.isnan(bp_array)]=0.00000001
print('final bp_array NOT normed by rowsum', bp_array)
bp_array = bp_array / bp_array.sum(axis=1)[:, None]
bp_array[np.isnan(bp_array)] = 0.00000001
print('final bp_array normed by rowsum', bp_array)
for ei, ii in enumerate(loc_compi):
df_graph.at[ii, 'pt'] = hitting_times[ei]
df_graph.at[ii, 'graph_node_label'] = graph_node_label[ei]
df_graph.at[ii, 'majority_truth'] = graph_node_label[ei]
df_graph.at[ii, 'markov_pt'] = markov_hitting_times_ai[ei]
locallytrimmed_g.vs["label"] = df_graph['graph_node_label'].values
hitting_times = df_graph['pt'].values
if len(super_terminal_clus_revised) > 0:
self.revised_super_terminal_clusters = super_terminal_clus_revised
else:
self.revised_super_terminal_clusters = self.super_terminal_clusters
self.hitting_times = hitting_times
self.markov_hitting_times = df_graph['markov_pt'].values
self.terminal_clusters = terminal_clus
print('terminal clusters', terminal_clus)
self.node_degree_list = node_deg_list
self.project_branch_probability_sc(bp_array)
self.dict_terminal_super_sub_pairs = dict_terminal_super_sub_pairs
hitting_times = self.markov_hitting_times
bias_weights_2_all = get_biased_weights(edgelist, edgeweights, self.markov_hitting_times, round_no=2)
row_list = []
col_list = []
for (rowi, coli) in edgelist:
row_list.append(rowi)
col_list.append(coli)
temp_csr = csr_matrix((np.array(bias_weights_2_all), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
if self.dataset == 'toy': visual_global_pruning_std = 0.15
max_outgoing = 4
else:
visual_global_pruning_std = 1 max_outgoing = 2
edgeweights_maxout_2, edgelist_maxout_2, comp_labels_2 = local_pruning_clustergraph_mst(temp_csr,
global_pruning_std=visual_global_pruning_std,
max_outgoing=max_outgoing,
preserve_disconnected=self.preserve_disconnected)
row_list = []
col_list = []
for (rowi, coli) in edgelist_maxout_2:
row_list.append(rowi)
col_list.append(coli)
temp_csr = csr_matrix((np.array(edgeweights_maxout_2), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
temp_csr = temp_csr.transpose().todense() + temp_csr.todense()
temp_csr = np.tril(temp_csr, -1)
temp_csr = csr_matrix(temp_csr)
edgeweights_maxout_2 = temp_csr.data
scale_factor = max(edgeweights_maxout_2) - min(edgeweights_maxout_2)
edgeweights_maxout_2 = [((wi + .1) * 2.5 / scale_factor) + 0.1 for wi in edgeweights_maxout_2]
sources, targets = temp_csr.nonzero()
edgelist_maxout_2 = list(zip(sources.tolist(), targets.tolist()))
self.edgelist_maxout = edgelist_maxout_2
self.edgeweights_maxout = edgeweights_maxout_2
remove_outliers = hitting_times
threshold = np.percentile(remove_outliers, 95)
th_hitting_times = [x if x < threshold else threshold for x in hitting_times]
remove_outliers_low = hitting_times[hitting_times < (np.mean(hitting_times) - 0.3 * np.std(hitting_times))]
threshold_low = np.mean(remove_outliers_low) - 0.3 * np.std(remove_outliers_low)
threshold_low = np.percentile(remove_outliers_low, 5)
th_hitting_times = [x if x > threshold_low else threshold_low for x in th_hitting_times]
scaled_hitting_times = (th_hitting_times - np.min(th_hitting_times))
scaled_hitting_times = scaled_hitting_times * (1000 / np.max(scaled_hitting_times))
self.scaled_hitting_times = scaled_hitting_times
print('markov hitting times to put in single cell project', self.markov_hitting_times)
self.single_cell_pt_markov = self.project_hittingtimes_sc(self.markov_hitting_times)
print('markov hitting times to put in single cell project', self.single_cell_pt_markov)
threshold = int(threshold)
scaled_hitting_times = scaled_hitting_times.astype(int)
pal = ig.drawing.colors.AdvancedGradientPalette(['yellow', 'green', 'blue'], n=1001)
all_colors = []
for i in scaled_hitting_times:
all_colors.append(pal.get(int(i))[0:3])
locallytrimmed_g.vs['hitting_times'] = scaled_hitting_times
locallytrimmed_g.vs['color'] = [pal.get(i)[0:3] for i in scaled_hitting_times]
self.group_color = [colors.to_hex(v) for v in locallytrimmed_g.vs['color']]
viridis_cmap = cm.get_cmap('viridis_r')
self.group_color_cmap = [colors.to_hex(v) for v in
viridis_cmap(scaled_hitting_times / 1000)]
self.graph_node_label = df_graph['graph_node_label'].values
self.edgeweight = [e['weight'] * 1 for e in locallytrimmed_g.es]
print('self edge weight', len(self.edgeweight), self.edgeweight)
print('self edge list', len(self.edgelist_unique), self.edgelist_unique)
self.graph_node_pos = layout.coords
f, ((ax, ax1, ax2)) = plt.subplots(1, 3, sharey=True)
self.draw_piechart_graph(ax, ax1, ax2)
plt.show()
return
def _draw_directed_edges(self, ax_obj, edgelist, pt, node_pos, edgeweight,
                         edgeweight_scale, arrow_head_w, line_color, line_alpha):
    """Draw cluster-graph edges on ax_obj, oriented from low to high pseudotime.

    Each edge is rendered as a straight line plus a mid-edge arrow pointing
    towards the endpoint with the larger pseudotime value in ``pt``.
    """
    import matplotlib.lines as lines
    for e_i, (start, end) in enumerate(edgelist):
        # orient the edge so that 'end' is the later (higher-pseudotime) node
        if pt[start] > pt[end]:
            start, end = end, start
        ax_obj.add_line(
            lines.Line2D([node_pos[start, 0], node_pos[end, 0]],
                         [node_pos[start, 1], node_pos[end, 1]],
                         color=line_color, lw=edgeweight[e_i] * edgeweight_scale, alpha=line_alpha))
        # fit the straight segment between the two nodes so the arrow can be
        # placed at its midpoint, pointing towards the 'end' node
        z = np.polyfit([node_pos[start, 0], node_pos[end, 0]],
                       [node_pos[start, 1], node_pos[end, 1]], 1)
        minx = np.min(np.array([node_pos[start, 0], node_pos[end, 0]]))
        maxx = np.max(np.array([node_pos[start, 0], node_pos[end, 0]]))
        direction_arrow = 1 if node_pos[start, 0] < node_pos[end, 0] else -1
        xp = np.linspace(minx, maxx, 500)
        smooth = np.poly1d(z)(xp)
        step = 1
        ax_obj.arrow(xp[250], smooth[250],
                     xp[250 + direction_arrow * step] - xp[250],
                     smooth[250 + direction_arrow * step] - smooth[250],
                     shape='full', lw=0, length_includes_head=True,
                     head_width=arrow_head_w, color='grey')

def draw_piechart_graph(self, ax, ax1, ax2, type_pt='original', ):
    """Plot the pruned cluster graph on three axes.

    ax  : each cluster drawn as a pie-chart inset showing its reference
          (true-label) composition, with a shared legend of true labels.
    ax1 : clusters coloured by Markov-simulation pseudotime.
    ax2 : clusters coloured by hitting-time pseudotime on the undirected graph.
    Terminal clusters are outlined in red on ax1/ax2.

    type_pt selects which pseudotime orients the arrows on ax:
    'original' (scaled hitting times), 'biased_stationary' or 'markov'.
    """
    arrow_head_w = 0.2
    edgeweight_scale = 1
    node_pos = np.asarray(self.graph_node_pos)
    edgelist = list(self.edgelist_maxout)
    edgeweight = self.edgeweights_maxout
    graph_node_label = self.graph_node_label
    if type_pt == 'original': pt = self.scaled_hitting_times
    if type_pt == 'biased_stationary': pt = self.biased_hitting_times_stationary
    if type_pt == 'markov': pt = self.markov_hitting_times
    n_groups = len(set(self.labels))
    n_truegroups = len(set(self.true_label))
    group_pop = np.zeros([n_groups, 1])
    group_frac = pd.DataFrame(np.zeros([n_groups, n_truegroups]), columns=list(set(self.true_label)))
    labels_arr = np.asarray(self.labels)
    true_label_arr = np.asarray(self.true_label)
    for group_i in set(self.labels):
        loc_i = np.where(labels_arr == group_i)[0]
        group_pop[group_i] = len(loc_i)
        # BUG FIX: index with the plain array loc_i — the previous
        # double-bracket form [[loc_i]] produces a 2-D result on modern numpy,
        # whose rows are unhashable and break the set() below
        true_label_in_group_i = list(true_label_arr[loc_i])
        for ii in set(true_label_in_group_i):
            group_frac[ii][group_i] = true_label_in_group_i.count(ii)
    group_frac = group_frac.div(group_frac.sum(axis=1), axis=0)  # rows -> fractions
    line_true = np.linspace(0, 1, n_truegroups)
    color_true_list = [plt.cm.jet(color) for color in line_true]
    # white placeholder nodes underneath the pie-chart insets
    ax.scatter(node_pos[:, 0], node_pos[:, 1],
               c='white', edgecolors='face', s=group_pop, cmap='jet')
    print('draw triangle edgelist', len(edgelist), edgelist)
    self._draw_directed_edges(ax, edgelist, pt, node_pos, edgeweight,
                              edgeweight_scale, arrow_head_w, 'grey', 0.2)
    # convert node positions (data coords) to figure coords to place the insets
    trans = ax.transData.transform
    bbox = ax.get_position().get_points()
    ax_x_min = bbox[0, 0]
    ax_x_max = bbox[1, 0]
    ax_y_min = bbox[0, 1]
    ax_y_max = bbox[1, 1]
    ax_len_x = ax_x_max - ax_x_min
    ax_len_y = ax_y_max - ax_y_min
    trans2 = ax.transAxes.inverted().transform
    pie_axs = []
    # normalise cluster populations to a relative inset size in ~[0.05, 0.15]
    pie_size_ar = ((group_pop - np.min(group_pop)) / (np.max(group_pop) - np.min(group_pop)) + 0.5) / 10
    patches = []
    for node_i in range(n_groups):
        pie_size = pie_size_ar[node_i][0]
        x1, y1 = trans(node_pos[node_i])  # data -> display
        xa, ya = trans2((x1, y1))  # display -> axes fraction
        xa = ax_x_min + (xa - pie_size / 2) * ax_len_x
        ya = ax_y_min + (ya - pie_size / 2) * ax_len_y
        rect = [xa, ya, pie_size * ax_len_x, pie_size * ax_len_y]
        frac = group_frac.iloc[node_i].values
        pie_axs.append(plt.axes(rect, frameon=False))
        # draw the pie once, keeping the wedge patches for the shared legend
        # (previously the identical pie was drawn twice per node)
        patches, texts = pie_axs[node_i].pie(frac, wedgeprops={'linewidth': 0.0}, colors=color_true_list)
        pie_axs[node_i].set_xticks([])
        pie_axs[node_i].set_yticks([])
        pie_axs[node_i].set_aspect('equal')
        pie_axs[node_i].text(0.5, 0.5, graph_node_label[node_i])
    labels = list(set(self.true_label))
    plt.legend(patches, labels, loc=(-5, -5), fontsize=6)
    if self.too_big_factor > 0.1:
        is_sub = ' super clusters'
    else:
        is_sub = ' sub clusters'
    ti = 'Reference Group Membership. K=' + str(self.knn) + '. ncomp = ' + str(self.ncomp) + is_sub
    ax.set_title(ti)
    title_list = ["PT using Markov Simulation", "PT on undirected original graph"]
    for i, ax_i in enumerate([ax1, ax2]):
        print("drawing axis", i)
        pt = self.markov_hitting_times if i == 0 else self.hitting_times
        self._draw_directed_edges(ax_i, edgelist, pt, node_pos, edgeweight,
                                  edgeweight_scale, arrow_head_w, 'black', 0.5)
        # red outline marks terminal clusters
        c_edge = []
        l_width = []
        for ei, pti in enumerate(pt):
            if ei in self.terminal_clusters:
                c_edge.append('red')
                l_width.append(1.5)
            else:
                c_edge.append('gray')
                l_width.append(0.0)
        gp_scaling = 500 / max(group_pop)
        print(gp_scaling, 'gp_scaline')
        group_pop_scale = group_pop * gp_scaling
        ax_i.scatter(node_pos[:, 0], node_pos[:, 1], s=group_pop_scale, c=pt, cmap='viridis_r',
                     edgecolors=c_edge, alpha=1, zorder=3, linewidth=l_width)
        for ii in range(node_pos.shape[0]):
            ax_i.text(node_pos[ii, 0] + 0.5, node_pos[ii, 1] + 0.5, 'c' + str(ii), color='black', zorder=4)
        ax_i.set_title(title_list[i])
def accuracy(self, onevsall=1):
    """Score the clustering against the true labels for one target class.

    Treats the class ``onevsall`` as the positive class: clusters whose
    majority true label equals ``onevsall`` count as predicted-positive.
    Clusters labelled -1 (unassigned) are excluded from the confusion counts.

    Returns a 4-tuple:
      accuracy_val               : [error_rate, f1_score, tnr, fnr, tpr, fpr,
                                    precision, recall, num_groups, n_target]
      predict_class_array        : per-sample 1/0 prediction for the target class
      majority_truth_labels      : per-sample majority true label of its cluster
      number_clusters_for_target : number of clusters assigned to the target class
    """
    true_labels = self.true_label
    Index_dict = {}
    PARC_labels = self.labels
    N = len(PARC_labels)
    n_cancer = list(true_labels).count(onevsall)  # population of the target class
    n_pbmc = N - n_cancer                         # population of everything else
    # group the true labels of the samples by their assigned cluster
    for k in range(N):
        Index_dict.setdefault(PARC_labels[k], []).append(true_labels[k])
    num_groups = len(Index_dict)
    sorted_keys = list(sorted(Index_dict.keys()))
    error_count = []
    pbmc_labels = []
    thp1_labels = []
    fp, fn, tp, tn, precision, recall, f1_score = 0, 0, 0, 0, 0, 0, 0
    for kk in sorted_keys:
        vals = [t for t in Index_dict[kk]]
        majority_val = self.func_mode(vals)
        if majority_val == onevsall:
            print('cluster', kk, ' has majority', onevsall, 'with population', len(vals))
        if kk == -1:
            len_unknown = len(vals)
            print('len unknown', len_unknown)
        if (majority_val == onevsall) and (kk != -1):
            # cluster predicted as the target class
            thp1_labels.append(kk)
            fp = fp + len([e for e in vals if e != onevsall])
            tp = tp + len([e for e in vals if e == onevsall])
            error_count.append(len([e for e in vals if e != majority_val]))
        elif (majority_val != onevsall) and (kk != -1):
            # cluster predicted as the rest
            pbmc_labels.append(kk)
            tn = tn + len([e for e in vals if e != onevsall])
            fn = fn + len([e for e in vals if e == onevsall])
            error_count.append(len([e for e in vals if e != majority_val]))
    # per-sample binary prediction: 1 for target clusters, 0 for the rest
    predict_class_array = np.array(PARC_labels)
    PARC_labels_array = np.array(PARC_labels)
    number_clusters_for_target = len(thp1_labels)
    for cancer_class in thp1_labels:
        predict_class_array[PARC_labels_array == cancer_class] = 1
    for benign_class in pbmc_labels:
        predict_class_array[PARC_labels_array == benign_class] = 0
    error_rate = sum(error_count) / N
    n_target = tp + fn
    # BUG FIX: guard the rate computations against division by zero when one
    # of the two classes is absent from the true labels
    tnr = tn / n_pbmc if n_pbmc > 0 else 0
    fnr = fn / n_cancer if n_cancer > 0 else 0
    tpr = tp / n_cancer if n_cancer > 0 else 0
    fpr = fp / n_pbmc if n_pbmc > 0 else 0
    if tp != 0 or fn != 0: recall = tp / (tp + fn)
    if tp != 0 or fp != 0: precision = tp / (tp + fp)
    if precision != 0 or recall != 0:
        f1_score = precision * recall * 2 / (precision + recall)
    # per-sample label: the majority true label of the sample's cluster
    majority_truth_labels = np.empty((len(true_labels), 1), dtype=object)
    for cluster_i in set(PARC_labels):
        cluster_i_loc = np.where(np.asarray(PARC_labels) == cluster_i)[0]
        true_labels = np.asarray(true_labels)
        majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
        majority_truth_labels[cluster_i_loc] = majority_truth
    majority_truth_labels = list(majority_truth_labels.flatten())
    accuracy_val = [error_rate, f1_score, tnr, fnr, tpr, fpr, precision,
                    recall, num_groups, n_target]
    return accuracy_val, predict_class_array, majority_truth_labels, number_clusters_for_target
def run_PARC(self):
    """Top-level driver.

    Builds the KNN graph, runs the clustering / trajectory pipeline
    (run_subPARC) and, when more than one true label is present, computes
    one-vs-all F1 statistics per target class into self.stats_df. Also sets
    self.f1_accumulated (population-weighted mean F1), self.f1_mean
    (unweighted mean F1) and self.majority_truth_labels.
    """
    print('input data has shape', self.data.shape[0], '(samples) x', self.data.shape[1], '(features)')
    self.ncomp = self.data.shape[1]
    # BUG FIX: default the true labels *before* they are iterated below;
    # previously set(list(self.true_label)) ran first and raised TypeError
    # whenever true_label was None.
    if self.true_label is None:
        self.true_label = [1] * self.data.shape[0]
    pop_list = []
    for item in set(list(self.true_label)):
        pop_list.append([item, list(self.true_label).count(item)])
    list_roc = []
    time_start_total = time.time()
    time_start_knn = time.time()
    self.knn_struct = self.make_knn_struct()
    time_end_knn_struct = time.time() - time_start_knn
    self.run_subPARC()
    run_time = time.time() - time_start_total
    print('time elapsed {:.1f} seconds'.format(run_time))
    targets = list(set(self.true_label))
    N = len(list(self.true_label))
    self.f1_accumulated = 0
    self.f1_mean = 0
    self.stats_df = pd.DataFrame({'jac_std_global': [self.jac_std_global], 'dist_std_local': [self.dist_std_local],
                                  'runtime(s)': [run_time]})
    self.majority_truth_labels = []
    if len(targets) > 1:
        # one-vs-all F1 per target class; accumulate weighted and unweighted means
        f1_accumulated = 0
        f1_acc_noweighting = 0
        for onevsall_val in targets:
            print('target is', onevsall_val)
            vals_roc, predict_class_array, majority_truth_labels, numclusters_targetval = self.accuracy(
                onevsall=onevsall_val)
            f1_current = vals_roc[1]
            print('target', onevsall_val, 'has f1-score of %.2f' % (f1_current * 100))
            f1_accumulated = f1_accumulated + f1_current * (list(self.true_label).count(onevsall_val)) / N
            f1_acc_noweighting = f1_acc_noweighting + f1_current
            list_roc.append(
                [self.jac_std_global, self.dist_std_local, onevsall_val] + vals_roc + [numclusters_targetval] + [
                    run_time])
        f1_mean = f1_acc_noweighting / len(targets)
        print("f1-score (unweighted) mean %.2f" % (f1_mean * 100), '%')
        print('f1-score weighted (by population) %.2f' % (f1_accumulated * 100), '%')
        df_accuracy = pd.DataFrame(list_roc,
                                   columns=['jac_std_global', 'dist_std_local', 'onevsall-target', 'error rate',
                                            'f1-score', 'tnr', 'fnr',
                                            'tpr', 'fpr', 'precision', 'recall', 'num_groups',
                                            'population of target', 'num clusters', 'clustering runtime'])
        self.f1_accumulated = f1_accumulated
        self.f1_mean = f1_mean
        self.stats_df = df_accuracy
        self.majority_truth_labels = majority_truth_labels
    return
def run_palantir_func_human34(ad, ncomps, knn, tsne, revised_clus, start_cell='c4823'):
    """Run the Palantir pipeline (PCA -> diffusion maps -> multiscale space)
    on the human CD34 AnnData object and plot the clusters.

    NOTE(review): this function body appears corrupted/truncated — several
    names used below ('str_true_label', 'ir', 'gene_trends') are never
    defined, and 'start_cell' is unused; reconstruct against the original
    source before running.
    """
    # cell x gene expression matrix from the AnnData object
    norm_df_pal = pd.DataFrame(ad.X)
    # prefix cell indices with 'c' (e.g. 'c4823') to match start_cell naming
    new = ['c' + str(i) for i in norm_df_pal.index]
    norm_df_pal.index = new
    norm_df_pal.columns =[i for i in ad.var_names]
    pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
    sc.tl.pca(ad, svd_solver='arpack')
    dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
    ms_data = palantir.utils.determine_multiscale_space(dm_res)
    print('ms data', ms_data.shape)
    r_true_label = pd.Series(revised_clus, index=norm_df_pal.index)
    # NOTE(review): 'str_true_label' is undefined — 'r_true_label' assigned
    # just above is very likely what was intended; confirm and rename.
    palantir.plot.plot_cell_clusters(tsne, str_true_label)
    # NOTE(review): 'ir' and 'gene_trends' are undefined — this line looks
    # mangled (probably 'palantir.plot.plot_gene_trends(gene_trends)' with the
    # gene-trend computation missing); it will raise NameError if executed.
    ir.plot.plot_gene_trends(gene_trends)
    plt.show()
def slalom_human():
    """Regress cell-cycle-related factor activity out of the human CD34 data.

    Fits a slalom factor-analysis model using a custom gene-set annotation
    ('geneset.gmt'), regresses out the proliferation/cell-cycle factors, and
    returns the full expression DataFrame with the annotated genes replaced
    by their corrected values. Paths are hard-coded to the author's machine.
    """
    import os
    import slalom
    from slalom import plotFactors, plotRelevance, plotLoadings, saveFA, dumpFA
    data_dir = '/home/shobi/Trajectory/Datasets/'
    ad = sc.read(
        '/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
    df_ = pd.DataFrame(ad.X)
    df_.columns = [i for i in ad.var_names]
    annoDB = 'custom'
    annoFile = os.path.join(data_dir, 'geneset.gmt')
    # slalom expects genes x cells, hence the transpose
    data_slalom = slalom.utils.load_txt(df=df_.T, annoFiles=annoFile, annoDBs=annoDB)
    print("Loaded {:d} cells, {:d} genes".format(data_slalom['Y'].shape[0], data_slalom['Y'].shape[1]))
    print("Annotation: {:d} terms".format(len(data_slalom['terms'])))
    print('data terms', data_slalom['terms'])
    print(data_slalom['genes'])
    print(data_slalom['lab'])
    # I: indicator matrix that assigns genes to pathways
    I = data_slalom['I']  # if loaded from the hdf file change to I = data['IMSigDB']
    # Y: log expression values
    Y = data_slalom['Y']
    # terms: the names of the annotation terms
    terms = data_slalom['terms']
    print("terms", terms)
    # gene_ids: the ids of the genes in Y
    gene_ids = data_slalom['genes']
    print('gene_ids', gene_ids)
    print(I.shape, Y.shape, terms.shape)
    # initialize FA instance: Gaussian noise model, 3 dense hidden factors
    FA = slalom.initFA(Y, terms, I, gene_ids=gene_ids, noise='gauss', nHidden=3, minGenes=1)
    FA.train()
    # print model-fit diagnostics
    FA.printDiagnostics()
    fig = plotRelevance(FA, madFilter=0)
    # idx=FA.getTermIndex(['G2m checkpoint', 'P53 pathway'])
    # print('idx',idx)
    # regress out the factors corresponding to cell-cycle / proliferation terms
    corrected_data = FA.regressOut(
        terms=['M phase', 'Dna replication', 'Chromosome segregation', 'M phase of mitotic cell cycle',
               'Organelle fission'])
    print('corrected_data.shape', corrected_data.shape)
    full_matrix = df_.copy()
    print(full_matrix.head)
    # genes that belong to at least one annotated pathway (non-zero row in I)
    annotated_genes = np.array(data_slalom['genes'])[np.sum(data_slalom['I'], axis=1) != 0]
    print('annotated genes', len(annotated_genes), annotated_genes)
    # overwrite annotated genes with the cell-cycle-corrected values
    full_matrix[annotated_genes] = corrected_data
    print('full shape ', full_matrix)
    return full_matrix
def main_Human(ncomps=100, knn=30, p0_random_seed=4, run_palantir_func = False):
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
ncomps = ncomps# 40 ncomps and 20KNN works well
knn = knn # 30
p0_random_seed =p0_random_seed
print('ncomp =', ncomps, ' knn=', knn, ' randseed=', p0_random_seed)
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne = pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
tsnem = ad.obsm['tsne']
revised_clus = ad.obs['clusters'].values.tolist().copy()
loc_DCs = [i for i in range(5780) if ad.obs['clusters'].values.tolist()[i] == '7']
for loc_i in loc_DCs:
if ad.obsm['palantir_branch_probs'][loc_i, 5] > ad.obsm['palantir_branch_probs'][
loc_i, 2]: # if prob that cDC > pDC, then relabel as cDC
revised_clus[loc_i] = '10'
revised_clus = [int(i) for i in revised_clus]
# magic_df = ad.obsm['MAGIC_imputed_data']
# ad.X: Filtered, normalized and log transformed count matrix
# ad.raw: Filtered raw count matrix
# print('before extra filtering' ,ad.shape)
# sc.pp.filter_genes(ad, min_cells=10)
# print('after extra filtering', ad.shape)
adata_counts = sc.AnnData(
ad.X) # slalom_human())#(ad.X) # ad.X is filtered, lognormalized,scaled// ad.raw.X is the filtered but not pre-processed
adata_counts.obs_names = ad.obs_names
adata_counts.var_names = ad.var_names
# sc.pp.recipe_zheng17(adata_counts, n_top_genes=1000, log=True) #using this or the .X scaled version is pretty much the same.
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
import colorcet as cc
if run_palantir_func == True:
run_palantir_func_human34(ad, ncomps, knn, tsne, revised_clus, start_cell='c4823')
# tsnem = TSNE().fit_transform(adata_counts.obsm['X_pca'])
gene_list = ['ITGAX']#['GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA', 'ITGAX', 'IGHD',
#'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
for gene_name in gene_list:# 'GATA2',
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
print('gene name', gene_name, loc_gata)
#print('xpca',norm_df['X_pca'])
true_label = nover_labels # revised_clus
print('p0 random seed', p0_random_seed)
p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.4,
pseudotime=True, path="/home/shobi/Trajectory/Datasets/HumanCD34/", root=1,
root_user=4823, dataset='humanCD34', preserve_disconnected=True, random_seed=p0_random_seed) # *.4
p0.run_PARC()
super_labels = p0.labels
print('super labels', set(super_labels))
ad.obs['parc0_label'] = [str(i) for i in super_labels]
magic_ad = ad.obsm['MAGIC_imputed_data']
magic_ad = sc.AnnData(magic_ad)
magic_ad.obs_names = ad.obs_names
magic_ad.var_names = ad.var_names
magic_ad.obs['parc0_label'] = [str(i) for i in super_labels]
marker_genes = {"ERY": ['GATA1', 'GATA2', 'ITGA2B'], "BCell": ['IGHD', 'CD22'],
"DC": ['IRF8', 'IL3RA', 'IRF4', 'CSF2RA','ITGAX'],
"MONO": ['CD14', 'SPI1', 'MPO', 'IL12RB1', 'IL13RA1', 'C3AR1', 'FCGR3A'], 'HSC': ['CD34']}
print('make the p0 matrix plot')
sc.pl.matrixplot(magic_ad, marker_genes, groupby='parc0_label')
super_edges = p0.edgelist_maxout # p0.edgelist
super_pt = p0.scaled_hitting_times # pseudotime pt
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in p0.terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
print(labelsq[0])
tsi_list.append(labelsq[0][0])
p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05,
path="/home/shobi/Trajectory/Datasets/HumanCD34/", pseudotime=True, root=1,
super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
super_terminal_cells=tsi_list, root_user=4823,
x_lazy=0.99, alpha_teleport=0.99, dataset='humanCD34', preserve_disconnected=True,
super_terminal_clusters=p0.terminal_clusters) # *.4super_terminal_cells = tsi_list
p1.run_PARC()
labels = p1.labels
ad.obs['parc1_label'] = [str(i) for i in labels]
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in p1.revised_super_terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
print(labelsq[0])
tsi_list.append(labelsq[0][0])
label_df = pd.DataFrame(labels, columns=['parc'])
# label_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels.csv', index=False)
gene_ids = adata_counts.var_names
obs = ad.raw.X.toarray()
print('shape obs', obs.shape)
obs = pd.DataFrame(obs, columns=gene_ids)
# obs['parc']=p1.labels
obs['louvain'] = revised_clus
# obs_average = obs.groupby('parc', as_index=True).mean()
obs_average = obs.groupby('louvain', as_index=True).mean()
print(obs_average.head())
# obs_average.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.csv', index=False)
ad_obs = sc.AnnData(obs_average)
ad_obs.var_names = gene_ids
ad_obs.obs['parc'] = [i for i in range(len(set(revised_clus)))] # p1.labels instaed of revised_clus
# sc.write('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.h5ad',ad_obs)
# fig_0, ax_0 = plt.subplots()
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
loaded_magic_df.head()
for gene_name in ['ITGA2B','IL3RA','ITGAX','IRF8']:#['GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','IRF8','SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
#DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO':'MPO (Mono)', 'CD79B':'CD79B (B)','IRF8':'IRF8 (DC)', 'SPI1':'PU.1','CD34': 'CD34','CSF1R':'CSF1R (pDC. Up then Down in cDC)','IL3RA':'CD123 (pDC)','IRF4': 'IRF4 (pDC)', 'ITGAX':'ITGAX (cDCs)','CSF2RA':'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
#magic_ad=loaded_magic_df[gene_name]
p1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
print('start tsne')
n_downsample = 4000
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=4000)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=5780, replace=False, p=None)
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
print('labels p1', len(labels), set(labels))
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
embedding = tsnem[idx, :] # TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:20])
print('size of downsampled embedding', embedding.shape)
else:
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:15])
# print('tsne input size', adata_counts.obsm['X_pca'].shape)
embedding = tsnem # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:,0:20])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace( p0, p1, idx)
draw_trajectory_gams(embedding,super_clus_ds_PCA_loc, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p1.revised_super_terminal_clusters,
sub_terminal_clusters=p1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
# final_super_terminal=p0.terminal clusters
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
lineP0 = np.linspace(0, 1, len(set(p0.labels)))
lineP1 = np.linspace(0, 1, len(set(p1.labels)))
# find the single-cell which is nearest to the average-location of a terminal cluster - for just the sub-set of downsampled points in the corresponding PCA-space
new_tsi_list = []
# find the single-cell which is nearest to the average-location of a terminal cluster
# TODO make a knn in the downsampled PCA-space
X_ds = adata_counts.obsm['X_pca'][:, 0:ncomps][idx]
p_ds = hnswlib.Index(space='l2', dim=ncomps)
p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
p_ds.add_items(X_ds)
p_ds.set_ef(50)
for tsi_item in tsi_list:
labelsq, distances = p_ds.knn_query(adata_counts.obsm['X_pca'][:, 0:ncomps][tsi_item, :], k=1)
new_tsi_list.append(labelsq[0][0])
# for old_tsi_i in tsi_list:
# temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
# labelsq, distances = p1.knn_struct.query(.knn_query(temp, k=1)
# print(labelsq[0])
# tsi_list.append(labelsq[0][0])
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
ff, (ax11, ax22) = plt.subplots(1, 2, sharey=True)
col_i = 0
for color, group in zip(line, set(true_label)):
marker_x = marker[random.randint(0, 5)]
where = np.where(np.asarray(true_label) == group)[0]
# ax1.scatter(embedding[where, 0], embedding[where, 1], label=group, c=plt.cm.jet(color))
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax1.legend(fontsize=6)
ax1.set_title('true labels')
for color, group in zip(lineP0, set(p0.labels)):
where = np.where(super_labels == group)[0]
ax11.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax11.legend(fontsize=6)
ax11.set_title('p0 labels')
for color, group in zip(lineP1, set(p1.labels)):
where = np.where(labels == group)[0]
ax22.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax22.legend(fontsize=6)
ax22.set_title('p1 labels')
ax3.set_title("Markov Sim PT ncomps:" + str(ncomps) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
ax2.set_title("terminal clus from P0 super clus:" + str(ncomps) + '. knn:' + str(knn)+ 'randseed' +str( p0_random_seed))
ax2.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
jj = 0
for ti in p1.revised_super_terminal_clusters: # p0.terminal_clusters:
loc_i = np.where(super_labels == ti)[0]
val_pt = [sc_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 0) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labelsq, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
x = embedding[labelsq[0], 0]
y = embedding[labelsq[0], 1]
# ax2.scatter(np.mean(x), np.mean(y), label='ts' + str(ti)+'M'+str(maj), c='red', s=15)
# ax2.scatter(x, y, label='TS' + str(ti), c='red', s=10)
# ax3.scatter(x, y, label='TS' + str(ti), c='red', s=10)
ax2.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1], label='TS' + str(ti), c='pink', s=18) # PCs HNSW
# ax3.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1], label='TS' + str(p1.labels[tsi_list[jj]]), c='pink',s=18)
ax2.text(embedding[new_tsi_list[jj], 0]+0.05, embedding[new_tsi_list[jj], 1]+ 0.05, 'TS' + str(ti), color='black', zorder=3)
# ax3.text(np.mean(x) + 0.05, np.mean(y) + 0.05, 'TS' + str(ti), color='black', zorder=3)
ax2.legend(fontsize=6)
jj = jj + 1
jj = 0
print('')
for ti in p1.terminal_clusters:
print('terminal ti', ti)
loc_i = np.where(np.asarray(labels) == ti)[0]
#print(np.where(labels == ti), np.where(np.asarray(labels) == ti) ,loc_i)
val_pt = [sc_pt_markov[i] for i in loc_i]
print(val_pt)
th_pt = np.percentile(val_pt, 0) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labelsq, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
x = embedding[labelsq[0], 0]
y = embedding[labelsq[0], 1]
# ax2.scatter(np.mean(x), np.mean(y), label='ts' + str(ti)+'M'+str(maj), c='red', s=15)
# ax2.scatter(x, y, label='TS' + str(ti), c='red', s=10)
# ax3.scatter(x, y, label='TS' + str(ti), c='red', s=10)
ax3.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1],
label='TS' + str(ti), c='pink', s=18)
ax3.text(embedding[new_tsi_list[jj], 0]+0.05, embedding[new_tsi_list[jj], 1] + 0.05, 'TS' + str(ti), color='black', zorder=3)
jj = jj + 1
draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p1.full_graph_shortpath, idx, adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def mainToy():
    """Run the PARC trajectory-inference pipeline on synthetic (Toy) data.

    Loads one of several hard-coded datasets (Toy1-4, ToyCyclic, Paul15, or
    the Germline Li data), runs a coarse PARC pass (p0) to locate terminal
    clusters, a fine pass (p1) seeded with those terminals, then draws the
    inferred trajectory and Markov pseudotime on a t-SNE embedding.

    NOTE(review): all input paths are machine-specific; confirm before running.
    """
    dataset = "Toy3"  # ""Toy1" # GermlineLi #Toy1
    ## Dataset Germline Li https://zenodo.org/record/1443566#.XZlhEkEzZ5y
    if dataset == "GermlineLine":
        # Dropped the deprecated positional 'rt' mode-string from read_csv calls;
        # it was being consumed as the `sep` argument (delimiter= overrode it anyway).
        df_expression_ids = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li.csv",
                                        delimiter=",")
        print(df_expression_ids.shape)
        # print(df_expression_ids[['cell_id',"week","ACTG2","STK31"]])[10:12]
        df_counts = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li_filteredcounts.csv",
                                delimiter=",")
        df_ids = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li_labels.csv",
                             delimiter=",")
        # print(df_counts.shape, df_counts.head() ,df_ids.shape)
        # X_counts = df_counts.values
        # print(X_counts.shape)
        # varnames = pd.Categorical(list(df_counts.columns))
        adata_counts = sc.AnnData(df_counts, obs=df_ids)
        print(adata_counts.obs)
        sc.pp.filter_cells(adata_counts, min_counts=1)
        print(adata_counts.n_obs)
        sc.pp.filter_genes(adata_counts, min_counts=1)  # only consider genes with more than 1 count
        print(adata_counts.X.shape)
        sc.pp.normalize_per_cell(  # normalize with total UMI count per cell
            adata_counts, key_n_counts='n_counts_all')
        print(adata_counts.X.shape, len(list(adata_counts.var_names)))
        filter_result = sc.pp.filter_genes_dispersion(  # select highly-variable genes
            adata_counts.X, flavor='cell_ranger', n_top_genes=1000, log=False)
        print(adata_counts.X.shape, len(list(adata_counts.var_names)))  # , list(adata_counts.var_names))
        adata_counts = adata_counts[:, filter_result.gene_subset]  # subset the genes
        print(adata_counts.X.shape, len(list(adata_counts.var_names)))  # ,list(adata_counts.var_names))
        sc.pp.normalize_per_cell(adata_counts)  # renormalize after filtering
        sc.pp.log1p(adata_counts)  # log transform: adata_counts.X = log(adata_counts.X + 1)
        sc.pp.scale(adata_counts)  # scale to unit variance and shift to zero mean
        sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=20)
        true_label = list(adata_counts.obs['week'])
        sc.pp.neighbors(adata_counts, n_neighbors=10, n_pcs=20)
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='gender_week', legend_loc='right margin', palette='jet')

    ## Dataset Paul15 https://scanpy-tutorials.readthedocs.io/en/latest/paga-paul15.html
    if dataset == 'Paul15':
        root_user = "8Mk"
        adata_counts = sc.datasets.paul15()
        sc.pp.recipe_zheng17(adata_counts)
        sc.tl.pca(adata_counts, svd_solver='arpack')
        true_label = list(adata_counts.obs['paul15_clusters'])  # PAUL
        adata_counts.obs['group_id'] = true_label
        # sc.pp.neighbors(adata_counts, n_neighbors=10)
        # sc.tl.draw_graph(adata_counts)
        # sc.pl.draw_graph(adata_counts, color=['paul15_clusters', 'Cma1'], legend_loc='on data')

    if dataset.startswith('Toy'):
        root_user = 'M1'  # "T1_M1", "T2_M1"] #"T1_M1"
        if dataset == "Toy1":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy1/toy_bifurcating_M4_n2000d1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy1/toy_bifurcating_M4_n2000d1000_ids.csv",
                                 delimiter=",")
        if dataset == "Toy2":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy2/toy_multifurcating_n1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy2/toy_multifurcating_n1000_ids.csv",
                                 delimiter=",")
        if dataset == "Toy3":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000_ids.csv",
                                 delimiter=",")
        if dataset == "ToyCyclic":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000_ids.csv",
                                 delimiter=",")
        if dataset == "Toy4":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv",
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000_ids.csv",
                                 delimiter=",")
        # cell ids look like 'C123'; strip the leading letter to sort numerically
        df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
        print("shape", df_counts.shape, df_ids.shape)
        # was .drop('Unnamed: 0', 1): positional axis was removed in pandas 2.0
        df_counts = df_counts.drop(columns='Unnamed: 0')
        df_ids = df_ids.sort_values(by=['cell_id_num'])
        df_ids = df_ids.reset_index(drop=True)
        true_label = df_ids['group_id']
        adata_counts = sc.AnnData(df_counts, obs=df_ids)
        # sc.pp.recipe_zheng17(adata_counts, n_top_genes=20) not helpful for toy data
        ncomps = 50
        knn = 30
        sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
        # clusters = palantir.utils.determine_cell_clusters(pca_projections)
        from sklearn.decomposition import PCA
        pca = PCA(n_components=ncomps)
        pc = pca.fit_transform(df_counts)

    # Coarse PARC pass (p0): large clusters, used to locate terminal states.
    p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
              too_big_factor=0.3,
              pseudotime=True, path="/home/shobi/Trajectory/Datasets/" + dataset + "/", root=2,
              root_user=root_user, preserve_disconnected=True, dataset='toy')  # *.4
    p0.run_PARC()
    super_labels = p0.labels
    super_edges = p0.edgelist
    super_pt = p0.scaled_hitting_times  # pseudotime pt
    # 0.05 for p1 toobig

    # HNSW index over PCA space: maps terminal-cluster centroids to nearest single cells.
    p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
    p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
    p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
    p.set_ef(50)
    tsi_list = []  # find the single-cell which is nearest to the average-location of a terminal cluster in PCA space (
    for tsi in p0.terminal_clusters:
        loc_i = np.where(np.asarray(p0.labels) == tsi)[0]
        val_pt = [p0.single_cell_pt_markov[i] for i in loc_i]
        th_pt = np.percentile(val_pt, 50)  # 50
        loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
        temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
        labelsq, distances = p.knn_query(temp, k=1)
        print(labelsq[0])
        tsi_list.append(labelsq[0][0])

    # Fine PARC pass (p1), seeded with p0's terminal cells/clusters.
    p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=1, dist_std_local=0.15, knn=knn,
              too_big_factor=0.05,
              path="/home/shobi/Trajectory/Datasets/" + dataset + "/", pseudotime=True, root=1,
              super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
              super_terminal_cells=tsi_list, root_user=root_user,
              x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
              super_terminal_clusters=p0.terminal_clusters)
    # in the case of TOY DATA: P1 WORKS MUCH BETTER WHEN ONLY USING SUPER_TERMINAL_CLUS... O/W need to omit pruning
    p1.run_PARC()
    labels = p1.labels

    # p1 = PARC(adata_counts.obsm['X_pca'], true_label, jac_std_global=1, knn=5, too_big_factor=0.05, anndata= adata_counts, small_pop=2)
    # p1.run_PARC()
    # labels = p1.labels
    print('start tsne')
    n_downsample = 500
    if len(labels) > n_downsample:
        # idx = np.random.randint(len(labels), size=900)
        np.random.seed(2357)
        idx = np.random.choice(a=np.arange(0, len(labels)), size=900, replace=False, p=None)
        super_labels = np.asarray(super_labels)[idx]
        labels = list(np.asarray(labels)[idx])
        # Fixed misplaced brackets: convert to ndarray FIRST, then index with idx
        # (a plain Python list indexed by an ndarray raises TypeError).
        true_label = list(np.asarray(true_label)[idx])
        sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
        embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
        print('tsne downsampled size', embedding.shape)
    else:
        embedding = TSNE().fit_transform(pc)  # (adata_counts.obsm['X_pca'])
        print('tsne input size', adata_counts.obsm['X_pca'].shape)
        # embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
        idx = np.random.randint(len(labels), size=len(labels))
        # sc_pt_markov was previously undefined on this branch (NameError below);
        # use all cells, matching the equivalent branch in main_Bcell.
        sc_pt_markov = p1.single_cell_pt_markov
    print('end tsne')

    knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
    draw_trajectory_gams(embedding, ci_list, labels, super_labels, super_edges,
                         p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                         final_super_terminal=p0.terminal_clusters, sub_terminal_clusters=p1.terminal_clusters,
                         title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
    plt.show()
    draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
                           p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                           final_super_terminal=p0.terminal_clusters,
                           title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
    plt.show()

    # Side-by-side scatter: ground-truth labels vs. Markov pseudotime.
    num_group = len(set(true_label))
    line = np.linspace(0, 1, num_group)
    f, (ax1, ax3) = plt.subplots(1, 2, sharey=True)
    for color, group in zip(line, set(true_label)):
        where = np.where(np.asarray(true_label) == group)[0]
        ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
                    c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
    ax1.legend(fontsize=6)
    ax1.set_title('true labels')
    ax3.set_title("Markov Sim PT ncomps:" + str(pc.shape[1]) + '. knn:' + str(knn))
    ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
    plt.show()
    # draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p1.full_graph_shortpath, idx, adata_counts.obsm['X_pca'][:, 0:ncomps])
    plt.show()
def main_Bcell():
    """Run the PARC trajectory pipeline on the B-cell time-course dataset.

    Loads raw count tables from disk, preprocesses them Zheng17-style, runs a
    coarse (p0) and a fine (p1) PARC clustering with Markov pseudotime, plots
    marker-gene expression against pseudotime, and draws the trajectory on a
    2-D embedding.

    NOTE(review): file paths are machine-specific; confirm before running.
    """

    def run_zheng(adata, min_counts=3, n_top_genes=500, do_log=True):
        # Zheng17-style preprocessing: gene filter -> per-cell normalisation ->
        # HVG selection -> optional log1p -> z-score. Returns the filtered view.
        sc.pp.filter_genes(adata, min_counts=min_counts)
        # sc.pp.filter_genes(adata, min_cells=3)# only consider genes with more than 1 count
        sc.pp.normalize_per_cell(  # normalize with total UMI count per cell
            adata, key_n_counts='n_counts_all'
        )
        filter_result = sc.pp.filter_genes_dispersion(  # select highly-variable genes
            adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False
        )
        adata = adata[:, filter_result.gene_subset]  # subset the genes
        sc.pp.normalize_per_cell(adata)  # renormalize after filtering
        if do_log: sc.pp.log1p(adata)  # log transform: adata.X = log(adata.X + 1)
        sc.pp.scale(adata)  # scale to unit variance and shift to zero mean
        return adata

    def run_paga_func_Bcell(adata_counts1, ncomps, knn, embedding):
        # Baseline comparison: scanpy PAGA + DPT pseudotime on a copy of the data.
        # print('npwhere',np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0])
        adata_counts = adata_counts1.copy()
        sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
        adata_counts.uns['iroot'] = 33  # np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0]
        sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps)  # 4
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')  # force-directed layout
        start_dfmap = time.time()
        sc.tl.diffmap(adata_counts, n_comps=ncomps)
        print('time taken to get diffmap given knn', time.time() - start_dfmap)
        sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap')  # 4
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
        sc.tl.leiden(adata_counts, resolution=1.0)
        sc.tl.paga(adata_counts, groups='leiden')
        # sc.pl.paga(adata_counts, color=['louvain','group_id'])
        sc.tl.dpt(adata_counts, n_dcs=ncomps)
        sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
                   title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
                          'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
        sc.pl.draw_graph(adata_counts, color='dpt_pseudotime', legend_loc='on data')
        print('dpt format', adata_counts.obs['dpt_pseudotime'])
        plt.scatter(embedding[:, 0], embedding[:, 1], c=adata_counts.obs['dpt_pseudotime'].values, cmap='viridis')
        plt.title('PAGA DPT')
        plt.show()

    def run_palantir_func_Bcell(ad1, ncomps, knn, tsne_X, true_label):
        # Baseline comparison: Palantir pseudotime on a copy of the data.
        ad = ad1.copy()
        tsne = pd.DataFrame(tsne_X, index=ad.obs_names, columns=['x', 'y'])
        norm_df_pal = pd.DataFrame(ad.X)
        # print('norm df', norm_df_pal)
        new = ['c' + str(i) for i in norm_df_pal.index]
        norm_df_pal.index = new
        pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
        sc.tl.pca(ad, svd_solver='arpack')
        dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
        ms_data = palantir.utils.determine_multiscale_space(dm_res)  # n_eigs is determined using eigengap
        print('ms data shape: determined using eigengap', ms_data.shape)
        # tsne = pd.DataFrame(tsnem)#palantir.utils.run_tsne(ms_data)
        tsne.index = new
        # print(type(tsne))
        str_true_label = pd.Series(true_label, index=norm_df_pal.index)
        palantir.plot.plot_cell_clusters(tsne, str_true_label)
        start_cell = 'c23'  # '#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
        pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
        palantir.plot.plot_palantir_results(pr_res, tsne, ncomps, knn)
        plt.show()

    def find_time(s):
        # Extract the hour as an int from column names containing '...Ik<NN>h...'.
        start = s.find("Ik") + len("Ik")
        end = s.find("h")
        return int(s[start:end])

    def find_cellID(s):
        # Extract the cell identifier between the 'h' and the following '_'.
        start = s.find("h") + len("h")
        end = s.find("_")
        return s[start:end]

    # --- Load raw counts and gene annotation ---
    Bcell = pd.read_csv('/home/shobi/Trajectory/Datasets/Bcell/genes_count_table.txt', sep='\t')
    gene_name = pd.read_csv('/home/shobi/Trajectory/Datasets/Bcell/genes_attr_table.txt', sep='\t')
    Bcell_columns = [i for i in Bcell.columns]
    adata_counts = sc.AnnData(Bcell.values[:, 1:].T)  # transpose to cells x genes; drop 'tracking_id' column
    Bcell_columns.remove('tracking_id')

    print(gene_name.shape, gene_name.columns)
    Bcell['gene_short_name'] = gene_name['gene_short_name']
    adata_counts.var_names = gene_name['gene_short_name']
    adata_counts.obs['TimeCellID'] = Bcell_columns

    # for i in Bcell_columns:
    #     print(i)
    # adata_counts.var_names_make_unique()

    # Parse time point and cell id out of the column names.
    time_list = [find_time(s) for s in Bcell_columns]
    ID_list = [find_cellID(s) for s in Bcell_columns]
    adata_counts.obs['group_id'] = [str(i) for i in time_list]
    ID_dict = {}
    color_dict = {}
    for j, i in enumerate(list(set(ID_list))):
        ID_dict.update({i: j})
    for j, i in enumerate(list(set(time_list))):
        color_dict.update({i: j})

    print('shape of raw data', adata_counts.shape)
    # sc.pp.filter_genes(adata_counts, min_counts=3)
    adata_counts_unfiltered = adata_counts.copy()  # kept for raw marker-gene lookups later

    Bcell_marker_gene_list = ['Myc', 'Igll1', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
    for gene_name in Bcell_marker_gene_list:
        print('gene name', gene_name)
        loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]

    adata_counts = run_zheng(adata_counts, n_top_genes=1000, min_counts=10, do_log=True)
    print('adata counts shape', adata_counts.shape)
    # sc.pp.recipe_zheng17(adata_counts)

    ncomps = 100  # (ncomp=50, knn=20 gives nice results. use 10PCs for visualizing)
    knn = 20
    random_seed = 1
    sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)

    jet = cm.get_cmap('viridis', len(set(time_list)))
    cmap_ = jet(range(len(set(time_list))))
    jet2 = cm.get_cmap('jet', len(set(ID_list)))
    cmap2_ = jet2(range(len(set(ID_list))))
    # color_dict = {"0": [0], "2": [1], "6": [2], "12": [3], "18": [4], "24": [5]}

    embedding = umap.UMAP(random_state=42, n_neighbors=12, init='random').fit_transform(
        adata_counts.obsm['X_pca'][:, 0:5])
    # plt.show()
    true_label = time_list
    # run_paga_func_Bcell(adata_counts, ncomps, knn, embedding)
    # run_palantir_func_Bcell(adata_counts, ncomps, knn, embedding, true_label)
    print('input has shape', adata_counts.obsm['X_pca'].shape)

    # Coarse PARC pass (p0): find terminal clusters.
    p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
              too_big_factor=0.3, dataset='bcell',
              pseudotime=True, path="/home/shobi/Trajectory/Datasets/" + 'bcell' + "/", root=2,
              root_user=0, preserve_disconnected=True, random_seed=random_seed)  # *.4#root_user = 34
    p0.run_PARC()
    super_labels = p0.labels

    # HNSW index in PCA space to map terminal-cluster centroids onto single cells.
    p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
    p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=100, M=16)
    p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
    p.set_ef(30)
    tsi_list = []  # find the single-cell which is nearest to the average-location of a terminal cluster in PCA space (
    for tsi in p0.terminal_clusters:
        loc_i = np.where(np.asarray(p0.labels) == tsi)[0]
        val_pt = [p0.single_cell_pt_markov[i] for i in loc_i]
        th_pt = np.percentile(val_pt, 50)  # 50
        loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
        temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
        labelsq, distances = p.knn_query(temp, k=1)
        print(labelsq[0])
        tsi_list.append(labelsq[0][0])

    # Fine PARC pass (p1), seeded with p0's terminal cells/clusters.
    p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=1, dist_std_local=0.15, knn=knn,
              too_big_factor=0.05,
              path="/home/shobi/Trajectory/Datasets/" + "bcell/", pseudotime=True, root=1,
              super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
              super_terminal_cells=tsi_list, root_user=0,
              x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='bcell',
              super_terminal_clusters=p0.terminal_clusters, random_seed=random_seed)
    # in the case of TOY DATA: P1 WORKS MUCH BETTER WHEN ONLY USING SUPER_TERMINAL_CLUS... O/W need to omit pruning
    p1.run_PARC()
    labels = p1.labels
    super_edges = p0.edgelist

    print('p1 markov times', p1.markov_hitting_times)
    print('p1 markov times', p1.single_cell_pt_markov)

    # plot gene expression vs. pseudotime
    Bcell_marker_gene_list = ['Igll1', 'Myc', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
    for gene_name in Bcell_marker_gene_list:
        loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
        print('loc gata', loc_gata)
        magic_ad = adata_counts_unfiltered.X[:, loc_gata]
        p1.get_gene_expression(magic_ad, gene_name)

    n_downsample = 500
    if len(labels) > n_downsample:
        # idx = np.random.randint(len(labels), size=900)
        np.random.seed(2357)
        idx = np.random.choice(a=np.arange(0, len(labels)), size=900, replace=False, p=None)
        super_labels = np.asarray(super_labels)[idx]
        labels = list(np.asarray(labels)[idx])
        # NOTE(review): brackets below look misplaced — the equivalent code in
        # mainToy/main_Human uses np.asarray(true_label)[idx]; indexing a plain
        # Python list with an ndarray would raise TypeError. Confirm and fix.
        true_label = list(np.asarray(true_label[idx]))
        sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov[idx]))
        embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
        print('tsne downsampled size', embedding.shape)
    else:
        # embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:5]) # (adata_counts.obsm['X_pca'])
        print('tsne input size', adata_counts.obsm['X_pca'].shape)
        # embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
        idx = np.arange(0, len(labels))  # np.random.randint(len(labels), size=len(labels))
        sc_pt_markov = p1.single_cell_pt_markov
        # embedding = umap.UMAP(random_state=42, n_neighbors=15, init=umap_init).fit_transform( adata_counts.obsm['X_pca'][:, 0:5])

    knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
    draw_trajectory_gams(embedding, ci_list, labels, super_labels, super_edges,
                         p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                         final_super_terminal=p1.revised_super_terminal_clusters,
                         sub_terminal_clusters=p1.terminal_clusters,
                         title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
    plt.show()
    draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p1.full_graph_shortpath, idx,
                                          adata_counts.obsm['X_pca'][:, 0:ncomps])
    plt.show()
def main():
    """Dispatch to the analysis pipeline for the selected dataset.

    Edit ``dataset`` to choose: 'Human', 'bcell', or anything else for the
    toy-data pipeline.
    """
    dataset = 'Human'  # 'bcell'##''Human' # 'Toy'
    if dataset == 'bcell':
        main_Bcell()
    elif dataset == 'Human':
        main_Human(ncomps=100, knn=30, p0_random_seed=4, run_palantir_func=False)
    else:
        mainToy()
# Script entry point: run the dataset pipeline selected in main() when
# this file is executed directly (not imported).
if __name__ == '__main__':
    main()
| true | true |
1c3c4d5c4687d6171dcb57b98a7e1ca4b53999c6 | 9,971 | py | Python | stanfordcorenlp/corenlp.py | icmpnorequest/stanford-corenlp | 5cefdb271f6f6b63648f869e6d39aa4ff5ae944e | [
"MIT"
] | 882 | 2017-05-28T16:55:25.000Z | 2022-03-27T17:10:17.000Z | stanfordcorenlp/corenlp.py | icmpnorequest/stanford-corenlp | 5cefdb271f6f6b63648f869e6d39aa4ff5ae944e | [
"MIT"
] | 84 | 2017-05-28T18:38:34.000Z | 2022-03-10T05:17:38.000Z | stanfordcorenlp/corenlp.py | icmpnorequest/stanford-corenlp | 5cefdb271f6f6b63648f869e6d39aa4ff5ae944e | [
"MIT"
] | 224 | 2017-06-11T13:49:29.000Z | 2022-02-23T16:17:06.000Z | # _*_coding:utf-8_*_
from __future__ import print_function
import glob
import json
import logging
import os
import re
import socket
import subprocess
import sys
import time
import psutil
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import requests
class StanfordCoreNLP:
def __init__(self, path_or_host, port=None, memory='4g', lang='en', timeout=1500, quiet=True,
logging_level=logging.WARNING, max_retries=5):
self.path_or_host = path_or_host
self.port = port
self.memory = memory
self.lang = lang
self.timeout = timeout
self.quiet = quiet
self.logging_level = logging_level
logging.basicConfig(level=self.logging_level)
# Check args
self._check_args()
if path_or_host.startswith('http'):
self.url = path_or_host + ':' + str(port)
logging.info('Using an existing server {}'.format(self.url))
else:
# Check Java
if not subprocess.call(['java', '-version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) == 0:
raise RuntimeError('Java not found.')
# Check if the dir exists
if not os.path.isdir(self.path_or_host):
raise IOError(str(self.path_or_host) + ' is not a directory.')
directory = os.path.normpath(self.path_or_host) + os.sep
self.class_path_dir = directory
# Check if the language specific model file exists
switcher = {
'en': 'stanford-corenlp-[0-9].[0-9].[0-9]-models.jar',
'zh': 'stanford-chinese-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'ar': 'stanford-arabic-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'fr': 'stanford-french-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'de': 'stanford-german-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'es': 'stanford-spanish-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar'
}
jars = {
'en': 'stanford-corenlp-x.x.x-models.jar',
'zh': 'stanford-chinese-corenlp-yyyy-MM-dd-models.jar',
'ar': 'stanford-arabic-corenlp-yyyy-MM-dd-models.jar',
'fr': 'stanford-french-corenlp-yyyy-MM-dd-models.jar',
'de': 'stanford-german-corenlp-yyyy-MM-dd-models.jar',
'es': 'stanford-spanish-corenlp-yyyy-MM-dd-models.jar'
}
if len(glob.glob(directory + switcher.get(self.lang))) <= 0:
raise IOError(jars.get(
self.lang) + ' not exists. You should download and place it in the ' + directory + ' first.')
# If port not set, auto select
if self.port is None:
for port_candidate in range(9000, 65535):
if port_candidate not in [conn.laddr[1] for conn in psutil.net_connections()]:
self.port = port_candidate
break
# Check if the port is in use
if self.port in [conn.laddr[1] for conn in psutil.net_connections()]:
raise IOError('Port ' + str(self.port) + ' is already in use.')
# Start native server
logging.info('Initializing native server...')
cmd = "java"
java_args = "-Xmx{}".format(self.memory)
java_class = "edu.stanford.nlp.pipeline.StanfordCoreNLPServer"
class_path = '"{}*"'.format(directory)
args = [cmd, java_args, '-cp', class_path, java_class, '-port', str(self.port)]
args = ' '.join(args)
logging.info(args)
# Silence
with open(os.devnull, 'w') as null_file:
out_file = None
if self.quiet:
out_file = null_file
self.p = subprocess.Popen(args, shell=True, stdout=out_file, stderr=subprocess.STDOUT)
logging.info('Server shell PID: {}'.format(self.p.pid))
self.url = 'http://localhost:' + str(self.port)
# Wait until server starts
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host_name = urlparse(self.url).hostname
time.sleep(1) # OSX, not tested
trial = 1
while sock.connect_ex((host_name, self.port)):
if trial > max_retries:
raise ValueError('Corenlp server is not available')
logging.info('Waiting until the server is available.')
trial += 1
time.sleep(1)
logging.info('The server is available.')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
logging.info('Cleanup...')
if hasattr(self, 'p'):
try:
parent = psutil.Process(self.p.pid)
except psutil.NoSuchProcess:
logging.info('No process: {}'.format(self.p.pid))
return
if self.class_path_dir not in ' '.join(parent.cmdline()):
logging.info('Process not in: {}'.format(parent.cmdline()))
return
children = parent.children(recursive=True)
for process in children:
logging.info('Killing pid: {}, cmdline: {}'.format(process.pid, process.cmdline()))
# process.send_signal(signal.SIGTERM)
process.kill()
logging.info('Killing shell pid: {}, cmdline: {}'.format(parent.pid, parent.cmdline()))
# parent.send_signal(signal.SIGTERM)
parent.kill()
def annotate(self, text, properties=None):
if sys.version_info.major >= 3:
text = text.encode('utf-8')
r = requests.post(self.url, params={'properties': str(properties)}, data=text,
headers={'Connection': 'close'})
return r.text
def tregex(self, sentence, pattern):
tregex_url = self.url + '/tregex'
r_dict = self._request(tregex_url, "tokenize,ssplit,depparse,parse", sentence, pattern=pattern)
return r_dict
def tokensregex(self, sentence, pattern):
tokensregex_url = self.url + '/tokensregex'
r_dict = self._request(tokensregex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
return r_dict
def semgrex(self, sentence, pattern):
semgrex_url = self.url + '/semgrex'
r_dict = self._request(semgrex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
return r_dict
def word_tokenize(self, sentence, span=False):
r_dict = self._request('ssplit,tokenize', sentence)
tokens = [token['originalText'] for s in r_dict['sentences'] for token in s['tokens']]
# Whether return token span
if span:
spans = [(token['characterOffsetBegin'], token['characterOffsetEnd']) for s in r_dict['sentences'] for token
in s['tokens']]
return tokens, spans
else:
return tokens
def pos_tag(self, sentence):
r_dict = self._request(self.url, 'pos', sentence)
words = []
tags = []
for s in r_dict['sentences']:
for token in s['tokens']:
words.append(token['originalText'])
tags.append(token['pos'])
return list(zip(words, tags))
def ner(self, sentence):
r_dict = self._request(self.url, 'ner', sentence)
words = []
ner_tags = []
for s in r_dict['sentences']:
for token in s['tokens']:
words.append(token['originalText'])
ner_tags.append(token['ner'])
return list(zip(words, ner_tags))
def parse(self, sentence):
r_dict = self._request(self.url, 'pos,parse', sentence)
return [s['parse'] for s in r_dict['sentences']][0]
def dependency_parse(self, sentence):
r_dict = self._request(self.url, 'depparse', sentence)
return [(dep['dep'], dep['governor'], dep['dependent']) for s in r_dict['sentences'] for dep in
s['basicDependencies']]
def coref(self, text):
r_dict = self._request('coref', text)
corefs = []
for k, mentions in r_dict['corefs'].items():
simplified_mentions = []
for m in mentions:
simplified_mentions.append((m['sentNum'], m['startIndex'], m['endIndex'], m['text']))
corefs.append(simplified_mentions)
return corefs
    def switch_language(self, language="en"):
        """Validate *language* and make it the active pipeline language."""
        self._check_language(language)
        self.lang = language
def _request(self, url, annotators=None, data=None, *args, **kwargs):
if sys.version_info.major >= 3:
data = data.encode('utf-8')
properties = {'annotators': annotators, 'outputFormat': 'json'}
params = {'properties': str(properties), 'pipelineLanguage': self.lang}
if 'pattern' in kwargs:
params = {"pattern": kwargs['pattern'], 'properties': str(properties), 'pipelineLanguage': self.lang}
logging.info(params)
r = requests.post(url, params=params, data=data, headers={'Connection': 'close'})
r_dict = json.loads(r.text)
return r_dict
def _check_args(self):
self._check_language(self.lang)
if not re.match('\dg', self.memory):
raise ValueError('memory=' + self.memory + ' not supported. Use 4g, 6g, 8g and etc. ')
def _check_language(self, lang):
if lang not in ['en', 'zh', 'ar', 'fr', 'de', 'es']:
raise ValueError('lang=' + self.lang + ' not supported. Use English(en), Chinese(zh), Arabic(ar), '
'French(fr), German(de), Spanish(es).')
| 38.949219 | 120 | 0.568549 |
from __future__ import print_function
import glob
import json
import logging
import os
import re
import socket
import subprocess
import sys
import time
import psutil
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import requests
class StanfordCoreNLP:
    """Client for a Stanford CoreNLP server.

    *path_or_host* is either the URL of an already-running server (anything
    starting with ``http``) or a local directory containing the CoreNLP jars,
    in which case a Java server process is spawned on a free port and torn
    down again by :meth:`close`.

    Fixes relative to the previous version:
    - ``word_tokenize`` and ``coref`` now pass the endpoint URL to ``_request``
      (the annotator string alone was previously used as the URL).
    - ``_check_args`` uses a raw-string regex and accepts multi-digit memory
      sizes such as ``'16g'``.
    - ``_check_language`` reports the offending argument in its error message.
    """

    def __init__(self, path_or_host, port=None, memory='4g', lang='en', timeout=1500, quiet=True,
                 logging_level=logging.WARNING, max_retries=5):
        self.path_or_host = path_or_host
        self.port = port
        self.memory = memory
        self.lang = lang
        self.timeout = timeout
        self.quiet = quiet
        self.logging_level = logging_level
        logging.basicConfig(level=self.logging_level)
        self._check_args()
        if path_or_host.startswith('http'):
            # Remote mode: just remember the address of the existing server.
            self.url = path_or_host + ':' + str(port)
            logging.info('Using an existing server {}'.format(self.url))
        else:
            # Local mode: verify Java and the jar directory, then spawn a server.
            if not subprocess.call(['java', '-version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) == 0:
                raise RuntimeError('Java not found.')
            if not os.path.isdir(self.path_or_host):
                raise IOError(str(self.path_or_host) + ' is not a directory.')
            directory = os.path.normpath(self.path_or_host) + os.sep
            self.class_path_dir = directory
            # Glob patterns for the language-specific model jars.
            switcher = {
                'en': 'stanford-corenlp-[0-9].[0-9].[0-9]-models.jar',
                'zh': 'stanford-chinese-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
                'ar': 'stanford-arabic-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
                'fr': 'stanford-french-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
                'de': 'stanford-german-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
                'es': 'stanford-spanish-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar'
            }
            # Human-readable jar names used in the error message below.
            jars = {
                'en': 'stanford-corenlp-x.x.x-models.jar',
                'zh': 'stanford-chinese-corenlp-yyyy-MM-dd-models.jar',
                'ar': 'stanford-arabic-corenlp-yyyy-MM-dd-models.jar',
                'fr': 'stanford-french-corenlp-yyyy-MM-dd-models.jar',
                'de': 'stanford-german-corenlp-yyyy-MM-dd-models.jar',
                'es': 'stanford-spanish-corenlp-yyyy-MM-dd-models.jar'
            }
            if len(glob.glob(directory + switcher.get(self.lang))) <= 0:
                raise IOError(jars.get(
                    self.lang) + ' not exists. You should download and place it in the ' + directory + ' first.')
            if self.port is None:
                # Pick the first port from 9000 upwards that is not already bound.
                for port_candidate in range(9000, 65535):
                    if port_candidate not in [conn.laddr[1] for conn in psutil.net_connections()]:
                        self.port = port_candidate
                        break
            if self.port in [conn.laddr[1] for conn in psutil.net_connections()]:
                raise IOError('Port ' + str(self.port) + ' is already in use.')
            logging.info('Initializing native server...')
            cmd = "java"
            java_args = "-Xmx{}".format(self.memory)
            java_class = "edu.stanford.nlp.pipeline.StanfordCoreNLPServer"
            class_path = '"{}*"'.format(directory)
            args = [cmd, java_args, '-cp', class_path, java_class, '-port', str(self.port)]
            args = ' '.join(args)
            logging.info(args)
            # Silence the server's stdout/stderr unless quiet=False.
            with open(os.devnull, 'w') as null_file:
                out_file = None
                if self.quiet:
                    out_file = null_file
                self.p = subprocess.Popen(args, shell=True, stdout=out_file, stderr=subprocess.STDOUT)
                logging.info('Server shell PID: {}'.format(self.p.pid))
            self.url = 'http://localhost:' + str(self.port)
            # Poll until the server socket accepts connections, or give up.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            host_name = urlparse(self.url).hostname
            time.sleep(1)
            trial = 1
            while sock.connect_ex((host_name, self.port)):
                if trial > max_retries:
                    raise ValueError('Corenlp server is not available')
                logging.info('Waiting until the server is available.')
                trial += 1
                time.sleep(1)
            logging.info('The server is available.')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        """Kill the spawned server process tree, if this instance started one."""
        logging.info('Cleanup...')
        if hasattr(self, 'p'):
            try:
                parent = psutil.Process(self.p.pid)
            except psutil.NoSuchProcess:
                logging.info('No process: {}'.format(self.p.pid))
                return
            # Only kill a process that was actually started from our jar directory.
            if self.class_path_dir not in ' '.join(parent.cmdline()):
                logging.info('Process not in: {}'.format(parent.cmdline()))
                return
            children = parent.children(recursive=True)
            for process in children:
                logging.info('Killing pid: {}, cmdline: {}'.format(process.pid, process.cmdline()))
                process.kill()
            logging.info('Killing shell pid: {}, cmdline: {}'.format(parent.pid, parent.cmdline()))
            parent.kill()

    def annotate(self, text, properties=None):
        """Run the full pipeline on *text* and return the raw response body."""
        if sys.version_info.major >= 3:
            text = text.encode('utf-8')
        r = requests.post(self.url, params={'properties': str(properties)}, data=text,
                          headers={'Connection': 'close'})
        return r.text

    def tregex(self, sentence, pattern):
        """Match a Tregex *pattern* against the constituency parse of *sentence*."""
        tregex_url = self.url + '/tregex'
        r_dict = self._request(tregex_url, "tokenize,ssplit,depparse,parse", sentence, pattern=pattern)
        return r_dict

    def tokensregex(self, sentence, pattern):
        """Match a TokensRegex *pattern* against the tokens of *sentence*."""
        tokensregex_url = self.url + '/tokensregex'
        r_dict = self._request(tokensregex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
        return r_dict

    def semgrex(self, sentence, pattern):
        """Match a Semgrex *pattern* against the dependency graph of *sentence*."""
        semgrex_url = self.url + '/semgrex'
        r_dict = self._request(semgrex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
        return r_dict

    def word_tokenize(self, sentence, span=False):
        """Tokenize *sentence*; with span=True also return character offsets."""
        r_dict = self._request(self.url, 'ssplit,tokenize', sentence)
        tokens = [token['originalText'] for s in r_dict['sentences'] for token in s['tokens']]
        if span:
            spans = [(token['characterOffsetBegin'], token['characterOffsetEnd']) for s in r_dict['sentences'] for token
                     in s['tokens']]
            return tokens, spans
        else:
            return tokens

    def pos_tag(self, sentence):
        """Return (word, POS-tag) pairs for *sentence*."""
        r_dict = self._request(self.url, 'pos', sentence)
        words = []
        tags = []
        for s in r_dict['sentences']:
            for token in s['tokens']:
                words.append(token['originalText'])
                tags.append(token['pos'])
        return list(zip(words, tags))

    def ner(self, sentence):
        """Return (word, NER-tag) pairs for *sentence*."""
        r_dict = self._request(self.url, 'ner', sentence)
        words = []
        ner_tags = []
        for s in r_dict['sentences']:
            for token in s['tokens']:
                words.append(token['originalText'])
                ner_tags.append(token['ner'])
        return list(zip(words, ner_tags))

    def parse(self, sentence):
        """Return the constituency parse of the first sentence."""
        r_dict = self._request(self.url, 'pos,parse', sentence)
        return [s['parse'] for s in r_dict['sentences']][0]

    def dependency_parse(self, sentence):
        """Return (relation, governor_index, dependent_index) triples."""
        r_dict = self._request(self.url, 'depparse', sentence)
        return [(dep['dep'], dep['governor'], dep['dependent']) for s in r_dict['sentences'] for dep in
                s['basicDependencies']]

    def coref(self, text):
        """Return coreference chains as lists of (sentNum, startIndex, endIndex, text)."""
        r_dict = self._request(self.url, 'coref', text)
        corefs = []
        for k, mentions in r_dict['corefs'].items():
            simplified_mentions = []
            for m in mentions:
                simplified_mentions.append((m['sentNum'], m['startIndex'], m['endIndex'], m['text']))
            corefs.append(simplified_mentions)
        return corefs

    def switch_language(self, language="en"):
        """Validate *language* and make it the active pipeline language."""
        self._check_language(language)
        self.lang = language

    def _request(self, url, annotators=None, data=None, *args, **kwargs):
        """POST *data* to *url* with the given annotators and return the decoded JSON reply."""
        if sys.version_info.major >= 3:
            data = data.encode('utf-8')
        properties = {'annotators': annotators, 'outputFormat': 'json'}
        params = {'properties': str(properties), 'pipelineLanguage': self.lang}
        if 'pattern' in kwargs:
            params = {"pattern": kwargs['pattern'], 'properties': str(properties), 'pipelineLanguage': self.lang}
        logging.info(params)
        r = requests.post(url, params=params, data=data, headers={'Connection': 'close'})
        r_dict = json.loads(r.text)
        return r_dict

    def _check_args(self):
        """Validate the pipeline language and the JVM memory string (e.g. '4g', '16g')."""
        self._check_language(self.lang)
        if not re.match(r'\d+g', self.memory):
            raise ValueError('memory=' + self.memory + ' not supported. Use 4g, 6g, 8g and etc. ')

    def _check_language(self, lang):
        """Raise ValueError unless *lang* is a supported pipeline language."""
        if lang not in ['en', 'zh', 'ar', 'fr', 'de', 'es']:
            raise ValueError('lang=' + lang + ' not supported. Use English(en), Chinese(zh), Arabic(ar), '
                                              'French(fr), German(de), Spanish(es).')
| true | true |
1c3c4d5f7591d15c9226f6e4d4579353121a6ee3 | 70,836 | py | Python | vu_lib/ipy_lib3.py | markmelnic/VU-Lib | 78f403d54bce1d4b82912a6daffebf15a96f64c6 | [
"MIT"
] | null | null | null | vu_lib/ipy_lib3.py | markmelnic/VU-Lib | 78f403d54bce1d4b82912a6daffebf15a96f64c6 | [
"MIT"
] | null | null | null | vu_lib/ipy_lib3.py | markmelnic/VU-Lib | 78f403d54bce1d4b82912a6daffebf15a96f64c6 | [
"MIT"
] | null | null | null | '''
Created on 30 Jan. 2012
Finished on 6 Feb. 2012
Improvements:
- 31 Mar. 2020 to 31 Mar. 2020: fixed compatibility issues of Matplotlib and Tinker on OS X machines.
- 19 Nov. 2019 to 22 Nov. 2019: rewrote Programming for Economists ipy_lib to Python 3.7 and merged with this one.
- 15 Nov. 2019 to 18 Nov. 2019: rewrote ipy_lib to Python 3.7 and fixed compatibility issues.
- 1 Mar. 2012 to 2 Mar. 2012: fixed a rare threading related crash
- 3 Mar. 2012 to 5 Mar. 2012: fixed a bug in showing names of the barchart
- 17 Mar. 2012 to 18 Mar. 2012: fixed not running on Linux
- 31 Jul. 2012 to 31 Jul. 2012: added UserInput and 'privatised' most classes and imports
- 1 Aug. 2012 to 2 Aug. 2012: fixed another bug with showing names of the barchart and a bug with displaying text in othello
- 4 Aug. 2012 to 4 Aug. 2012: fixed bug with opening a file and fixed functionality of closing the window
- 6 Aug. 2012 to 7 Aug. 2012: fixed multiple windows crashing the UI, reverted change to UserInput with this change
- 21 Aug. 2012 to 21 Aug. 2012: adjusted naming from JAVA to Python convention, changed UserInput to a function that returns all input, added Life interface
- 22 Aug. 2012 to 22 Aug. 2012: added scrollbar to othello, snake and life interfaces, added type checking and exceptions for all input
- 2 Sep. 2012 to 2 Sep. 2012: fixed another bug with names of the barchart, allowed ints to be given to floats, fixed spelling
- 13 Sep. 2012 to 13 Sep. 2012: fixed more spelling, added functionality for multiple answers per question
- 27 Sep. 2012 to 27 Sep. 2012: changed multiple answers from array to arbitrary arguments list, added exception if argument list is empty
- 6 Dec. 2012 to 6. Dec. 2012: fixed resets of auto alarm speed by adding a timer
- 2 Oct. 2013 to 3. Oct. 2013: fixed ranged errors, fixed closing bug in Windows and Linux when only calling ask_user or file_input,
fixed typos, added Escape as window closer, fixed window not getting focus when started, added Mac support (!)
- 9 Oct. 2013 to 9. Oct. 2013: fixed get_event (Mac version) to properly give refresh events
- 12 Nov. 2014 to 12. Nov. 2014: fixed OS X to not use PIL anymore and instead of images draw some simple shapes
- 21 Nov. 2014 to 21. Nov. 2014: fixed OS X BarChartUI to properly show bar names without calling show
- 15 May. 2015 to 15 May. 2015: added user interfaces for programming for economy -- Sebastian
- 22 Jun. 2015 to 22 Jun. 2015: fixed asking twice for file_input on Windows -- Gerben
@author: Gerben Rozie
@author: Sebastian Osterlund
@author: Sander Benoist
'''
import tkinter as _tk
import tkinter.dialog as _Dialog
import tkinter.filedialog as _tkFileDialog
import tkinter.messagebox as _tkMessageBox
import queue as _Queue
import threading as _threading
import time as _time
import os as _os
import random as _random
import sys as _sys
import datetime as _datetime
import pickle as _pickle
import urllib.request, urllib.error, urllib.parse
import urllib.request, urllib.parse, urllib.error
import json
# Headless environments may have no DISPLAY set; Tk needs one to start at all.
if _os.environ.get('DISPLAY','') == '':
    _os.environ.__setitem__('DISPLAY', ':0.0')
# Matplotlib is optional: only the market user interfaces depend on it.
have_mpl = False
try:
    import matplotlib as mpl
    if _sys.platform == 'darwin':  # darwin == OS X
        mpl.use('TkAgg')  # if TkAgg doesn't fix it for the student, try QT4Agg
    if _sys.platform == 'linux' or _sys.platform == 'linux2':
        mpl.rcParams['backend'] = 'QT4Agg'
    import pylab as plt
    if _sys.platform == 'linux' or _sys.platform == 'linux2':
        plt.switch_backend('QT4Agg')  # Use QT4 for linux. Bug in TK.
    have_mpl = True
except ImportError:
    print("Could not import matplotlib. HouseMarketUserInterface and StockMarketUserInterface have been disabled.")
# Web service endpoints used by the market user interfaces.
YAHOO_URL = 'https://query.yahooapis.com/v1/public/yql'
ALPHA_VANTAGE_URL = 'http://www.alphavantage.co/query'
class _IPyException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
def _verify_int(value_var, string_var, minimum=None, maximum=None):
    """Raise _IPyException unless value_var is an int inside [minimum, maximum].

    Fix: the error message now reads "... is not an int ..." for consistency
    with the other _verify_* helpers.
    """
    if not isinstance(value_var, int):
        value = "%s is not an int for %s, got %s" % (value_var, string_var, str(type(value_var))[1:-1])
        raise _IPyException(value)
    _verify_input(value_var, string_var, minimum, maximum)
def _verify_float(value_var, string_var, minimum=None, maximum=None):
    """Raise _IPyException unless value_var is a float (ints are accepted too)
    inside [minimum, maximum]."""
    if not isinstance(value_var, (float, int)):
        value = "%s is not a float or int for %s, got %s" % (value_var, string_var, str(type(value_var))[1:-1])
        raise _IPyException(value)
    _verify_input(value_var, string_var, minimum, maximum)
def _verify_str(value_var, string_var):
if not isinstance(value_var, str):
value = "%s is not a string for %s, got %s" % (value_var, string_var, str(type(value_var))[1:-1])
raise _IPyException(value)
def _verify_bool(value_var, string_var):
if not isinstance(value_var, bool):
value = "%s is not a boolean for %s, got %s" % (value_var, string_var, str(type(value_var))[1:-1])
raise _IPyException(value)
def _verify_input(value_var, string_var, minimum=None, maximum=None):
if minimum is None:
minimum = float('-inf')
if maximum is None:
maximum = float('inf')
if value_var >= minimum:
if value_var <= maximum:
return
value = "%s is out of bounds, expected range: %s to %s, got: %s" % (string_var, minimum, maximum, value_var)
raise _IPyException(value)
class _OthelloReplayHolder(object):
# used in the queue to hold values of the changes to be made
def __init__(self, x, y, color):
self.x = x
self.y = y
self.color = color
class _BarChartHolder(object):
# used in the queue to hold values of the changes to be made
def __init__(self, bar_index):
self.bar_index = bar_index
class _BarChartNameHolder(object):
# used in the queue to hold values of the changes to be made
def __init__(self, bar_index, bar_name):
self.bar_index = bar_index
self.bar_name = bar_name
class _SnakeHolder(object):
def __init__(self, x, y, color):
self.x = x
self.y = y
self.color = color
class _LifeHolder(object):
def __init__(self, x, y, color):
self.x = x
self.y = y
self.color = color
_ui_factory = None
def file_input():
    """This function lets the user select a file to use for input.
    Returns the file contents in a string, or None if the dialog was cancelled.
    """
    global _ui_factory
    # _AskInput shows the dialog and, on success, redirects sys.stdin to the file.
    f = _AskInput(_ui_factory.mainroot).f
    # An empty path means the dialog was cancelled.
    if f == '':
        return None
    # Read everything from the redirected stdin, i.e. the chosen file.
    return str(_sys.stdin.read())
def ask_user(question, *options):
    """Ask the user a question.
    Parameters:
    - question: the string to ask the user
    - options: arbitrary list of arguments (at least 1)
    Returns the chosen option by the user or None if nothing was chosen (e.g. hit Escape).
    """
    # The dialog needs at least one button to offer.
    if len(options) == 0:
        value = "User needs to be able to select at least 1 answer"
        raise _IPyException(value)
    global _ui_factory
    return _AskUser(_ui_factory.mainroot, question, options).answer
class _Factory():
    """Owner of the single hidden Tk root used by every user interface window."""

    def __init__(self):
        # Hidden Tk root; all UI windows are Toplevels attached to it.
        self.mainroot = _tk.Tk()
        self.mainroot.withdraw()
        self.mainroot.update()
class _AskInput(object):
    """Modal open-file dialog; on success redirects sys.stdin to the chosen file.

    Bug fix: the selected path is now compared with ``!=`` instead of ``is not``.
    Identity comparison against a string literal is implementation-dependent
    (and raises a SyntaxWarning on modern Pythons).
    """

    def __init__(self, mainroot):
        root = _tk.Toplevel(mainroot)
        root.withdraw()
        # The chosen file name ('' when the user cancels the dialog).
        self.f = _tkFileDialog.askopenfilename(parent=root)
        if self.f != '':
            _sys.stdin = open(self.f)
        root.destroy()
class _AskUser(object):
    """Modal multiple-choice dialog; the selected option is stored in self.answer."""

    def __init__(self, mainroot, question, options):
        root = _tk.Toplevel(mainroot)
        root.withdraw()
        dg = _Dialog.Dialog(None,
                            title="",
                            text=question,
                            default=0,
                            bitmap=_tkMessageBox.QUESTION,
                            strings=options)
        # dg.num is the index of the button the user pressed.
        self.answer = options[dg.num]
        root.destroy()
class OthelloReplayUserInterface(object):
    # Public facade that validates arguments and delegates all drawing to _Othello.
    def __init__(self, scale=1.0):
        """This class starts the OthelloReplayUserInterface.
        Constants:
        - NUMBER_OF_ROWS
        - NUMBER_OF_COLUMNS
        - EMPTY
        - WHITE
        - BLACK
        Parameters for the class: (none)
        Optional parameters:
        - scale: 0.25 to 1.0
        """
        _verify_float(scale, 'Scale', 0.25, 1.0)
        global _ui_factory
        # All drawing is delegated to the private _Othello window.
        self.othello_replay = _Othello(_ui_factory.mainroot, scale)
        # Re-export the _Othello constants so users only need this class.
        self.NUMBER_OF_ROWS = _Othello.NUMBER_OF_ROWS
        self.NUMBER_OF_COLUMNS = _Othello.NUMBER_OF_COLUMNS
        self.EMPTY = _Othello.EMPTY
        self.WHITE = _Othello.WHITE
        self.BLACK = _Othello.BLACK

    def place(self, x, y, color):
        """Place an Othello piece (defined by 'color') on the given X and Y coordinates.
        """
        _verify_int(x, 'X', 0, self.NUMBER_OF_COLUMNS - 1)
        _verify_int(y, 'Y', 0, self.NUMBER_OF_ROWS - 1)
        # 0 = empty, 1 = white, 2 = black, 3 = white_t, 4 = black_t
        _verify_int(color, 'Color', 0, 4)
        self.othello_replay.place(x, y, color)

    def place_transparent(self, x, y, color):
        """Place a semi-transparent Othello piece (defined by 'color') on the given X and Y coordinates.
        """
        _verify_int(x, 'X', 0, self.NUMBER_OF_COLUMNS - 1)
        _verify_int(y, 'Y', 0, self.NUMBER_OF_ROWS - 1)
        # 0 = empty, 1 = white_t, 2 = black_t (before next step in code)
        _verify_int(color, 'Color', 0, 2)
        if color == self.EMPTY:
            self.place(x, y, self.EMPTY)
        else:
            # Shift into the transparent range (3 = white_t, 4 = black_t).
            self.place(x, y, color + 2)

    def clear(self):
        """Clears the display.
        Note: this does not clear the text area!
        """
        self.othello_replay.clear()

    def show(self):
        """Show the changes made to the display (i.e. after calling place or clear).
        """
        self.othello_replay.show()

    def print_(self, text):
        """Print text to the text area on the display.
        This function does not add a trailing newline by itself.
        """
        _verify_str(text, "Text")
        self.othello_replay.print_(text)

    def clear_text(self):
        """Clears the text area on the display.
        """
        self.othello_replay.clear_text()

    def wait(self, ms):
        """Let your program wait for an amount of milliseconds.
        This function only guarantees that it will wait at least this amount of time.
        If the system is busy, for instance, this time might increase.
        - Python time module.
        """
        _verify_int(ms, "Waiting time", 0)
        self.othello_replay.wait(ms)

    def close(self):
        """Closes the display and stops your program.
        """
        self.othello_replay.close()

    def stay_open(self):
        """Force the window to remain open.
        Only has effect on Mac OS to prevent the window from closing after the execution finishes.
        Make sure that this is the last statement you call when including it because the code does NOT continue after this.
        """
        global _ui_factory
        _ui_factory.mainroot.mainloop()
class _Othello(object):
    """Private Tk window that renders the Othello board, pieces and text area."""
    # one cannot prevent users from editing 'constants', as constants simply do not exist in Python
    NUMBER_OF_ROWS = 8
    NUMBER_OF_COLUMNS = 8
    EMPTY = 0
    WHITE = 1
    BLACK = 2
    # Board background colour components (dark green).
    r = 20
    g = 120
    b = 0
    BACKGROUND = "#%02X%02X%02X" % (r, g, b)  # i.e. "#147800"

    def __init__(self, mainroot, scale=1.0):
        # create queue to store changes to placings
        self.to_show_queue = _Queue.Queue(maxsize=0)
        # start the main window
        self.root = _tk.Toplevel(mainroot)
        self.root.title("OthelloReplayUserInterface")
        self.root.protocol("WM_DELETE_WINDOW", self.callback)
        self.root.bind("<Escape>", self.callback)
        self.root.resizable(False, False)
        # calculate sizes
        self.text_height = int(150 * scale)
        self.othello_size = int(800 * scale)
        # create main frame
        self.frame = _tk.Frame(self.root, width=self.othello_size, height=self.othello_size + self.text_height)
        self.frame.pack_propagate(0)
        self.frame.pack()
        # create board to hold references to othello-pieces
        self.white_board = []  # for storing references to create_image
        self.black_board = []
        self.white_ghost_board = []
        self.black_ghost_board = []
        self.img_refs = []  # for storing references to images - order: white, black
        # create and fill the canvas --> paintable area
        self.c = _tk.Canvas(self.frame, width=self.othello_size, height=self.othello_size, bg=self.BACKGROUND, bd=0,
                            highlightthickness=0)
        self.c.pack()
        self.c.focus_set()
        self.fill_canvas()
        # create the textholder
        self.scrollbar = _tk.Scrollbar(self.frame)
        self.scrollbar.pack(side=_tk.RIGHT, fill=_tk.Y)
        self.textarea = _tk.Text(self.frame, yscrollcommand=self.scrollbar.set, width=self.othello_size)
        self.textarea.pack(side=_tk.LEFT, fill=_tk.BOTH)
        self.scrollbar.config(command=self.textarea.yview)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def callback(self, event=None):
        # Window-close / Escape handler: tear down and hard-exit the program.
        self.root.destroy()
        _os._exit(0)

    def place(self, x, y, color):
        # Changes are queued and only applied to the canvas by show().
        element = _OthelloReplayHolder(x, y, color)
        self.to_show_queue.put(element)

    def clear(self):
        # Queue an EMPTY placement for every cell on the board.
        for x in range(self.NUMBER_OF_COLUMNS):
            for y in range(self.NUMBER_OF_ROWS):
                self.place(x, y, self.EMPTY)

    def show(self):
        # Drain the queue; for each cell show only the canvas items matching
        # the requested colour and hide the other three variants.
        try:
            while True:
                element = self.to_show_queue.get_nowait()
                position = []
                position.append(self.white_board[element.x][element.y])
                position.append(self.black_board[element.x][element.y])
                position.append(self.white_ghost_board[element.x][element.y])
                position.append(self.black_ghost_board[element.x][element.y])
                for i in range(len(position)):
                    if element.color == i + 1:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.NORMAL)
                    else:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.HIDDEN)
        except _Queue.Empty:
            pass
        global _ui_factory
        _ui_factory.mainroot.update()

    def print_(self, text):
        # The text widget is kept DISABLED so users cannot type into it;
        # temporarily enable it to append.
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.insert(_tk.END, text)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def clear_text(self):
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.delete(1.0, _tk.END)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def wait(self, ms):
        try:
            _time.sleep(ms * 0.001)
        except:
            # Interrupted sleep (e.g. KeyboardInterrupt): shut the UI down.
            self.close()

    def close(self):
        self.root.destroy()
        _os._exit(0)

    def create_othello_grid(self):
        # Vertical grid lines.
        for i in range(self.NUMBER_OF_COLUMNS + 1):
            x0 = self.xpad + self.xstep * i
            y0 = self.ypad
            x1 = x0
            y1 = self.ypad + self.ystep * self.NUMBER_OF_ROWS + 1
            coords = x0, y0, x1, y1
            self.c.create_line(coords, fill='black')
        # Horizontal grid lines.
        for j in range(self.NUMBER_OF_ROWS + 1):
            x0 = self.xpad
            y0 = self.ypad + self.ystep * j
            x1 = self.xpad + self.xstep * self.NUMBER_OF_COLUMNS + 1
            y1 = y0
            coords = x0, y0, x1, y1
            self.c.create_line(coords, fill='black')
        # Column labels (a, b, c, ...) above and below the board.
        for i in range(self.NUMBER_OF_COLUMNS):
            x0 = self.xpad + self.xstep / 2 + self.xstep * i
            y0 = self.ypad / 2
            x1 = x0
            y1 = self.othello_size - self.ystep / 2
            coords0 = x0, y0
            coords1 = x1, y1
            self.c.create_text(coords0, text=chr(ord('a') + i))
            self.c.create_text(coords1, text=chr(ord('a') + i))
        # Row labels (1, 2, 3, ...) left and right of the board.
        for j in range(self.NUMBER_OF_ROWS):
            x0 = int(self.xpad / 2)
            y0 = self.ypad + self.ystep / 2 + self.ystep * j
            x1 = self.othello_size - self.xstep / 2
            y1 = y0
            coords0 = x0, y0
            coords1 = x1, y1
            self.c.create_text(coords0, text='%s' % (j + 1))
            self.c.create_text(coords1, text='%s' % (j + 1))

    def mix_color(self, c1, c2, mix):
        # mix == 0 gives the plain colour; otherwise average with the background
        # component to fake semi-transparency.
        return c1 if mix == 0 else int((c1 + c2) / 2)

    def create_piece(self, x0, y0, img, mix):
        # Create the (initially hidden) oval canvas items for one piece variant.
        result = []
        if img == self.WHITE:
            r = self.mix_color(255, self.r, mix)
            g = self.mix_color(255, self.g, mix)
            b = self.mix_color(255, self.b, mix)
            scale = 0.8
            x1 = x0 + (1.0 - scale) / 2.0 * self.xstep
            y1 = y0 + (1.0 - scale) / 2.0 * self.ystep
            x2 = x0 + (1.0 - (1.0 - scale) / 2.0) * self.xstep
            y2 = y0 + (1.0 - (1.0 - scale) / 2.0) * self.ystep
            result.append(
                self.c.create_oval(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
        if img == self.BLACK:
            r = self.mix_color(0, self.r, mix)
            g = self.mix_color(0, self.g, mix)
            b = self.mix_color(0, self.b, mix)
            scale = 0.8
            x1 = x0 + (1.0 - scale) / 2.0 * self.xstep
            y1 = y0 + (1.0 - scale) / 2.0 * self.ystep
            x2 = x0 + (1.0 - (1.0 - scale) / 2.0) * self.xstep
            y2 = y0 + (1.0 - (1.0 - scale) / 2.0) * self.ystep
            result.append(
                self.c.create_oval(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
        return result

    def create_othello_pieces(self):
        # Four variants per cell: solid white/black and "ghost" (transparent) white/black.
        mixer = 0, 0, 1, 1
        imgtype = self.WHITE, self.BLACK, self.WHITE, self.BLACK
        boards = self.white_board, self.black_board, self.white_ghost_board, self.black_ghost_board
        for n in range(len(boards)):
            for i in range(self.NUMBER_OF_COLUMNS):
                boards[n].append([])
                for j in range(self.NUMBER_OF_ROWS):
                    x0 = self.xpad + self.xstep * i
                    y0 = self.ypad + self.ystep * j
                    img = self.create_piece(x0, y0, imgtype[n], mixer[n])
                    boards[n][i].append(img)

    def fill_canvas(self):
        # Compute the cell size and padding, then draw grid and pieces.
        self.xstep = int(self.othello_size / (self.NUMBER_OF_COLUMNS + 2))
        self.ystep = int(self.othello_size / (self.NUMBER_OF_ROWS + 2))
        self.xpad = self.othello_size - self.NUMBER_OF_COLUMNS * self.xstep / 2 - self.othello_size / 2
        self.ypad = self.othello_size - self.NUMBER_OF_ROWS * self.ystep / 2 - self.othello_size / 2
        self.create_othello_grid()
        self.create_othello_pieces()
class BarChartUserInterface(object):
    # Public facade that validates arguments and delegates all drawing to _BarChart.
    def __init__(self, bar_count):
        """This class starts the BarChartUserInterface.
        Constants: (none)
        Parameters for the class:
        - bar_count: at least 1
        Optional parameters: (none)
        """
        _verify_int(bar_count, "Bar count", 1)
        global _ui_factory
        self.bar_chart = _BarChart(bar_count, _ui_factory.mainroot)

    def set_bar_name(self, bar_index, text):
        """Set a name, provided by 'text', to a given bar_index.
        Note: this function's effects are visible without calling show.
        """
        _verify_int(bar_index, "Bar index", 0, self.bar_chart.bar_count - 1)
        _verify_str(text, "Text")
        self.bar_chart.set_bar_name(bar_index, text)

    def raise_bar(self, bar_index):
        """Increment the given bar_index by 1.
        """
        _verify_int(bar_index, "Bar index", 0, self.bar_chart.bar_count - 1)
        self.bar_chart.raise_bar(bar_index)

    def show(self):
        """Show the changes made to the display (i.e. after calling raise_bar).
        """
        self.bar_chart.show()

    def show_names(self, value):
        """Whether or not to show the names of the bars.
        Value given must be a boolean.
        Default at start is False.
        """
        _verify_bool(value, "Show names")
        self.bar_chart.show_names(value)

    def show_values(self, value):
        """Whether or not to show the values of the bars.
        Value given must be a boolean.
        Default at start is True.
        """
        _verify_bool(value, "Show values")
        self.bar_chart.show_values(value)

    def wait(self, ms):
        """Let your program wait for an amount of milliseconds.
        This function only guarantees that it will wait at least this amount of time.
        If the system is busy, for instance, this time might increase.
        - Python time module.
        """
        _verify_int(ms, "Waiting time", 0)
        self.bar_chart.wait(ms)

    def close(self):
        """Closes the display and stops your program.
        """
        self.bar_chart.close()

    def stay_open(self):
        """Force the window to remain open.
        Only has effect on Mac OS to prevent the window from closing after the execution finishes.
        Make sure that this is the last statement you call when including it because the code does NOT continue after this.
        """
        global _ui_factory
        _ui_factory.mainroot.mainloop()
class _BarChart(object):
    """Private Tk window that renders a resizable bar chart."""

    def __init__(self, bar_count, mainroot):
        # create queue to store changes to placings
        self.to_show_queue = _Queue.Queue(maxsize=0)
        # variables used to keep the number of refreshes of names and values in check
        self.show_names_bool = False
        self.show_values_bool = True
        self.bar_count = bar_count
        # start the main window
        self.root = _tk.Toplevel(mainroot)
        self.root.title("BarChartUserInterface")
        self.root.protocol("WM_DELETE_WINDOW", self.callback)
        self.root.bind("<Escape>", self.callback)
        self.frame = _tk.Frame(self.root)
        self.frame.pack(fill=_tk.BOTH, expand=_tk.YES)
        # Initial window size; updated by redraw() on <Configure> events.
        self.height = 575
        self.width = 400
        self.c = _tk.Canvas(self.frame, width=self.width, height=self.height, bg='white', bd=0, highlightthickness=0)
        self.c.pack(fill=_tk.BOTH, expand=_tk.YES)
        self.c.focus_set()
        self.c.bind('<Configure>', self.redraw)
        # bar_max is the tallest bar seen so far; it fixes the vertical scale.
        self.bar_max = 0
        self.bars = []
        self.names = []
        self.create_bars()
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def callback(self, event=None):
        # Window-close / Escape handler: tear down and hard-exit the program.
        self.root.destroy()
        _os._exit(0)

    def set_bar_name(self, bar_index, text):
        # Names take effect immediately (no show() required).
        self.names[bar_index] = text;
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def raise_bar(self, bar_index):
        # Increments are queued and only applied by show().
        element = _BarChartHolder(bar_index)
        self.to_show_queue.put(element)

    def inc_bar(self, bar_index):
        # Track the new maximum so the chart can be rescaled.
        if (self.bars[bar_index] + 1) > self.bar_max:
            self.bar_max = self.bars[bar_index] + 1
        self.bars[bar_index] += 1

    def show(self):
        # Drain the queue of pending increments, then repaint.
        try:
            while True:
                element = self.to_show_queue.get_nowait()
                self.inc_bar(element.bar_index)
        except _Queue.Empty:
            pass
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def show_names(self, value):
        self.show_names_bool = value
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def show_values(self, value):
        self.show_values_bool = value
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def wait(self, ms):
        try:
            _time.sleep(ms * 0.001)
        except:
            # Interrupted sleep (e.g. KeyboardInterrupt): shut the UI down.
            self.close()
        global _ui_factory
        _ui_factory.mainroot.update()

    def close(self):
        self.root.destroy()
        _os._exit(0)
        return  # unreachable: _os._exit never returns

    def create_bars(self):
        # Start every bar at height 0 with an empty name.
        for i in range(self.bar_count):  # @UnusedVariable
            self.bars.append(0)
            self.names.append('')

    def redraw(self, event=None):
        # A <Configure> event carries the new window size.
        if event != None:
            self.width = event.width
            self.height = event.height
        # Wipe the canvas and repaint everything from scratch.
        for e in self.c.find_all():
            self.c.delete(e)
        self.fill_canvas()

    def fill_canvas(self):
        xstep = int(self.width / (self.bar_count + 2))
        xpad = int((self.width - xstep * self.bar_count) / 2)  # - self.width / 2
        xspacing = int(xstep / 10)
        ypad = int(self.height / 10)  # - self.height / 2
        ypadtext = int(ypad / 3)
        for i in range(self.bar_count):
            # draw the bar (shade of red proportional to its relative height)
            x0 = xpad + xstep * i + xspacing
            y0 = self.height - ypad
            x1 = xpad + xstep * (i + 1) - xspacing
            y1 = self.height - ypad
            color = 0
            if self.bar_max > 0:
                y_len = self.bars[i] * int((self.height - 2 * ypad) / self.bar_max)
                y1 -= y_len
                color = self.bars[i] * int(255 / self.bar_max)
            coords = x0, y0, x1, y1
            hex_color = "#%02x%02x%02x" % (color, 0, 0)  # red, green, blue
            self.c.create_rectangle(coords, fill=hex_color)
            # draw the values
            x1 = xpad + xstep * i + int(xstep / 2)
            y1 -= ypadtext
            coords = x1, y1
            value = ("%d" % self.bars[i]) if self.show_values_bool else ''
            self.c.create_text(coords, text=value)
            # draw the names
            x0 = xpad + xstep * i + int(xstep / 2)
            y0 += ypadtext
            coords = x0, y0
            name = self.names[i] if self.show_names_bool else ''
            self.c.create_text(coords, text=name)
class SnakeUserInterface(object):
    def __init__(self, width, height, scale=1.0):
        """Open the Snake display window.

        Constants exposed on the instance: EMPTY, FOOD, SNAKE, WALL.

        Arguments:
        - width: number of columns, minimum 1
        - height: number of rows, minimum 1
        - scale: optional zoom factor between 0.25 and 1.0
        """
        _verify_int(width, "Width", 1)
        _verify_int(height, "Height", 1)
        _verify_float(scale, 'Scale', 0.25, 1.0)
        global _ui_factory
        self.snake_interface = _Snake(width, height, _ui_factory.mainroot, scale)
        self.EMPTY = _Snake.EMPTY
        self.FOOD = _Snake.FOOD
        self.SNAKE = _Snake.SNAKE
        self.WALL = _Snake.WALL

    def place(self, x, y, color):
        """Queue a Snake piece of the given 'color' for the cell at (x, y)."""
        _verify_int(x, 'X', 0, self.snake_interface.width - 1)
        _verify_int(y, 'Y', 0, self.snake_interface.height - 1)
        # valid colors: 0=empty, 1=food, 2=snake, 3=wall, 4=food_t, 5=snake_t, 6=wall_t
        _verify_int(color, 'Color', 0, 6)
        self.snake_interface.place(x, y, color)

    def place_transparent(self, x, y, color):
        """Queue a semi-transparent Snake piece of the given 'color' for the cell at (x, y)."""
        _verify_int(x, 'X', 0, self.snake_interface.width - 1)
        _verify_int(y, 'Y', 0, self.snake_interface.height - 1)
        _verify_int(color, 'Color', 0, 6)
        # the transparent image variants are stored three slots past their opaque versions
        self.place(x, y, color if color == self.EMPTY else color + 3)

    def clear(self):
        """Reset every cell of the display to EMPTY.

        Note: the text area is left untouched.
        """
        self.snake_interface.clear()

    def show(self):
        """Flush all queued place()/clear() changes onto the screen."""
        self.snake_interface.show()

    def get_event(self):
        """Block until an event arrives from the display and return it.

        The returned object carries two properties:
        - name: the group the event belongs to
        - data: event-specific payload for the user
        """
        return self.snake_interface.get_event()

    def set_animation_speed(self, fps):
        """Request 'fps' repeating alarm events per second.

        A value of 0 or less stops the repetition. The theoretical upper
        bound is 1000, subject to system load. Each generated event
        (retrieved via get_event) has:
        - name: 'alarm'
        - data: 'refresh'
        """
        _verify_float(fps, "Animation speed")
        self.snake_interface.set_animation_speed(fps)

    def print_(self, text):
        """Append 'text' to the text area; no trailing newline is added automatically."""
        _verify_str(text, "Text")
        self.snake_interface.print_(text)

    def clear_text(self):
        """Remove all text from the text area of the display."""
        self.snake_interface.clear_text()

    def wait(self, ms):
        """Pause the program for at least 'ms' milliseconds.

        Only a lower bound is guaranteed: on a busy system the actual wait
        may be longer (backed by the Python time module).
        """
        _verify_int(ms, "Waiting time", 0)
        self.snake_interface.wait(ms)

    def random(self, maximum):
        """Return a random integer in the range 0 <= x < maximum (maximum must be >= 1)."""
        _verify_int(maximum, 'Random', 1)
        return self.snake_interface.random(maximum)

    def close(self):
        """Destroy the display window and terminate the program."""
        self.snake_interface.close()

    def stay_open(self):
        """Keep the window open by entering the Tk main loop.

        Only has an effect on Mac OS, where it prevents the window from
        closing when execution finishes. Call it last: the code does NOT
        continue past this statement.
        """
        global _ui_factory
        _ui_factory.mainroot.mainloop()
class _Snake(object):
    """Backend window for SnakeUserInterface.

    Builds a Toplevel with a canvas of pre-created (initially hidden) pieces
    plus a scrollable text area, and translates Tk callbacks into Event
    objects delivered through a queue.
    """
    # one cannot prevent users from editing 'constants', as constants simply do not exist in Python
    EMPTY = 0
    FOOD = 1
    SNAKE = 2
    WALL = 3

    def __init__(self, width, height, mainroot, scale=1.0):
        # queues decouple the user's place()/event calls from the canvas updates
        self.to_show_queue = _Queue.Queue(maxsize=0)
        self.event_queue = _Queue.Queue(maxsize=0)
        # copy params
        self.width = width
        self.height = height
        self.scale = scale
        self.closing_window = False
        # Bug fix: this flag is read by motion_event() but was previously only
        # created inside the <Enter>/<Leave> handlers, so a <Motion> event
        # arriving first raised AttributeError. Start with the mouse "outside".
        self.mouse_on_screen = False
        # start the main window
        self.root = _tk.Toplevel(mainroot)
        self.root.title("SnakeUserInterface")
        self.root.protocol("WM_DELETE_WINDOW", self.callback)
        self.root.bind("<Escape>", self.callback)
        self.root.resizable(False, False)
        # calculate sizes
        self.size_per_coord = int(25 * scale)
        self.text_height = int(100 * scale)
        # create main frame
        self.frame = _tk.Frame(self.root, width=self.size_per_coord * self.width,
                               height=self.size_per_coord * self.height + self.text_height)
        self.frame.pack_propagate(0)
        self.frame.pack()
        # boards hold per-cell lists of canvas item ids, one board per piece kind
        self.food_board = []  # for storing references to create_image
        self.snake_board = []
        self.wall_board = []
        self.food_ghost_board = []
        self.snake_ghost_board = []
        self.wall_ghost_board = []
        self.img_refs = []  # for storing references to images - order: food, snake, wall, food_t, snake_t, wall_t
        # create and fill the canvas --> paintable area
        self.c = _tk.Canvas(self.frame, width=self.size_per_coord * self.width,
                            height=self.size_per_coord * self.height, bg="black", bd=0, highlightthickness=0)
        self.c.pack()
        self.last_x = -1  # used to generate mouseOver/Exit events
        self.last_y = -1  # used to generate mouseOver/Exit events
        self.fill_canvas()
        # create the textholder
        self.scrollbar = _tk.Scrollbar(self.frame)
        self.scrollbar.pack(side=_tk.RIGHT, fill=_tk.Y)
        self.textarea = _tk.Text(self.frame, yscrollcommand=self.scrollbar.set)
        self.textarea.pack(side=_tk.LEFT, fill=_tk.BOTH)
        self.scrollbar.config(command=self.textarea.yview)
        self.textarea.config(state=_tk.DISABLED)
        self.interval = 0
        self.alarm_speed = 0
        self.timer = self.milliseconds()
        global _ui_factory
        _ui_factory.mainroot.update()

    def callback(self, event=None):
        """Close the window and hard-exit the process (Escape / window close)."""
        self.root.destroy()
        _os._exit(0)

    def milliseconds(self):
        """Current wall-clock time in milliseconds."""
        return _time.time() * 1000

    def place(self, x, y, color):
        """Queue a cell change; it becomes visible only after show()."""
        element = _SnakeHolder(x, y, color)
        self.to_show_queue.put(element)

    def clear(self):
        """Queue an EMPTY placement for every cell of the board."""
        for x in range(self.width):
            for y in range(self.height):
                self.place(x, y, self.EMPTY)

    def show(self):
        """Drain the placement queue and update canvas item visibility.

        For each queued cell, the item matching the requested color is shown
        and the other five variants are hidden; color 0 (EMPTY) hides all six.
        """
        try:
            while True:
                element = self.to_show_queue.get_nowait()
                position = []
                position.append(self.food_board[element.x][element.y])
                position.append(self.snake_board[element.x][element.y])
                position.append(self.wall_board[element.x][element.y])
                position.append(self.food_ghost_board[element.x][element.y])
                position.append(self.snake_ghost_board[element.x][element.y])
                position.append(self.wall_ghost_board[element.x][element.y])
                for i in range(len(position)):
                    # add 1 to i, because 0 is empty [same as doing color - 1]
                    # thus, if 0, then it doesn't match with 1 to 6
                    # therefore putting the whole position to hidden
                    if element.color == i + 1:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.NORMAL)
                    else:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.HIDDEN)
        except _Queue.Empty:
            pass
        global _ui_factory
        _ui_factory.mainroot.update()

    def get_event(self):
        """Pump Tk and poll the event queue until an event is available."""
        global _ui_factory
        _ui_factory.mainroot.update()
        while True:
            try:
                self.refresh_event()
                event = self.event_queue.get_nowait()
                return event
            except _Queue.Empty:
                # sleep briefly (capped at 10 ms) so polling doesn't spin the CPU
                wait_time = min(self.interval, 10)
                self.wait(wait_time)
                _ui_factory.mainroot.update()

    def set_animation_speed(self, fps):
        """Configure the alarm interval; fps <= 0 disables it, fps is capped at 1000."""
        current_time = self.milliseconds()
        if fps <= 0:
            self.interval = 0
            self.timer = current_time
            return
        if fps > 1000:
            fps = 1000
        self.interval = int(1000.0 / fps)
        self.refresh_event()

    def print_(self, text):
        """Append text to the (normally read-only) text area and scroll to the end."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.insert(_tk.END, text)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def clear_text(self):
        """Delete all contents of the text area."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.delete(1.0, _tk.END)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def wait(self, ms):
        """Sleep for ms milliseconds; any interruption (e.g. Ctrl-C) closes the UI."""
        try:
            _time.sleep(ms * 0.001)
        except:  # deliberately broad: KeyboardInterrupt during sleep must close the window
            self.close()

    def close(self):
        """Destroy the window and hard-exit the process."""
        self.root.destroy()
        _os._exit(0)

    def random(self, maximum=1):
        """Return a random integer in 0 <= x < maximum."""
        return int(_random.random() * maximum)

    def create_piece(self, x0, y0, img, mix):
        """Create the hidden canvas item(s) for one piece at pixel (x0, y0).

        mix = 0 gives full color, mix = 1 halves each channel to render the
        semi-transparent ("ghost") variant. Returns the list of item ids.
        """
        result = []
        if img == self.FOOD:
            # apple body: red oval in the lower 80% of the cell
            r = int(255 / (1 + mix))
            g = int(64 / (1 + mix))
            b = int(64 / (1 + mix))
            scale = 0.8
            x1 = x0 + (1.0 - scale) / 2.0 * self.size_per_coord
            y1 = y0 + (1.0 - scale) * self.size_per_coord
            x2 = x0 + (1.0 - (1.0 - scale) / 2.0) * self.size_per_coord
            y2 = y0 + self.size_per_coord
            result.append(
                self.c.create_oval(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
            # apple stem: short green line from the top of the cell
            r = int(64 / (1 + mix))
            g = int(255 / (1 + mix))
            b = int(64 / (1 + mix))
            scale = 0.4
            x1 = x0 + self.size_per_coord / 2.0
            y1 = y0
            x2 = x1
            y2 = y0 + scale * self.size_per_coord
            result.append(
                self.c.create_line(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=2))
        if img == self.SNAKE:
            # snake segment: green oval filling the cell
            r = int(32 / (1 + mix))
            g = int(255 / (1 + mix))
            b = int(0 / (1 + mix))
            x1 = x0
            y1 = y0
            x2 = x0 + self.size_per_coord
            y2 = y0 + self.size_per_coord
            result.append(
                self.c.create_oval(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
        if img == self.WALL:
            # wall: brown rectangle filling the cell
            r = int(200 / (1 + mix))
            g = int(100 / (1 + mix))
            b = int(0 / (1 + mix))
            x1 = x0
            y1 = y0
            x2 = x0 + self.size_per_coord
            y2 = y0 + self.size_per_coord
            result.append(
                self.c.create_rectangle(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
        return result

    def create_snake_pieces(self):
        """Pre-create every piece variant for every cell, all initially hidden."""
        mixer = 0, 0, 0, 1, 1, 1
        imgtype = self.FOOD, self.SNAKE, self.WALL, self.FOOD, self.SNAKE, self.WALL
        boards = self.food_board, self.snake_board, self.wall_board, self.food_ghost_board, self.snake_ghost_board, self.wall_ghost_board
        for n in range(len(boards)):
            for i in range(self.width):
                boards[n].append([])
                for j in range(self.height):
                    x0 = self.size_per_coord * i
                    y0 = self.size_per_coord * j
                    img = self.create_piece(x0, y0, imgtype[n], mixer[n])
                    boards[n][i].append(img)

    def fill_canvas(self):
        """Bind input events and populate the canvas with all piece items."""
        self.bind_events()
        self.create_snake_pieces()

    def motion_event(self, event):
        """Emit mouseexit/mouseover pairs when the pointer crosses a cell boundary."""
        if not self.mouse_on_screen:
            return
        x_old = self.last_x
        y_old = self.last_y
        x_new = event.x / self.size_per_coord
        y_new = event.y / self.size_per_coord
        x_change = int(x_old) != int(x_new)
        y_change = int(y_old) != int(y_new)
        if x_change or y_change:
            self.generate_event("mouseexit", "%d %d" % (x_old, y_old))
            self.generate_event("mouseover", "%d %d" % (x_new, y_new))
            self.last_x = x_new
            self.last_y = y_new

    def enter_window_event(self, event):
        """Pointer entered the canvas: emit mouseover and mark the mouse as inside."""
        x_new = event.x / self.size_per_coord
        y_new = event.y / self.size_per_coord
        self.generate_event("mouseover", "%d %d" % (x_new, y_new))
        self.last_x = x_new
        self.last_y = y_new
        self.mouse_on_screen = True

    def leave_window_event(self, event):
        """Pointer left the canvas: emit mouseexit and mark the mouse as outside."""
        self.generate_event("mouseexit", "%d %d" % (self.last_x, self.last_y))
        self.mouse_on_screen = False

    def alt_number_event(self, event):
        """Emit alt_number events for Alt+digit key presses."""
        if event.char == event.keysym:
            if ord(event.char) >= ord('0') and ord(event.char) <= ord('9'):
                self.generate_event("alt_number", event.char)

    def key_event(self, event):
        """Classify a key press into number/letter/arrow/other events."""
        if event.char == event.keysym:
            # printable single-character keys
            if ord(event.char) >= ord('0') and ord(event.char) <= ord('9'):
                self.generate_event("number", event.char)
            elif ord(event.char) >= ord('a') and ord(event.char) <= ord('z'):
                self.generate_event("letter", event.char)
            elif ord(event.char) >= ord('A') and ord(event.char) <= ord('Z'):
                self.generate_event("letter", event.char)
            else:
                self.generate_event("other", event.char)
        elif event.keysym == 'Up':
            self.generate_event("arrow", "u")
        elif event.keysym == 'Down':
            self.generate_event("arrow", "d")
        elif event.keysym == 'Left':
            self.generate_event("arrow", "l")
        elif event.keysym == 'Right':
            self.generate_event("arrow", "r")
        elif event.keysym == 'Multi_Key':
            return
        elif event.keysym == 'Caps_Lock':
            self.generate_event("other", "caps lock")
        elif event.keysym == 'Num_Lock':
            self.generate_event("other", "num lock")
        elif event.keysym == 'Shift_L' or event.keysym == 'Shift_R':
            self.generate_event("other", "shift")
        elif event.keysym == 'Control_L' or event.keysym == 'Control_R':
            self.generate_event("other", "control")
        elif event.keysym == 'Alt_L' or event.keysym == 'Alt_R':
            self.generate_event("other", "alt")
        else:
            self.generate_event("other", event.keysym)

    def click_event(self, event):
        """Emit a click event with the grid coordinates of the press."""
        x = event.x / self.size_per_coord
        y = event.y / self.size_per_coord
        self.generate_event("click", "%d %d" % (x, y))

    def refresh_event(self):
        """Emit an 'alarm'/'refresh' event whenever the configured interval elapsed."""
        current_time = self.milliseconds()
        threshold = current_time - self.timer - self.interval
        if threshold >= 0 and self.interval > 0:
            self.generate_event("alarm", "refresh")
            self.timer = current_time

    def generate_event(self, name, data):
        """Wrap name/data in an Event and enqueue it for get_event()."""
        event = Event(name, data)
        self.event_queue.put(event)

    def bind_events(self):
        """Attach all mouse and keyboard handlers to the canvas."""
        self.c.focus_set()  # to redirect keyboard input to this widget
        self.c.bind("<Motion>", self.motion_event)
        self.c.bind("<Enter>", self.enter_window_event)
        self.c.bind("<Leave>", self.leave_window_event)
        self.c.bind("<Alt-Key>", self.alt_number_event)
        self.c.bind("<Key>", self.key_event)
        self.c.bind("<Button-1>", self.click_event)
class LifeUserInterface(object):
    def __init__(self, width, height, scale=1.0):
        """Open the Game-of-Life display window.

        Constants exposed on the instance: DEAD, ALIVE.

        Arguments:
        - width: number of columns, minimum 1
        - height: number of rows, minimum 1
        - scale: optional zoom factor between 0.25 and 1.0
        """
        _verify_int(width, "Width", 1)
        _verify_int(height, "Height", 1)
        _verify_float(scale, 'Scale', 0.25, 1.0)
        global _ui_factory
        self.life_interface = _Life(width, height, _ui_factory.mainroot, scale)
        self.DEAD = _Life.DEAD
        self.ALIVE = _Life.ALIVE

    def place(self, x, y, color):
        """Queue a Life piece of the given 'color' for the cell at (x, y)."""
        _verify_int(x, 'X', 0, self.life_interface.width - 1)
        _verify_int(y, 'Y', 0, self.life_interface.height - 1)
        # valid colors: 0 = dead, 1 = alive (2 accepted for compatibility)
        _verify_int(color, 'Color', 0, 2)
        self.life_interface.place(x, y, color)

    def clear(self):
        """Reset every cell of the display to DEAD.

        Note: the text area is left untouched.
        """
        self.life_interface.clear()

    def show(self):
        """Flush all queued place()/clear() changes onto the screen."""
        self.life_interface.show()

    def get_event(self):
        """Block until an event arrives from the display and return it.

        The returned object carries two properties:
        - name: the group the event belongs to
        - data: event-specific payload for the user
        """
        return self.life_interface.get_event()

    def set_animation_speed(self, fps):
        """Request 'fps' repeating alarm events per second.

        A value of 0 or less stops the repetition. The theoretical upper
        bound is 1000, subject to system load. Each generated event
        (retrieved via get_event) has:
        - name: 'alarm'
        - data: 'refresh'
        """
        _verify_float(fps, "Animation speed")
        self.life_interface.set_animation_speed(fps)

    def print_(self, text):
        """Append 'text' to the text area; no trailing newline is added automatically."""
        _verify_str(text, "Text")
        self.life_interface.print_(text)

    def clear_text(self):
        """Remove all text from the text area of the display."""
        self.life_interface.clear_text()

    def wait(self, ms):
        """Pause the program for at least 'ms' milliseconds.

        Only a lower bound is guaranteed: on a busy system the actual wait
        may be longer (backed by the Python time module).
        """
        _verify_int(ms, "Waiting time", 0)
        self.life_interface.wait(ms)

    def random(self, maximum):
        """Return a random integer in the range 0 <= x < maximum (maximum must be >= 1)."""
        _verify_int(maximum, 'Random', 1)
        return self.life_interface.random(maximum)

    def close(self):
        """Destroy the display window and terminate the program."""
        self.life_interface.close()

    def stay_open(self):
        """Keep the window open by entering the Tk main loop.

        Only has an effect on Mac OS, where it prevents the window from
        closing when execution finishes. Call it last: the code does NOT
        continue past this statement.
        """
        global _ui_factory
        _ui_factory.mainroot.mainloop()
class _Life(object):
    """Backend window for LifeUserInterface.

    Builds a Toplevel with a canvas of pre-created cell rectangles (dead cells
    visible by default) plus a scrollable text area, and translates Tk
    callbacks into Event objects delivered through a queue.
    """
    # one cannot prevent users from editing 'constants', as constants simply do not exist in Python
    DEAD = 0
    ALIVE = 1
    BACKGROUND = "#000000"

    def __init__(self, width, height, mainroot, scale=1.0):
        # queues decouple the user's place()/event calls from the canvas updates
        self.to_show_queue = _Queue.Queue(maxsize=0)
        self.event_queue = _Queue.Queue(maxsize=0)
        # copy params
        self.width = width
        self.height = height
        self.scale = scale
        # Bug fix: this flag is read by motion_event() but was previously only
        # created inside the <Enter>/<Leave> handlers, so a <Motion> event
        # arriving first raised AttributeError. Start with the mouse "outside".
        self.mouse_on_screen = False
        # start the main window
        self.root = _tk.Toplevel(mainroot)
        self.root.title("LifeUserInterface")
        self.root.protocol("WM_DELETE_WINDOW", self.callback)
        self.root.bind("<Escape>", self.callback)
        self.root.resizable(False, False)
        # calculate sizes
        self.size_per_coord = int(25 * scale)
        self.text_height = int(100 * scale)
        # create main frame
        self.frame = _tk.Frame(self.root, width=self.size_per_coord * self.width,
                               height=self.size_per_coord * self.height + self.text_height)
        self.frame.pack_propagate(0)
        self.frame.pack()
        # boards hold per-cell lists of canvas item ids, one board per cell state
        self.dead_board = []  # for storing references to create_image
        self.alive_board = []
        self.img_refs = []  # for storing references to images - order: dead, alive
        # create and fill the canvas --> paintable area
        self.c = _tk.Canvas(self.frame, width=self.size_per_coord * self.width,
                            height=self.size_per_coord * self.height, bg=self.BACKGROUND, bd=0, highlightthickness=0)
        self.c.pack()
        self.last_x = -1  # used to generate mouseOver/Exit events
        self.last_y = -1  # used to generate mouseOver/Exit events
        self.fill_canvas()
        # create the textholder
        self.scrollbar = _tk.Scrollbar(self.frame)
        self.scrollbar.pack(side=_tk.RIGHT, fill=_tk.Y)
        self.textarea = _tk.Text(self.frame, yscrollcommand=self.scrollbar.set)
        self.textarea.pack(side=_tk.LEFT, fill=_tk.BOTH)
        self.scrollbar.config(command=self.textarea.yview)
        self.textarea.config(state=_tk.DISABLED)
        self.interval = 0
        self.alarm_speed = 0
        self.timer = self.milliseconds()
        global _ui_factory
        _ui_factory.mainroot.update()

    def callback(self, event=None):
        """Close the window and hard-exit the process (Escape / window close)."""
        self.root.destroy()
        _os._exit(0)

    def milliseconds(self):
        """Current wall-clock time in milliseconds."""
        return _time.time() * 1000

    def place(self, x, y, color):
        """Queue a cell change; it becomes visible only after show()."""
        element = _LifeHolder(x, y, color)
        self.to_show_queue.put(element)

    def clear(self):
        """Queue a DEAD placement for every cell of the board."""
        for x in range(self.width):
            for y in range(self.height):
                self.place(x, y, self.DEAD)

    def show(self):
        """Drain the placement queue and update canvas item visibility.

        For each queued cell, the item matching the requested color (0=dead,
        1=alive) is shown and the other variant is hidden.
        """
        try:
            while True:
                element = self.to_show_queue.get_nowait()
                position = []
                position.append(self.dead_board[element.x][element.y])
                position.append(self.alive_board[element.x][element.y])
                for i in range(len(position)):
                    if element.color == i:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.NORMAL)
                    else:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.HIDDEN)
        except _Queue.Empty:
            pass
        global _ui_factory
        _ui_factory.mainroot.update()

    def get_event(self):
        """Pump Tk and poll the event queue until an event is available."""
        global _ui_factory
        _ui_factory.mainroot.update()
        while True:
            try:
                self.refresh_event()
                event = self.event_queue.get_nowait()
                return event
            except _Queue.Empty:
                # sleep briefly (capped at 10 ms) so polling doesn't spin the CPU
                wait_time = min(self.interval, 10)
                self.wait(wait_time)
                _ui_factory.mainroot.update()

    def set_animation_speed(self, fps):
        """Configure the alarm interval; fps <= 0 disables it, fps is capped at 1000."""
        current_time = self.milliseconds()
        if fps <= 0:
            self.interval = 0
            self.timer = current_time
            return
        if fps > 1000:
            fps = 1000
        self.interval = int(1000.0 / fps)
        self.refresh_event()

    def print_(self, text):
        """Append text to the (normally read-only) text area and scroll to the end."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.insert(_tk.END, text)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def clear_text(self):
        """Delete all contents of the text area."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.delete(1.0, _tk.END)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def wait(self, ms):
        """Sleep for ms milliseconds; any interruption (e.g. Ctrl-C) closes the UI."""
        try:
            _time.sleep(ms * 0.001)
        except:  # deliberately broad: KeyboardInterrupt during sleep must close the window
            self.close()

    def close(self):
        """Destroy the window and hard-exit the process."""
        self.root.destroy()
        _os._exit(0)

    def random(self, maximum=1):
        """Return a random integer in 0 <= x < maximum."""
        return int(_random.random() * maximum)

    def create_piece(self, x0, y0, img, state_):
        """Create the canvas rectangle for one cell at pixel (x0, y0).

        img selects the variant (DEAD = white, ALIVE = blue); state_ is the
        initial Tk item state. Returns the list of item ids.
        """
        result = []
        if img == self.DEAD:
            r = 255
            g = 255
            b = 255
            x1 = x0
            y1 = y0
            # -1 from the second coordinate because the bottom and right borders are 1 pixel outside the boundary
            x2 = x0 + self.size_per_coord - 1
            y2 = y0 + self.size_per_coord - 1
            result.append(
                self.c.create_rectangle(x1, y1, x2, y2, state=state_, fill="#%02X%02X%02X" % (r, g, b), width=1))
        if img == self.ALIVE:
            r = 0
            g = 0
            b = 255
            x1 = x0
            y1 = y0
            # -1 from the second coordinate because the bottom and right borders are 1 pixel outside the boundary
            x2 = x0 + self.size_per_coord - 1
            y2 = y0 + self.size_per_coord - 1
            result.append(
                self.c.create_rectangle(x1, y1, x2, y2, state=state_, fill="#%02X%02X%02X" % (r, g, b), width=1))
        return result

    def create_life_pieces(self):
        """Pre-create both variants for every cell; dead cells start visible."""
        imgtype = self.DEAD, self.ALIVE
        boards = self.dead_board, self.alive_board
        for n in range(len(boards)):
            for i in range(self.width):
                boards[n].append([])
                for j in range(self.height):
                    x0 = self.size_per_coord * i
                    y0 = self.size_per_coord * j
                    state_ = _tk.HIDDEN
                    if n == 0:
                        state_ = _tk.NORMAL
                    img = self.create_piece(x0, y0, imgtype[n], state_)
                    boards[n][i].append(img)

    def fill_canvas(self):
        """Bind input events and populate the canvas with all cell items."""
        self.bind_events()
        self.create_life_pieces()

    def motion_event(self, event):
        """Emit mouseexit/mouseover pairs when the pointer crosses a cell boundary."""
        if not self.mouse_on_screen:
            return
        x_old = self.last_x
        y_old = self.last_y
        x_new = event.x / self.size_per_coord
        y_new = event.y / self.size_per_coord
        x_change = int(x_old) != int(x_new)
        y_change = int(y_old) != int(y_new)
        if x_change or y_change:
            self.generate_event("mouseexit", "%d %d" % (x_old, y_old))
            self.generate_event("mouseover", "%d %d" % (x_new, y_new))
            self.last_x = x_new
            self.last_y = y_new

    def enter_window_event(self, event):
        """Pointer entered the canvas: emit mouseover and mark the mouse as inside."""
        x_new = event.x / self.size_per_coord
        y_new = event.y / self.size_per_coord
        self.generate_event("mouseover", "%d %d" % (x_new, y_new))
        self.last_x = x_new
        self.last_y = y_new
        self.mouse_on_screen = True

    def leave_window_event(self, event):
        """Pointer left the canvas: emit mouseexit and mark the mouse as outside."""
        self.generate_event("mouseexit", "%d %d" % (self.last_x, self.last_y))
        self.mouse_on_screen = False

    def alt_number_event(self, event):
        """Emit alt_number events for Alt+digit key presses."""
        if event.char == event.keysym:
            if ord(event.char) >= ord('0') and ord(event.char) <= ord('9'):
                self.generate_event("alt_number", event.char)

    def key_event(self, event):
        """Classify a key press into number/letter/arrow/other events."""
        if event.char == event.keysym:
            # printable single-character keys
            if ord(event.char) >= ord('0') and ord(event.char) <= ord('9'):
                self.generate_event("number", event.char)
            elif ord(event.char) >= ord('a') and ord(event.char) <= ord('z'):
                self.generate_event("letter", event.char)
            elif ord(event.char) >= ord('A') and ord(event.char) <= ord('Z'):
                self.generate_event("letter", event.char)
            else:
                self.generate_event("other", event.char)
        elif event.keysym == 'Up':
            self.generate_event("arrow", "u")
        elif event.keysym == 'Down':
            self.generate_event("arrow", "d")
        elif event.keysym == 'Left':
            self.generate_event("arrow", "l")
        elif event.keysym == 'Right':
            self.generate_event("arrow", "r")
        elif event.keysym == 'Multi_Key':
            return
        elif event.keysym == 'Caps_Lock':
            self.generate_event("other", "caps lock")
        elif event.keysym == 'Num_Lock':
            self.generate_event("other", "num lock")
        elif event.keysym == 'Shift_L' or event.keysym == 'Shift_R':
            self.generate_event("other", "shift")
        elif event.keysym == 'Control_L' or event.keysym == 'Control_R':
            self.generate_event("other", "control")
        elif event.keysym == 'Alt_L' or event.keysym == 'Alt_R':
            self.generate_event("other", "alt")
        else:
            self.generate_event("other", event.keysym)

    def click_event(self, event):
        """Emit a click event with the grid coordinates of the press."""
        x = event.x / self.size_per_coord
        y = event.y / self.size_per_coord
        self.generate_event("click", "%d %d" % (x, y))

    def refresh_event(self):
        """Emit an 'alarm'/'refresh' event whenever the configured interval elapsed."""
        current_time = self.milliseconds()
        threshold = current_time - self.timer - self.interval
        if threshold >= 0 and self.interval > 0:
            self.generate_event("alarm", "refresh")
            self.timer = current_time

    def generate_event(self, name, data):
        """Wrap name/data in an Event and enqueue it for get_event()."""
        event = Event(name, data)
        self.event_queue.put(event)

    def bind_events(self):
        """Attach all mouse and keyboard handlers to the canvas."""
        self.c.focus_set()  # to redirect keyboard input to this widget
        self.c.bind("<Motion>", self.motion_event)
        self.c.bind("<Enter>", self.enter_window_event)
        self.c.bind("<Leave>", self.leave_window_event)
        self.c.bind("<Alt-Key>", self.alt_number_event)
        self.c.bind("<Key>", self.key_event)
        self.c.bind("<Button-1>", self.click_event)
class Event(object):
    def __init__(self, name, data):
        """This class holds the name and data for each event in their respective variables.

        Variables:
        - name
        - data

        Example to access with SnakeUserInterface:
        ui = SnakeUserInterface(5, 5)   # 5 by 5 grid for testing purposes
        your_variable = ui.get_event()  # code will block until an event comes
        # your_variable now points to an event
        print(your_variable.name, your_variable.data)

        List of events:
        - name: mouseover
          data: x and y coordinates (as integers), separated by a space
          generated when mouse goes over a coordinate on the window
        - name: mouseexit
          data: x and y coordinates (as integers), separated by a space
          generated when mouse exits a coordinate on the window
        - name: click
          data: x and y coordinates (as integers), separated by a space
          generated when the user clicks on a coordinate on the window
        - name: alarm
          data: refresh
          generated as often per second as the user set the animation speed to; note that the data is exactly as it says: "refresh"
        - name: letter
          data: the letter that got pressed
          generated when the user presses on a letter (A to Z; can be lowercase or uppercase depending on shift/caps lock)
        - name: number
          data: the number (as a string) that got pressed
          generated when the user presses on a number (0 to 9)
        - name: alt_number
          data: the number (as a string) that got pressed
          generated when the user presses on a number (0 to 9) while at the same time pressing the Alt key
        - name: arrow
          data: the arrow key that got pressed, given by a single letter
          generated when the user presses on an arrow key, data is then one of: l, r, u, d
        - name: other
          data: data depends on key pressed
          generated when the user pressed a different key than those described above
          possible data:
          - "caps lock"
          - "num lock"
          - "alt"
          - "control"
          - "shift"
          more data can exist and are recorded (read: they generate events), but not documented
        """
        self.name = name
        self.data = data

    def __repr__(self):
        # Debug-friendly representation showing both fields of the event.
        return "%s(name=%r, data=%r)" % (type(self).__name__, self.name, self.data)
class StockMarketUserInterface(object):
    def __init__(self, enable_cache=False):
        """
        User interface for the stocks assignment.

        Variables:
        enable_cache: if set to True retrieved data will be cached in the
        local file ".stock_cache".
        """
        if not have_mpl:
            # Fix: the original message named HouseMarketUserInterface here.
            raise Exception('Use of StockMarketUserInterface has been disabled.')
        self._enable_cache = enable_cache

    def _yql_query(self, q, _format, env):
        """Run a YQL query against the Yahoo endpoint and return the parsed JSON.

        Legacy helper: get_stock_quotes() now uses _av_query() instead, but
        this is kept for backward compatibility.
        """
        req = {
            'q': q,
            'format': _format,
            'env': env
        }
        data = urllib.parse.urlencode(req)
        whole_url = YAHOO_URL + '?' + data
        request = urllib.request.Request(whole_url)
        handler = urllib.request.urlopen(request)
        response = json.loads(handler.read())
        return response

    def _av_query(self, symbol):
        """Fetch the full daily-adjusted time series for *symbol* from Alpha Vantage."""
        whole_url = ALPHA_VANTAGE_URL + "?function=TIME_SERIES_DAILY_ADJUSTED&apikey=Z2YF&symbol=%s&outputsize=full" % symbol
        request = urllib.request.Request(whole_url)
        handler = urllib.request.urlopen(request)
        response = json.loads(handler.read())
        if 'Error Message' in response:  # retry once... AV fails... decently often
            request = urllib.request.Request(whole_url)
            handler = urllib.request.urlopen(request)
            response = json.loads(handler.read())
        return response

    def _check_time_interval(self, start, end):
        """Validate that *start* and *end* parse as yyyy-mm-dd dates.

        The old 365-day interval limit (a Yahoo API restriction) is no longer
        enforced since the Alpha Vantage backend returns the full history;
        strptime still raises ValueError on malformed dates.
        """
        st = _time.strptime(start, "%Y-%m-%d")
        en = _time.strptime(end, "%Y-%m-%d")
        ds = _datetime.datetime.fromtimestamp(_time.mktime(st))
        de = _datetime.datetime.fromtimestamp(_time.mktime(en))

    def _load_cache(self, key):
        """Return the cached value for *key*, or None if unavailable."""
        try:
            # 'with' guarantees the handle is closed (the original leaked it).
            with open(".stock_cache", "rb") as fp:
                db = _pickle.load(fp)
            return db.get(key, None)
        except Exception:
            return None

    def _store_cache(self, key, value):
        """Persist key -> value into the pickle cache, preserving existing entries."""
        db = {}
        try:
            with open(".stock_cache", "rb") as fp:
                try:
                    db = _pickle.load(fp)
                except Exception:
                    pass
        except Exception:
            pass  # no cache file yet: start from an empty dict
        with open(".stock_cache", "wb+") as fp:
            db[key] = value
            _pickle.dump(db, fp)

    def _cache_hash(self, symbol, start, end):
        """Build the cache key for a (symbol, start, end) request."""
        return symbol + start + end

    def _av_rekey(self, dictionary):
        """Convert one Alpha Vantage day record into the legacy Yahoo key names.

        Values are converted to float; keys missing from *dictionary* are
        simply omitted from the result.
        """
        rekey = {
            'Adj_Close': '5. adjusted close',  # for the original assignment
            'open': '1. open',
            'high': '2. high',
            'low': '3. low',
            'close': '4. close',
            'volume': '6. volume'
        }
        new = {}
        for v, k in rekey.items():
            if k in dictionary:
                new[v] = float(dictionary[k])
        return new

    def get_stock_quotes(self, symbol, start, end):
        """
        Returns a list of dictionaries containing historical stock quotes for
        variable 'symbol' (fetched from Alpha Vantage, keyed in the legacy
        Yahoo style), ordered most recent day first.

        Variables:
        - symbol: (stock symbol e.g. AAPL, IBM, MSFT)
        - start: start date of historical interval. Format: yyyy-mm-dd
        - end: end date of historical interval. Format: yyyy-mm-dd
        """
        self._check_time_interval(start, end)
        if self._enable_cache:
            cached = self._load_cache(self._cache_hash(symbol, start, end))
            if cached:
                return cached
        response = self._av_query(symbol)
        if 'Error Message' in response:
            raise Exception("No data available for quote symbol %s." % symbol)
        results = response['Time Series (Daily)']  # type: dict
        # The API response is not sorted: filter to the requested interval,
        # then sort by date (newest first) before re-keying.
        st = _time.strptime(start, "%Y-%m-%d")
        sp = _time.strptime(end, "%Y-%m-%d")
        quotes = [t for t in [(_time.strptime(x[0].split()[0], "%Y-%m-%d"), x[1]) for x in list(results.items())] if sp >= t[0] >= st]
        formatted_quotes = [self._av_rekey(x[1]) for x in sorted(quotes, key=lambda x: x[0], reverse=True)]
        if self._enable_cache:
            self._store_cache(self._cache_hash(symbol, start, end), formatted_quotes)
        return formatted_quotes

    def plot(self, prices, color, **kwargs):
        """
        Plots the list of prices. With the color specified by the string 'color'.
        Possible colors: 'b', 'g', 'r'
        Use show() to display the plotted data.

        Variables:
        prices: list of floats with prices to be plotted.
        **kwargs: (optional) additional kwargs.
        """
        t = plt.arange(0, len(prices), 1)
        lines = plt.plot(t, prices, c=color)
        kwargs['linewidth'] = 2.0
        plt.setp(lines, **kwargs)
        return lines

    def show(self):
        """
        Draw the current state of the ui.
        """
        plt.ylabel('Returns')
        plt.xlabel('Day')
        plt.show()
class HouseMarketUserInterface(object):
    def __init__(self):
        """UI for the house-market assignment; requires matplotlib (have_mpl)."""
        if not have_mpl:
            raise Exception('Use of HouseMarketUserInterface has been disabled.')
        self.max_x = 0  # Keep track of max observed x-value (sizes plot_line / show)

    def plot_dot(self, x, y, color, **kwargs):
        """
        Plot the point (x,y) in the ui. With the color specified by the string 'color'.
        Possible colors: 'b', 'g', 'r'

        Arguments:
        x: float
        y: float

        Advanced functionality: a list of floats may be supplied to both x and y to draw many points in one step.
        """
        if isinstance(x, list):
            self.max_x = max(max(x), self.max_x)
        else:
            self.max_x = max(x, self.max_x)
        plt.plot(x, y, 'o', c=color, **kwargs)

    def plot_line(self, *args, **kwargs):
        """
        Plot the polynomial represented by the coefficients provided.
        E.g. plot_line(2,1) would plot the function '2 + 1 * x'
        plot_line(3,4,5) plots '5*x^2 + 4*x + 3'
        """
        t = plt.arange(0.0, self.max_x, 0.01)
        # evaluate sum(args[i] * x**i) over the observed x-range
        func = lambda x: sum([args[i] * (x ** i) for i in range(len(args))])
        return plt.plot(t, func(t), **kwargs)

    def show(self):
        """
        Draw the current state of the ui.
        """
        plt.ylabel('House Price')
        plt.xlabel('House Size (m^2)')
        orig_limit_x = plt.xlim()
        orig_limit_y = plt.ylim()
        # widen the axes by 10% so points at the edge are not clipped
        # (fix: the original stored these return values in an unused variable)
        plt.xlim(orig_limit_x[0], self.max_x + 0.1 * self.max_x)
        plt.ylim(orig_limit_y[0] - 0.1 * orig_limit_y[0], orig_limit_y[1])
        plt.show()
# Module-wide _Factory instance; its `mainroot` attribute is used as the Tk
# parent for every UI window created in this module (see the *UserInterface
# classes, which call _ui_factory.mainroot.update()/mainloop()).
_ui_factory = _Factory()
'''
class StockMarketUserInterface(object):
def __init__(self, enable_cache=False):
"""
User interface for the stocks assigment.
Variables:
enable_cache: if set to True retrieved data will be cached.
"""
if not have_mpl:
raise _IPyException('Use of HouseMarketUserInterface has been disabled.')
self._enable_cache = enable_cache
pass
def _yql_query(self, q, _format, env):
req = {
'q': q,
'format': _format,
'env': env
}
data = urllib.parse.urlencode(req)
whole_url = YAHOO_URL + '?' + data
request = urllib.request.Request(whole_url)
handler = urllib.request.urlopen(request)
response = json.loads(handler.read())
return response
def _check_time_interval(self, start, end):
st = _time.strptime(start, "%Y-%m-%d")
en = _time.strptime(end, "%Y-%m-%d")
ds = _datetime.datetime.fromtimestamp(_time.mktime(st))
de = _datetime.datetime.fromtimestamp(_time.mktime(en))
ddays = (de - ds).days
if ddays > 365:
raise Exception("The largest time interval the API can handle is 365 days.")
def _load_cache(self, key):
try:
fp = open(".stock_cache", "rb")
db = _pickle.load(fp)
return db.get(key, None)
except Exception:
return None
def _store_cache(self, key, value):
db = {}
try:
with open(".stock_cache", "rb") as fp:
try:
db = _pickle.load(fp)
except Exception:
pass
except Exception:
pass
with open(".stock_cache", "wb+") as fp:
db[key] = value
_pickle.dump(db, fp)
def _cache_hash(self, symbol, start, end):
return symbol + start + end
def get_stock_quotes(self, symbol, start, end):
"""
Returns a list of dictionaries containing Yahoo historical stock quotes for variable 'symbol'.
Variables:
- symbol: (stock symbol e.g. AAPL, IBM, MSFT)
- start: start date of historical interval. Format: yyyy-mm-dd
- end: end date of historical interval. Format: yyyy-mm-dd
The Yahoo API supports a max time interval of 365 day, thus an exception is raised if
the interval between start and end > 365 days.
"""
self._check_time_interval(start, end)
if self._enable_cache:
cached = self._load_cache(self._cache_hash(symbol, start, end))
if cached:
return cached['query']['results']['quote']
response = self._yql_query(
'select * from yahoo.finance.historicaldata where symbol = "%s" and startDate = "%s" and endDate = "%s"' % (
symbol, start, end),
'json',
'store://datatables.org/alltableswithkeys'
)
results = response['query']['results']
if results is None:
raise Exception("No data avalable for quote symbol %s." % (symbol))
quotes = results['quote']
if self._enable_cache:
self._store_cache(self._cache_hash(symbol, start, end), response)
return quotes
def plot(self, prices, color, **kwargs):
"""
Plots the list of prices. With the color specified by the string 'color'.
Possible colors: 'b', 'g', 'r'
Use show() to display the plotted data.
Variables:
prices: list of floats with prices to be plotted.
**kwargs: (optional) additional kwargs.
"""
t = plt.arange(0, len(prices), 1)
lines = plt.plot(t, prices, c=color)
kwargs['linewidth'] = 2.0
plt.setp(lines, **kwargs)
return lines
def show(self):
"""
Draw the current state of the ui.
"""
plt.ylabel('Returns')
plt.xlabel('Day')
plt.show()
class HouseMarketUserInterface(object):
def __init__(self):
if not have_mpl:
raise _IPyException('Use of HouseMarketUserInterface has been disabled.')
self.max_x = 0 # Keep track of max observer x-value
def plot_dot(self, x, y, color, **kwargs):
"""
Plot the point (x,y) in the ui. With the color specified by the string 'color'.
Possible colors: 'b', 'g', 'r'
Arguments:
x: float
y: float
Advanced functionality: a list of floats may be supplied to both x and y to draw many points in one step.
"""
if isinstance(x, list):
self.max_x = max(max(x), self.max_x)
else:
self.max_x = max(x, self.max_x)
plt.plot(x, y, 'o', c=color, **kwargs)
def plot_line(self, *args, **kwargs):
"""
Plot the polynomial represented by the coefficients provided.
E.g. plot_line(2,1) would plot the function '2 + 1 * x'
plot_line(3,4,5) plots '5*x^2 + 4*x + 3'
"""
t = plt.arange(0.0, self.max_x, 0.01)
func = lambda x: sum([args[i] * (x ** i) for i in range(len(args))])
return plt.plot(t, func(t), **kwargs)
def show(self):
"""
Draw the current state of the ui.
"""
plt.ylabel('House Price')
plt.xlabel('House Size (m^2)')
orig_limit_x = plt.xlim()
orig_limit_y = plt.ylim()
a = plt.xlim(orig_limit_x[0], self.max_x + 0.1 * self.max_x)
a = plt.ylim(orig_limit_y[0] - 0.1 * orig_limit_y[0], orig_limit_y[1])
plt.show()
''' | 35.032641 | 160 | 0.59162 | import tkinter as _tk
import tkinter.dialog as _Dialog
import tkinter.filedialog as _tkFileDialog
import tkinter.messagebox as _tkMessageBox
import queue as _Queue
import threading as _threading
import time as _time
import os as _os
import random as _random
import sys as _sys
import datetime as _datetime
import pickle as _pickle
import urllib.request, urllib.error, urllib.parse
import urllib.request, urllib.parse, urllib.error
import json
# Some headless environments leave DISPLAY unset; the interactive matplotlib
# backends need one, so default to the local X display.
if _os.environ.get('DISPLAY','') == '':
    _os.environ.__setitem__('DISPLAY', ':0.0')
# Matplotlib is optional: when it cannot be imported the plotting UIs are
# disabled and have_mpl stays False (checked in the UI constructors).
have_mpl = False
try:
    import matplotlib as mpl
    # Backend selection is platform-dependent and must happen before pylab
    # is imported.
    if _sys.platform == 'darwin':
        mpl.use('TkAgg')
    if _sys.platform == 'linux' or _sys.platform == 'linux2':
        mpl.rcParams['backend'] = 'QT4Agg'
    import pylab as plt
    if _sys.platform == 'linux' or _sys.platform == 'linux2':
        plt.switch_backend('QT4Agg')  # Use QT4 for linux. Bug in TK.
    have_mpl = True
except ImportError:
    print("Could not import matplotlib. HouseMarketUserInterface and StockMarketUserInterface have been disabled.")
# Endpoints for the (legacy) stock-quote features.
YAHOO_URL = 'https://query.yahooapis.com/v1/public/yql'
ALPHA_VANTAGE_URL = 'http://www.alphavantage.co/query'
class _IPyException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
def _verify_int(value_var, string_var, minimum=None, maximum=None):
if not isinstance(value_var, int):
value = "%s not an int for %s, got %s" % (value_var, string_var, str(type(value_var))[1:-1])
raise _IPyException(value)
_verify_input(value_var, string_var, minimum, maximum)
def _verify_float(value_var, string_var, minimum=None, maximum=None):
if not isinstance(value_var, float):
if not isinstance(value_var, int):
value = "%s is not a float or int for %s, got %s" % (value_var, string_var, str(type(value_var))[1:-1])
raise _IPyException(value)
_verify_input(value_var, string_var, minimum, maximum)
def _verify_str(value_var, string_var):
if not isinstance(value_var, str):
value = "%s is not a string for %s, got %s" % (value_var, string_var, str(type(value_var))[1:-1])
raise _IPyException(value)
def _verify_bool(value_var, string_var):
if not isinstance(value_var, bool):
value = "%s is not a boolean for %s, got %s" % (value_var, string_var, str(type(value_var))[1:-1])
raise _IPyException(value)
def _verify_input(value_var, string_var, minimum=None, maximum=None):
if minimum is None:
minimum = float('-inf')
if maximum is None:
maximum = float('inf')
if value_var >= minimum:
if value_var <= maximum:
return
value = "%s is out of bounds, expected range: %s to %s, got: %s" % (string_var, minimum, maximum, value_var)
raise _IPyException(value)
class _OthelloReplayHolder(object):
# used in the queue to hold values of the changes to be made
def __init__(self, x, y, color):
self.x = x
self.y = y
self.color = color
class _BarChartHolder(object):
# used in the queue to hold values of the changes to be made
def __init__(self, bar_index):
self.bar_index = bar_index
class _BarChartNameHolder(object):
# used in the queue to hold values of the changes to be made
def __init__(self, bar_index, bar_name):
self.bar_index = bar_index
self.bar_name = bar_name
class _SnakeHolder(object):
def __init__(self, x, y, color):
self.x = x
self.y = y
self.color = color
class _LifeHolder(object):
def __init__(self, x, y, color):
self.x = x
self.y = y
self.color = color
# Module-level singleton for the hidden Tk root; assigned a _Factory
# instance later in the module (presumably at import time — confirm).
_ui_factory = None


def file_input():
    """Pop a file-open dialog and return the chosen file's entire contents.

    Returns None when the user cancels.  Side effect: the chosen file
    replaces sys.stdin (done inside _AskInput).
    """
    global _ui_factory
    chosen = _AskInput(_ui_factory.mainroot).f
    if chosen == '':
        return None
    return str(_sys.stdin.read())


def ask_user(question, *options):
    """Show a modal dialog with `question` and one button per option.

    Returns the option string the user clicked; raises _IPyException when
    no options were supplied.
    """
    if not options:
        raise _IPyException("User needs to be able to select at least 1 answer")
    global _ui_factory
    return _AskUser(_ui_factory.mainroot, question, options).answer
class _Factory():
    """Owns the shared, hidden Tk root window used by every UI class."""

    def __init__(self):
        root = _tk.Tk()
        root.withdraw()  # keep the master window invisible
        root.update()
        self.mainroot = root
class _AskInput(object):
    """Modal file-open dialog; redirects sys.stdin to the chosen file.

    After construction, self.f holds the selected path ('' on cancel).
    """

    def __init__(self, mainroot):
        root = _tk.Toplevel(mainroot)
        root.withdraw()
        self.f = _tkFileDialog.askopenfilename(parent=root)
        # Fix: the original used `is not ''` — an identity comparison with a
        # string literal that relies on CPython interning and emits a
        # SyntaxWarning on Python 3.8+.  Equality is the intended check.
        if self.f != '':
            _sys.stdin = open(self.f)
        root.destroy()
class _AskUser(object):
    """Modal question dialog; the clicked option is stored in self.answer."""

    def __init__(self, mainroot, question, options):
        root = _tk.Toplevel(mainroot)
        root.withdraw()
        dialog = _Dialog.Dialog(
            None,
            title="",
            text=question,
            default=0,
            bitmap=_tkMessageBox.QUESTION,
            strings=options,
        )
        # Dialog.num is the index of the button the user pressed.
        self.answer = options[dialog.num]
        root.destroy()
class OthelloReplayUserInterface(object):
    """Validating facade over the _Othello replay window.

    Placements are queued and only rendered when show() is called.
    """

    def __init__(self, scale=1.0):
        """Open the replay window; `scale` shrinks it (0.25-1.0)."""
        _verify_float(scale, 'Scale', 0.25, 1.0)
        global _ui_factory
        self.othello_replay = _Othello(_ui_factory.mainroot, scale)
        # Re-export the board constants for caller convenience.
        self.NUMBER_OF_ROWS = _Othello.NUMBER_OF_ROWS
        self.NUMBER_OF_COLUMNS = _Othello.NUMBER_OF_COLUMNS
        self.EMPTY = _Othello.EMPTY
        self.WHITE = _Othello.WHITE
        self.BLACK = _Othello.BLACK

    def place(self, x, y, color):
        """Queue piece `color` at cell (x, y).

        color: 0 = empty, 1 = white, 2 = black, 3 = white_t, 4 = black_t.
        """
        _verify_int(x, 'X', 0, self.NUMBER_OF_COLUMNS - 1)
        _verify_int(y, 'Y', 0, self.NUMBER_OF_ROWS - 1)
        _verify_int(color, 'Color', 0, 4)
        self.othello_replay.place(x, y, color)

    def place_transparent(self, x, y, color):
        """Queue the transparent variant of `color` (0 empty, 1 white, 2 black)."""
        _verify_int(x, 'X', 0, self.NUMBER_OF_COLUMNS - 1)
        _verify_int(y, 'Y', 0, self.NUMBER_OF_ROWS - 1)
        _verify_int(color, 'Color', 0, 2)
        # Transparent piece codes sit 2 above their solid counterparts.
        target = self.EMPTY if color == self.EMPTY else color + 2
        self.place(x, y, target)

    def clear(self):
        """Queue the removal of every piece on the board."""
        self.othello_replay.clear()

    def show(self):
        """Drain the queue and render all pending placements."""
        self.othello_replay.show()

    def print_(self, text):
        """Append `text` to the text area below the board."""
        _verify_str(text, "Text")
        self.othello_replay.print_(text)

    def clear_text(self):
        """Empty the text area below the board."""
        self.othello_replay.clear_text()

    def wait(self, ms):
        """Sleep for `ms` milliseconds."""
        _verify_int(ms, "Waiting time", 0)
        self.othello_replay.wait(ms)

    def close(self):
        """Destroy the window and terminate the process."""
        self.othello_replay.close()

    def stay_open(self):
        """Hand control to the Tk main loop so the window stays open."""
        global _ui_factory
        _ui_factory.mainroot.mainloop()
class _Othello(object):
    """Tk window backing OthelloReplayUserInterface.

    place()/clear() only queue cell updates; show() drains the queue and
    toggles the visibility of pre-created canvas items, so the caller
    decides when a frame becomes visible.
    """
    # one cannot prevent users from editing 'constants', as constants simply do not exist in Python
    NUMBER_OF_ROWS = 8
    NUMBER_OF_COLUMNS = 8
    EMPTY = 0
    WHITE = 1
    BLACK = 2
    # components of the dark-green board background colour
    r = 20
    g = 120
    b = 0
    BACKGROUND = "#%02X%02X%02X" % (r, g, b)  # BACKGROUND = "#147800"?

    def __init__(self, mainroot, scale=1.0):
        """Build the board window as a Toplevel of `mainroot`; `scale` shrinks it."""
        # create queue to store changes to placings
        self.to_show_queue = _Queue.Queue(maxsize=0)
        # start the main window
        self.root = _tk.Toplevel(mainroot)
        self.root.title("OthelloReplayUserInterface")
        self.root.protocol("WM_DELETE_WINDOW", self.callback)
        self.root.bind("<Escape>", self.callback)
        self.root.resizable(False, False)
        # calculate sizes
        self.text_height = int(150 * scale)
        self.othello_size = int(800 * scale)
        # create main frame
        self.frame = _tk.Frame(self.root, width=self.othello_size, height=self.othello_size + self.text_height)
        self.frame.pack_propagate(0)
        self.frame.pack()
        # create board to hold references to othello-pieces
        self.white_board = []  # for storing references to create_image
        self.black_board = []
        self.white_ghost_board = []
        self.black_ghost_board = []
        self.img_refs = []  # for storing references to images - order: white, black
        # create and fill the canvas --> paintable area
        self.c = _tk.Canvas(self.frame, width=self.othello_size, height=self.othello_size, bg=self.BACKGROUND, bd=0,
                            highlightthickness=0)
        self.c.pack()
        self.c.focus_set()
        self.fill_canvas()
        # create the textholder
        self.scrollbar = _tk.Scrollbar(self.frame)
        self.scrollbar.pack(side=_tk.RIGHT, fill=_tk.Y)
        self.textarea = _tk.Text(self.frame, yscrollcommand=self.scrollbar.set, width=self.othello_size)
        self.textarea.pack(side=_tk.LEFT, fill=_tk.BOTH)
        self.scrollbar.config(command=self.textarea.yview)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def callback(self, event=None):
        """Window-close / Escape handler: destroy the window and exit the process."""
        self.root.destroy()
        _os._exit(0)

    def place(self, x, y, color):
        """Queue colour `color` for cell (x, y); rendered later by show()."""
        element = _OthelloReplayHolder(x, y, color)
        self.to_show_queue.put(element)

    def clear(self):
        """Queue an EMPTY placement for every cell on the board."""
        for x in range(self.NUMBER_OF_COLUMNS):
            for y in range(self.NUMBER_OF_ROWS):
                self.place(x, y, self.EMPTY)

    def show(self):
        """Drain the queue, showing/hiding pre-created canvas items per colour."""
        try:
            while True:
                element = self.to_show_queue.get_nowait()
                # item lists in colour order: 1 white, 2 black, 3 white_t, 4 black_t
                position = []
                position.append(self.white_board[element.x][element.y])
                position.append(self.black_board[element.x][element.y])
                position.append(self.white_ghost_board[element.x][element.y])
                position.append(self.black_ghost_board[element.x][element.y])
                for i in range(len(position)):
                    if element.color == i + 1:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.NORMAL)
                    else:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.HIDDEN)
        except _Queue.Empty:
            pass
        global _ui_factory
        _ui_factory.mainroot.update()

    def print_(self, text):
        """Append `text` to the read-only text area below the board."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.insert(_tk.END, text)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def clear_text(self):
        """Delete all text from the read-only text area."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.delete(1.0, _tk.END)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def wait(self, ms):
        """Sleep `ms` milliseconds; an interrupted sleep closes the window."""
        try:
            _time.sleep(ms * 0.001)
        except:
            self.close()

    def close(self):
        """Destroy the window and terminate the process."""
        self.root.destroy()
        _os._exit(0)

    def create_othello_grid(self):
        """Draw the grid lines plus the a-h / 1-8 labels along every edge."""
        for i in range(self.NUMBER_OF_COLUMNS + 1):
            x0 = self.xpad + self.xstep * i
            y0 = self.ypad
            x1 = x0
            y1 = self.ypad + self.ystep * self.NUMBER_OF_ROWS + 1
            coords = x0, y0, x1, y1
            self.c.create_line(coords, fill='black')
        for j in range(self.NUMBER_OF_ROWS + 1):
            x0 = self.xpad
            y0 = self.ypad + self.ystep * j
            x1 = self.xpad + self.xstep * self.NUMBER_OF_COLUMNS + 1
            y1 = y0
            coords = x0, y0, x1, y1
            self.c.create_line(coords, fill='black')
        # column letters above and below the board
        for i in range(self.NUMBER_OF_COLUMNS):
            x0 = self.xpad + self.xstep / 2 + self.xstep * i
            y0 = self.ypad / 2
            x1 = x0
            y1 = self.othello_size - self.ystep / 2
            coords0 = x0, y0
            coords1 = x1, y1
            self.c.create_text(coords0, text=chr(ord('a') + i))
            self.c.create_text(coords1, text=chr(ord('a') + i))
        # row numbers left and right of the board
        for j in range(self.NUMBER_OF_ROWS):
            x0 = int(self.xpad / 2)
            y0 = self.ypad + self.ystep / 2 + self.ystep * j
            x1 = self.othello_size - self.xstep / 2
            y1 = y0
            coords0 = x0, y0
            coords1 = x1, y1
            self.c.create_text(coords0, text='%s' % (j + 1))
            self.c.create_text(coords1, text='%s' % (j + 1))

    def mix_color(self, c1, c2, mix):
        """Return c1 unchanged, or its average with c2 when mix is non-zero."""
        return c1 if mix == 0 else int((c1 + c2) / 2)

    def create_piece(self, x0, y0, img, mix):
        """Create one hidden disc at pixel (x0, y0).

        `img` selects WHITE or BLACK; a non-zero `mix` blends the disc with
        the board background to get the transparent ("ghost") variant.
        Returns the list of canvas item ids making up the piece.
        """
        result = []
        if img == self.WHITE:
            r = self.mix_color(255, self.r, mix)
            g = self.mix_color(255, self.g, mix)
            b = self.mix_color(255, self.b, mix)
            scale = 0.8
            x1 = x0 + (1.0 - scale) / 2.0 * self.xstep
            y1 = y0 + (1.0 - scale) / 2.0 * self.ystep
            x2 = x0 + (1.0 - (1.0 - scale) / 2.0) * self.xstep
            y2 = y0 + (1.0 - (1.0 - scale) / 2.0) * self.ystep
            result.append(
                self.c.create_oval(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
        if img == self.BLACK:
            r = self.mix_color(0, self.r, mix)
            g = self.mix_color(0, self.g, mix)
            b = self.mix_color(0, self.b, mix)
            scale = 0.8
            x1 = x0 + (1.0 - scale) / 2.0 * self.xstep
            y1 = y0 + (1.0 - scale) / 2.0 * self.ystep
            x2 = x0 + (1.0 - (1.0 - scale) / 2.0) * self.xstep
            y2 = y0 + (1.0 - (1.0 - scale) / 2.0) * self.ystep
            result.append(
                self.c.create_oval(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
        return result

    def create_othello_pieces(self):
        """Pre-create hidden piece items for every cell and every colour variant."""
        mixer = 0, 0, 1, 1
        imgtype = self.WHITE, self.BLACK, self.WHITE, self.BLACK
        boards = self.white_board, self.black_board, self.white_ghost_board, self.black_ghost_board
        for n in range(len(boards)):
            for i in range(self.NUMBER_OF_COLUMNS):
                boards[n].append([])
                for j in range(self.NUMBER_OF_ROWS):
                    x0 = self.xpad + self.xstep * i
                    y0 = self.ypad + self.ystep * j
                    img = self.create_piece(x0, y0, imgtype[n], mixer[n])
                    boards[n][i].append(img)

    def fill_canvas(self):
        """Compute cell geometry, then draw the grid and all hidden pieces."""
        self.xstep = int(self.othello_size / (self.NUMBER_OF_COLUMNS + 2))
        self.ystep = int(self.othello_size / (self.NUMBER_OF_ROWS + 2))
        self.xpad = self.othello_size - self.NUMBER_OF_COLUMNS * self.xstep / 2 - self.othello_size / 2
        self.ypad = self.othello_size - self.NUMBER_OF_ROWS * self.ystep / 2 - self.othello_size / 2
        self.create_othello_grid()
        self.create_othello_pieces()
class BarChartUserInterface(object):
    """Validating facade over the _BarChart Tk window.

    Bar increments are queued by raise_bar() and rendered by show().
    """

    def __init__(self, bar_count):
        """Open a chart with `bar_count` bars, all starting at zero."""
        _verify_int(bar_count, "Bar count", 1)
        global _ui_factory
        self.bar_chart = _BarChart(bar_count, _ui_factory.mainroot)

    def set_bar_name(self, bar_index, text):
        """Label bar `bar_index` with `text` (drawn when names are enabled)."""
        _verify_int(bar_index, "Bar index", 0, self.bar_chart.bar_count - 1)
        _verify_str(text, "Text")
        self.bar_chart.set_bar_name(bar_index, text)

    def raise_bar(self, bar_index):
        """Queue a +1 increment of bar `bar_index`."""
        _verify_int(bar_index, "Bar index", 0, self.bar_chart.bar_count - 1)
        self.bar_chart.raise_bar(bar_index)

    def show(self):
        """Apply all queued increments and redraw the chart."""
        self.bar_chart.show()

    def show_names(self, value):
        """Toggle drawing of bar names under the bars."""
        _verify_bool(value, "Show names")
        self.bar_chart.show_names(value)

    def show_values(self, value):
        """Toggle drawing of bar values above the bars."""
        _verify_bool(value, "Show values")
        self.bar_chart.show_values(value)

    def wait(self, ms):
        """Sleep for `ms` milliseconds."""
        _verify_int(ms, "Waiting time", 0)
        self.bar_chart.wait(ms)

    def close(self):
        """Destroy the window and terminate the process."""
        self.bar_chart.close()

    def stay_open(self):
        """Hand control to the Tk main loop so the window stays open."""
        global _ui_factory
        _ui_factory.mainroot.mainloop()
class _BarChart(object):
    """Tk window backing BarChartUserInterface.

    raise_bar() only queues increments; show() drains the queue and
    redraws, so the caller controls the animation rate.  The window is
    resizable and repaints itself on every <Configure> event.
    """

    def __init__(self, bar_count, mainroot):
        """Open a resizable window with `bar_count` bars, all at zero."""
        # create queue to store changes to placings
        self.to_show_queue = _Queue.Queue(maxsize=0)
        # variables used to keep the number of refreshes of names and values in check
        self.show_names_bool = False
        self.show_values_bool = True
        self.bar_count = bar_count
        # start the main window
        self.root = _tk.Toplevel(mainroot)
        self.root.title("BarChartUserInterface")
        self.root.protocol("WM_DELETE_WINDOW", self.callback)
        self.root.bind("<Escape>", self.callback)
        self.frame = _tk.Frame(self.root)
        self.frame.pack(fill=_tk.BOTH, expand=_tk.YES)
        self.height = 575
        self.width = 400
        self.c = _tk.Canvas(self.frame, width=self.width, height=self.height, bg='white', bd=0, highlightthickness=0)
        self.c.pack(fill=_tk.BOTH, expand=_tk.YES)
        self.c.focus_set()
        # repaint on every window resize
        self.c.bind('<Configure>', self.redraw)
        self.bar_max = 0  # tallest bar value; fixes the vertical scale
        self.bars = []
        self.names = []
        self.create_bars()
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def callback(self, event=None):
        """Window-close / Escape handler: destroy the window and exit the process."""
        self.root.destroy()
        _os._exit(0)

    def set_bar_name(self, bar_index, text):
        """Label bar `bar_index` with `text` and redraw immediately."""
        self.names[bar_index] = text;
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def raise_bar(self, bar_index):
        """Queue a +1 increment of bar `bar_index` (applied by show())."""
        element = _BarChartHolder(bar_index)
        self.to_show_queue.put(element)

    def inc_bar(self, bar_index):
        """Apply one queued increment, tracking the new maximum bar value."""
        if (self.bars[bar_index] + 1) > self.bar_max:
            self.bar_max = self.bars[bar_index] + 1
        self.bars[bar_index] += 1

    def show(self):
        """Drain all queued increments, then redraw the chart once."""
        try:
            while True:
                element = self.to_show_queue.get_nowait()
                self.inc_bar(element.bar_index)
        except _Queue.Empty:
            pass
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def show_names(self, value):
        """Toggle drawing of bar names under the bars."""
        self.show_names_bool = value
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def show_values(self, value):
        """Toggle drawing of bar values above the bars."""
        self.show_values_bool = value
        self.redraw()
        global _ui_factory
        _ui_factory.mainroot.update()

    def wait(self, ms):
        """Sleep `ms` milliseconds; an interrupted sleep closes the window."""
        try:
            _time.sleep(ms * 0.001)
        except:
            self.close()
        global _ui_factory
        _ui_factory.mainroot.update()

    def close(self):
        """Destroy the window and terminate the process."""
        self.root.destroy()
        _os._exit(0)
        return  # NOTE(review): unreachable after _os._exit(0)

    def create_bars(self):
        """Initialise every bar to value 0 with an empty name."""
        for i in range(self.bar_count):  # @UnusedVariable
            self.bars.append(0)
            self.names.append('')

    def redraw(self, event=None):
        """Repaint everything; `event` carries the new size after a resize."""
        if event != None:
            self.width = event.width
            self.height = event.height
        for e in self.c.find_all():
            self.c.delete(e)
        self.fill_canvas()

    def fill_canvas(self):
        """Draw all bars, values and names scaled to the current window size."""
        xstep = int(self.width / (self.bar_count + 2))
        xpad = int((self.width - xstep * self.bar_count) / 2)  #- self.width / 2
        xspacing = int(xstep / 10)
        ypad = int(self.height / 10)  #- self.height / 2
        ypadtext = int(ypad / 3)
        for i in range(self.bar_count):
            # draw the bar
            x0 = xpad + xstep * i + xspacing
            y0 = self.height - ypad
            x1 = xpad + xstep * (i + 1) - xspacing
            y1 = self.height - ypad
            color = 0
            if self.bar_max > 0:
                # taller bars are drawn redder (0..255 scaled by bar_max)
                y_len = self.bars[i] * int((self.height - 2 * ypad) / self.bar_max)
                y1 -= y_len
                color = self.bars[i] * int(255 / self.bar_max)
            coords = x0, y0, x1, y1
            hex_color = "#%02x%02x%02x" % (color, 0, 0)  # red, green, blue
            self.c.create_rectangle(coords, fill=hex_color)
            # draw the values
            x1 = xpad + xstep * i + int(xstep / 2)
            y1 -= ypadtext
            coords = x1, y1
            value = ("%d" % self.bars[i]) if self.show_values_bool else ''
            self.c.create_text(coords, text=value)
            # draw the names
            x0 = xpad + xstep * i + int(xstep / 2)
            y0 += ypadtext
            coords = x0, y0
            name = self.names[i] if self.show_names_bool else ''
            self.c.create_text(coords, text=name)
class SnakeUserInterface(object):
    """Validating facade over the _Snake Tk window.

    Drawing calls are queued; nothing becomes visible until show().
    """

    def __init__(self, width, height, scale=1.0):
        """Open a snake board of width x height cells; `scale` in [0.25, 1.0]."""
        _verify_int(width, "Width", 1)
        _verify_int(height, "Height", 1)
        _verify_float(scale, 'Scale', 0.25, 1.0)
        global _ui_factory
        self.snake_interface = _Snake(width, height, _ui_factory.mainroot, scale)
        # Re-export the cell constants so callers need not touch _Snake.
        self.EMPTY = _Snake.EMPTY
        self.FOOD = _Snake.FOOD
        self.SNAKE = _Snake.SNAKE
        self.WALL = _Snake.WALL

    def place(self, x, y, color):
        """Queue drawing `color` at cell (x, y).

        color: 0 = empty, 1 = food, 2 = snake, 3 = wall,
        4 = food_t, 5 = snake_t, 6 = wall_t (transparent variants).
        """
        _verify_int(x, 'X', 0, self.snake_interface.width - 1)
        _verify_int(y, 'Y', 0, self.snake_interface.height - 1)
        _verify_int(color, 'Color', 0, 6)
        self.snake_interface.place(x, y, color)

    def place_transparent(self, x, y, color):
        """Queue drawing the transparent variant of `color` at (x, y).

        color: 0 = empty, 1 = food, 2 = snake, 3 = wall
        (mapped to the internal transparent codes 4-6).
        """
        _verify_int(x, 'X', 0, self.snake_interface.width - 1)
        _verify_int(y, 'Y', 0, self.snake_interface.height - 1)
        # Fix: the upper bound was 6, but only 0-3 are valid transparent
        # inputs; 4-6 passed this check, were mapped to 7-9 below and then
        # failed inside place() with a misleading error message.  Now
        # consistent with OthelloReplayUserInterface.place_transparent.
        _verify_int(color, 'Color', 0, 3)
        if color == self.EMPTY:
            self.place(x, y, self.EMPTY)
        else:
            # transparent variants sit 3 above their solid counterparts
            self.place(x, y, color + 3)

    def clear(self):
        """Queue the clearing of every cell on the board."""
        self.snake_interface.clear()

    def show(self):
        """Drain the queue and render all pending cell updates."""
        self.snake_interface.show()

    def get_event(self):
        """Block until the next UI event (key, mouse, alarm) and return it."""
        return self.snake_interface.get_event()

    def set_animation_speed(self, fps):
        """Set the rate of 'alarm' events; fps <= 0 disables them."""
        _verify_float(fps, "Animation speed")
        self.snake_interface.set_animation_speed(fps)

    def print_(self, text):
        """Append `text` to the text area below the board."""
        _verify_str(text, "Text")
        self.snake_interface.print_(text)

    def clear_text(self):
        """Empty the text area below the board."""
        self.snake_interface.clear_text()

    def wait(self, ms):
        """Sleep for `ms` milliseconds."""
        _verify_int(ms, "Waiting time", 0)
        self.snake_interface.wait(ms)

    def random(self, maximum):
        """Return a random int in [0, maximum)."""
        _verify_int(maximum, 'Random', 1)
        return self.snake_interface.random(maximum)

    def close(self):
        """Destroy the window and terminate the process."""
        self.snake_interface.close()

    def stay_open(self):
        """Hand control to the Tk main loop so the window stays open."""
        global _ui_factory
        _ui_factory.mainroot.mainloop()
class _Snake(object):
    """Tk window backing SnakeUserInterface.

    Cell updates queue in to_show_queue and are rendered by show();
    keyboard/mouse/alarm events queue in event_queue and are consumed by
    get_event().
    """
    # one cannot prevent users from editing 'constants', as constants simply do not exist in Python
    EMPTY = 0
    FOOD = 1
    SNAKE = 2
    WALL = 3

    def __init__(self, width, height, mainroot, scale=1.0):
        """Build the board window as a Toplevel of `mainroot`; `scale` shrinks it."""
        # create queue to store changes to placings
        self.to_show_queue = _Queue.Queue(maxsize=0)
        self.event_queue = _Queue.Queue(maxsize=0)
        # copy params
        self.width = width
        self.height = height
        self.scale = scale
        self.closing_window = False
        # start the main window
        self.root = _tk.Toplevel(mainroot)
        self.root.title("SnakeUserInterface")
        self.root.protocol("WM_DELETE_WINDOW", self.callback)
        self.root.bind("<Escape>", self.callback)
        self.root.resizable(False, False)
        # calculate sizes
        self.size_per_coord = int(25 * scale)
        self.text_height = int(100 * scale)
        # create main frame
        self.frame = _tk.Frame(self.root, width=self.size_per_coord * self.width,
                               height=self.size_per_coord * self.height + self.text_height)
        self.frame.pack_propagate(0)
        self.frame.pack()
        # create board to hold references to snake-pieces
        self.food_board = []  # for storing references to create_image
        self.snake_board = []
        self.wall_board = []
        self.food_ghost_board = []
        self.snake_ghost_board = []
        self.wall_ghost_board = []
        self.img_refs = []  # for storing references to images - order: food, snake, wall, food_t, snake_t, wall_t
        # create and fill the canvas --> paintable area
        self.c = _tk.Canvas(self.frame, width=self.size_per_coord * self.width,
                            height=self.size_per_coord * self.height, bg="black", bd=0, highlightthickness=0)
        self.c.pack()
        self.last_x = -1  # used to generate mouseOver/Exit events
        self.last_y = -1  # used to generate mouseOver/Exit events
        self.fill_canvas()
        # create the textholder
        self.scrollbar = _tk.Scrollbar(self.frame)
        self.scrollbar.pack(side=_tk.RIGHT, fill=_tk.Y)
        self.textarea = _tk.Text(self.frame, yscrollcommand=self.scrollbar.set)
        self.textarea.pack(side=_tk.LEFT, fill=_tk.BOTH)
        self.scrollbar.config(command=self.textarea.yview)
        self.textarea.config(state=_tk.DISABLED)
        # alarm-event timing state (see set_animation_speed / refresh_event)
        self.interval = 0
        self.alarm_speed = 0
        self.timer = self.milliseconds()
        global _ui_factory
        _ui_factory.mainroot.update()

    def callback(self, event=None):
        """Window-close / Escape handler: destroy the window and exit the process."""
        self.root.destroy()
        _os._exit(0)

    def milliseconds(self):
        """Return the current wall-clock time in milliseconds."""
        return _time.time() * 1000

    def place(self, x, y, color):
        """Queue colour `color` for cell (x, y); rendered later by show()."""
        element = _SnakeHolder(x, y, color)
        self.to_show_queue.put(element)

    def clear(self):
        """Queue an EMPTY placement for every cell on the board."""
        for x in range(self.width):
            for y in range(self.height):
                self.place(x, y, self.EMPTY)

    def show(self):
        """Drain the queue, showing/hiding pre-created canvas items per colour."""
        try:
            while True:
                element = self.to_show_queue.get_nowait()
                # item lists in colour order: 1 food, 2 snake, 3 wall,
                # 4 food_t, 5 snake_t, 6 wall_t
                position = []
                position.append(self.food_board[element.x][element.y])
                position.append(self.snake_board[element.x][element.y])
                position.append(self.wall_board[element.x][element.y])
                position.append(self.food_ghost_board[element.x][element.y])
                position.append(self.snake_ghost_board[element.x][element.y])
                position.append(self.wall_ghost_board[element.x][element.y])
                for i in range(len(position)):
                    # add 1 to i, because 0 is empty [same as doing color - 1]
                    # thus, if 0, then it doesn't match with 1 to 6
                    if element.color == i + 1:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.NORMAL)
                    else:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.HIDDEN)
        except _Queue.Empty:
            pass
        global _ui_factory
        _ui_factory.mainroot.update()

    def get_event(self):
        """Block until an event is available and return it, pumping Tk meanwhile."""
        global _ui_factory
        _ui_factory.mainroot.update()
        while True:
            try:
                self.refresh_event()
                event = self.event_queue.get_nowait()
                return event
            except _Queue.Empty:
                # sleep briefly (at most 10 ms) so the UI stays responsive
                wait_time = min(self.interval, 10)
                self.wait(wait_time)
                _ui_factory.mainroot.update()

    def set_animation_speed(self, fps):
        """Set the rate of 'alarm' events to `fps` per second (capped at 1000).

        fps <= 0 disables alarm events.
        """
        current_time = self.milliseconds()
        if fps <= 0:
            self.interval = 0
            self.timer = current_time
            return
        if fps > 1000:
            fps = 1000
        self.interval = int(1000.0 / fps)
        self.refresh_event()

    def print_(self, text):
        """Append `text` to the read-only text area below the board."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.insert(_tk.END, text)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def clear_text(self):
        """Delete all text from the read-only text area."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.delete(1.0, _tk.END)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def wait(self, ms):
        """Sleep `ms` milliseconds; an interrupted sleep closes the window."""
        try:
            _time.sleep(ms * 0.001)
        except:
            self.close()

    def close(self):
        """Destroy the window and terminate the process."""
        self.root.destroy()
        _os._exit(0)

    def random(self, maximum=1):
        """Return a random int in [0, maximum)."""
        return int(_random.random() * maximum)

    def create_piece(self, x0, y0, img, mix):
        """Create one hidden cell item at pixel (x0, y0).

        `img` selects FOOD (apple with stem), SNAKE (disc) or WALL (square);
        a non-zero `mix` dims the colours to produce the transparent variant.
        Returns the list of canvas item ids making up the piece.
        """
        result = []
        if img == self.FOOD:
            r = int(255 / (1 + mix))
            g = int(64 / (1 + mix))
            b = int(64 / (1 + mix))
            scale = 0.8
            x1 = x0 + (1.0 - scale) / 2.0 * self.size_per_coord
            y1 = y0 + (1.0 - scale) * self.size_per_coord
            x2 = x0 + (1.0 - (1.0 - scale) / 2.0) * self.size_per_coord
            y2 = y0 + self.size_per_coord
            result.append(
                self.c.create_oval(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
            # the green "stem" line on top of the apple
            r = int(64 / (1 + mix))
            g = int(255 / (1 + mix))
            b = int(64 / (1 + mix))
            scale = 0.4
            x1 = x0 + self.size_per_coord / 2.0
            y1 = y0
            x2 = x1
            y2 = y0 + scale * self.size_per_coord
            result.append(
                self.c.create_line(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=2))
        if img == self.SNAKE:
            r = int(32 / (1 + mix))
            g = int(255 / (1 + mix))
            b = int(0 / (1 + mix))
            x1 = x0
            y1 = y0
            x2 = x0 + self.size_per_coord
            y2 = y0 + self.size_per_coord
            result.append(
                self.c.create_oval(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
        if img == self.WALL:
            r = int(200 / (1 + mix))
            g = int(100 / (1 + mix))
            b = int(0 / (1 + mix))
            x1 = x0
            y1 = y0
            x2 = x0 + self.size_per_coord
            y2 = y0 + self.size_per_coord
            result.append(
                self.c.create_rectangle(x1, y1, x2, y2, state=_tk.HIDDEN, fill="#%02X%02X%02X" % (r, g, b), width=0))
        return result

    def create_snake_pieces(self):
        """Pre-create hidden cell items for every cell and every colour variant."""
        mixer = 0, 0, 0, 1, 1, 1
        imgtype = self.FOOD, self.SNAKE, self.WALL, self.FOOD, self.SNAKE, self.WALL
        boards = self.food_board, self.snake_board, self.wall_board, self.food_ghost_board, self.snake_ghost_board, self.wall_ghost_board
        for n in range(len(boards)):
            for i in range(self.width):
                boards[n].append([])
                for j in range(self.height):
                    x0 = self.size_per_coord * i
                    y0 = self.size_per_coord * j
                    img = self.create_piece(x0, y0, imgtype[n], mixer[n])
                    boards[n][i].append(img)

    def fill_canvas(self):
        """Bind input events, then pre-create all hidden cell items."""
        self.bind_events()
        self.create_snake_pieces()

    def motion_event(self, event):
        """<Motion> handler: emit mouseexit/mouseover when the cell changed."""
        # NOTE(review): self.mouse_on_screen is first assigned in
        # enter_window_event; a <Motion> delivered before any <Enter> would
        # raise AttributeError — confirm Tk always delivers <Enter> first.
        if not self.mouse_on_screen:
            return
        x_old = self.last_x
        y_old = self.last_y
        x_new = event.x / self.size_per_coord
        y_new = event.y / self.size_per_coord
        x_change = int(x_old) != int(x_new)
        y_change = int(y_old) != int(y_new)
        if x_change or y_change:
            self.generate_event("mouseexit", "%d %d" % (x_old, y_old))
            self.generate_event("mouseover", "%d %d" % (x_new, y_new))
            self.last_x = x_new
            self.last_y = y_new

    def enter_window_event(self, event):
        """<Enter> handler: emit mouseover for the cell under the pointer."""
        x_new = event.x / self.size_per_coord
        y_new = event.y / self.size_per_coord
        self.generate_event("mouseover", "%d %d" % (x_new, y_new))
        self.last_x = x_new
        self.last_y = y_new
        self.mouse_on_screen = True

    def leave_window_event(self, event):
        """<Leave> handler: emit mouseexit for the last hovered cell."""
        self.generate_event("mouseexit", "%d %d" % (self.last_x, self.last_y))
        self.mouse_on_screen = False

    def alt_number_event(self, event):
        """<Alt-Key> handler: emit alt_number for Alt+digit combinations."""
        if event.char == event.keysym:
            if ord(event.char) >= ord('0') and ord(event.char) <= ord('9'):
                self.generate_event("alt_number", event.char)

    def key_event(self, event):
        """<Key> handler: classify the key as number/letter/arrow/other."""
        if event.char == event.keysym:
            # printable keys: char equals keysym
            if ord(event.char) >= ord('0') and ord(event.char) <= ord('9'):
                self.generate_event("number", event.char)
            elif ord(event.char) >= ord('a') and ord(event.char) <= ord('z'):
                self.generate_event("letter", event.char)
            elif ord(event.char) >= ord('A') and ord(event.char) <= ord('Z'):
                self.generate_event("letter", event.char)
            else:
                self.generate_event("other", event.char)
        elif event.keysym == 'Up':
            self.generate_event("arrow", "u")
        elif event.keysym == 'Down':
            self.generate_event("arrow", "d")
        elif event.keysym == 'Left':
            self.generate_event("arrow", "l")
        elif event.keysym == 'Right':
            self.generate_event("arrow", "r")
        elif event.keysym == 'Multi_Key':
            return
        elif event.keysym == 'Caps_Lock':
            self.generate_event("other", "caps lock")
        elif event.keysym == 'Num_Lock':
            self.generate_event("other", "num lock")
        elif event.keysym == 'Shift_L' or event.keysym == 'Shift_R':
            self.generate_event("other", "shift")
        elif event.keysym == 'Control_L' or event.keysym == 'Control_R':
            self.generate_event("other", "control")
        elif event.keysym == 'Alt_L' or event.keysym == 'Alt_R':
            self.generate_event("other", "alt")
        else:
            self.generate_event("other", event.keysym)

    def click_event(self, event):
        """<Button-1> handler: emit a click event with the clicked cell."""
        x = event.x / self.size_per_coord
        y = event.y / self.size_per_coord
        self.generate_event("click", "%d %d" % (x, y))

    def refresh_event(self):
        """Emit an 'alarm' event when at least one interval has elapsed."""
        current_time = self.milliseconds()
        threshold = current_time - self.timer - self.interval
        if threshold >= 0 and self.interval > 0:
            self.generate_event("alarm", "refresh")
            self.timer = current_time

    def generate_event(self, name, data):
        """Wrap (name, data) in an Event and enqueue it for get_event().

        NOTE(review): Event is defined elsewhere in this module (not visible
        in this chunk).
        """
        event = Event(name, data)
        self.event_queue.put(event)

    def bind_events(self):
        """Attach all keyboard and mouse handlers to the canvas."""
        self.c.focus_set()
        self.c.bind("<Motion>", self.motion_event)
        self.c.bind("<Enter>", self.enter_window_event)
        self.c.bind("<Leave>", self.leave_window_event)
        self.c.bind("<Alt-Key>", self.alt_number_event)
        self.c.bind("<Key>", self.key_event)
        self.c.bind("<Button-1>", self.click_event)
class LifeUserInterface(object):
    """Validating facade over the _Life Tk window.

    Cell updates are queued and only rendered when show() is called.
    """

    def __init__(self, width, height, scale=1.0):
        """Open a Game-of-Life board of width x height cells; scale in [0.25, 1.0]."""
        _verify_int(width, "Width", 1)
        _verify_int(height, "Height", 1)
        _verify_float(scale, 'Scale', 0.25, 1.0)
        global _ui_factory
        self.life_interface = _Life(width, height, _ui_factory.mainroot, scale)
        # Re-export the cell-state constants.
        self.DEAD = _Life.DEAD
        self.ALIVE = _Life.ALIVE

    def place(self, x, y, color):
        """Queue cell (x, y) to become `color` (DEAD=0 / ALIVE=1)."""
        _verify_int(x, 'X', 0, self.life_interface.width - 1)
        _verify_int(y, 'Y', 0, self.life_interface.height - 1)
        # NOTE(review): the accepted range is 0-2 although only DEAD=0 and
        # ALIVE=1 are defined; preserved as-is.
        _verify_int(color, 'Color', 0, 2)
        self.life_interface.place(x, y, color)

    def clear(self):
        """Queue every cell to become DEAD."""
        self.life_interface.clear()

    def show(self):
        """Drain the queue and render all pending cell updates."""
        self.life_interface.show()

    def get_event(self):
        """Block until the next UI event and return it."""
        return self.life_interface.get_event()

    def set_animation_speed(self, fps):
        """Set the rate of 'alarm' events; fps <= 0 disables them."""
        _verify_float(fps, "Animation speed")
        self.life_interface.set_animation_speed(fps)

    def print_(self, text):
        """Append `text` to the text area below the board."""
        _verify_str(text, "Text")
        self.life_interface.print_(text)

    def clear_text(self):
        """Empty the text area below the board."""
        self.life_interface.clear_text()

    def wait(self, ms):
        """Sleep for `ms` milliseconds."""
        _verify_int(ms, "Waiting time", 0)
        self.life_interface.wait(ms)

    def random(self, maximum):
        """Return a random int in [0, maximum)."""
        _verify_int(maximum, 'Random', 1)
        return self.life_interface.random(maximum)

    def close(self):
        """Destroy the window and terminate the process."""
        self.life_interface.close()

    def stay_open(self):
        """Hand control to the Tk main loop so the window stays open."""
        global _ui_factory
        _ui_factory.mainroot.mainloop()
class _Life(object):
    """Tk window implementing the Life board user interface.

    Renders a width x height grid of cells on a canvas plus a scrollable,
    read-only text area, and translates Tk callbacks (mouse, keyboard,
    timer) into Event objects on an internal queue.
    """

    # Cell states; also serve as indices into the per-state canvas boards.
    DEAD = 0
    ALIVE = 1
    # Canvas background colour (black).
    BACKGROUND = "#000000"

    def __init__(self, width, height, mainroot, scale=1.0):
        # Cells queued by place()/clear(), flushed to the canvas by show().
        self.to_show_queue = _Queue.Queue(maxsize=0)
        # Events produced by Tk callbacks, consumed by get_event().
        self.event_queue = _Queue.Queue(maxsize=0)
        self.width = width
        self.height = height
        self.scale = scale
        self.root = _tk.Toplevel(mainroot)
        self.root.title("LifeUserInterface")
        # Window close button and Escape both terminate the whole process.
        self.root.protocol("WM_DELETE_WINDOW", self.callback)
        self.root.bind("<Escape>", self.callback)
        self.root.resizable(False, False)
        self.size_per_coord = int(25 * scale)  # pixel size of one cell
        self.text_height = int(100 * scale)    # pixel height of the text area
        self.frame = _tk.Frame(self.root, width=self.size_per_coord * self.width,
                               height=self.size_per_coord * self.height + self.text_height)
        self.frame.pack_propagate(0)
        self.frame.pack()
        # Canvas item ids per state, indexed as [x][y]; exactly one state's
        # items are visible per cell at a time.
        self.dead_board = []
        self.alive_board = []
        self.img_refs = []
        self.c = _tk.Canvas(self.frame, width=self.size_per_coord * self.width,
                            height=self.size_per_coord * self.height, bg=self.BACKGROUND, bd=0, highlightthickness=0)
        self.c.pack()
        # Last cell the mouse pointer was seen over (-1, -1 = unknown yet).
        self.last_x = -1
        self.last_y = -1
        self.fill_canvas()
        self.scrollbar = _tk.Scrollbar(self.frame)
        self.scrollbar.pack(side=_tk.RIGHT, fill=_tk.Y)
        self.textarea = _tk.Text(self.frame, yscrollcommand=self.scrollbar.set)
        self.textarea.pack(side=_tk.LEFT, fill=_tk.BOTH)
        self.scrollbar.config(command=self.textarea.yview)
        # Keep the text widget read-only; print_/clear_text toggle it.
        self.textarea.config(state=_tk.DISABLED)
        # Timer state for the periodic "alarm" event (see refresh_event()).
        self.interval = 0
        self.alarm_speed = 0
        self.timer = self.milliseconds()
        global _ui_factory
        _ui_factory.mainroot.update()

    def callback(self, event=None):
        # Hard exit: _os._exit skips cleanup so Tk cannot hang on shutdown.
        self.root.destroy()
        _os._exit(0)

    def milliseconds(self):
        """Current wall-clock time in milliseconds."""
        return _time.time() * 1000

    def place(self, x, y, color):
        """Queue cell (x, y) to be drawn with *color* on the next show()."""
        element = _LifeHolder(x, y, color)
        self.to_show_queue.put(element)

    def clear(self):
        """Queue every cell to be drawn as DEAD."""
        for x in range(self.width):
            for y in range(self.height):
                self.place(x, y, self.DEAD)

    def show(self):
        """Flush all queued cell updates to the canvas and refresh the UI."""
        try:
            while True:
                element = self.to_show_queue.get_nowait()
                position = []
                position.append(self.dead_board[element.x][element.y])
                position.append(self.alive_board[element.x][element.y])
                # Show the items of the requested state, hide the rest.
                # A color outside 0/1 (e.g. 2) hides both -> background.
                for i in range(len(position)):
                    if element.color == i:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.NORMAL)
                    else:
                        for e in position[i]:
                            self.c.itemconfig(e, state=_tk.HIDDEN)
        except _Queue.Empty:
            pass  # queue drained
        global _ui_factory
        _ui_factory.mainroot.update()

    def get_event(self):
        """Block until an Event is available and return it.

        Polls the event queue while keeping the Tk loop responsive and the
        periodic alarm timer running.
        """
        global _ui_factory
        _ui_factory.mainroot.update()
        while True:
            try:
                self.refresh_event()
                event = self.event_queue.get_nowait()
                return event
            except _Queue.Empty:
                # Sleep at most 10 ms between polls (0 when no alarm
                # interval is configured).
                wait_time = min(self.interval, 10)
                self.wait(wait_time)
                _ui_factory.mainroot.update()

    def set_animation_speed(self, fps):
        """Set the alarm frequency; fps <= 0 disables it, capped at 1000."""
        current_time = self.milliseconds()
        if fps <= 0:
            self.interval = 0
            self.timer = current_time
            return
        if fps > 1000:
            fps = 1000
        self.interval = int(1000.0 / fps)
        self.refresh_event()

    def print_(self, text):
        """Append *text* to the text area (temporarily made writable)."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.insert(_tk.END, text)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def clear_text(self):
        """Erase the text area (temporarily made writable)."""
        self.textarea.config(state=_tk.NORMAL)
        self.textarea.delete(1.0, _tk.END)
        self.textarea.see(_tk.END)
        self.textarea.config(state=_tk.DISABLED)
        global _ui_factory
        _ui_factory.mainroot.update()

    def wait(self, ms):
        """Sleep *ms* milliseconds; any interruption shuts the UI down."""
        try:
            _time.sleep(ms * 0.001)
        except:
            self.close()

    def close(self):
        # Hard exit, mirroring callback().
        self.root.destroy()
        _os._exit(0)

    def random(self, maximum=1):
        """Return a pseudo-random int in [0, maximum)."""
        return int(_random.random() * maximum)

    def create_piece(self, x0, y0, img, state_):
        """Create the canvas items for one cell image at pixel (x0, y0).

        Returns the list of canvas item ids: a white square for DEAD,
        a blue square for ALIVE.
        """
        result = []
        if img == self.DEAD:
            r = 255
            g = 255
            b = 255
            x1 = x0
            y1 = y0
            x2 = x0 + self.size_per_coord - 1
            y2 = y0 + self.size_per_coord - 1
            result.append(
                self.c.create_rectangle(x1, y1, x2, y2, state=state_, fill="#%02X%02X%02X" % (r, g, b), width=1))
        if img == self.ALIVE:
            r = 0
            g = 0
            b = 255
            x1 = x0
            y1 = y0
            x2 = x0 + self.size_per_coord - 1
            y2 = y0 + self.size_per_coord - 1
            result.append(
                self.c.create_rectangle(x1, y1, x2, y2, state=state_, fill="#%02X%02X%02X" % (r, g, b), width=1))
        return result

    def create_life_pieces(self):
        """Pre-create canvas items for every cell in every state.

        DEAD items (n == 0) start visible; all other states start hidden.
        """
        imgtype = self.DEAD, self.ALIVE
        boards = self.dead_board, self.alive_board
        for n in range(len(boards)):
            for i in range(self.width):
                boards[n].append([])
                for j in range(self.height):
                    x0 = self.size_per_coord * i
                    y0 = self.size_per_coord * j
                    state_ = _tk.HIDDEN
                    if n == 0:
                        state_ = _tk.NORMAL
                    img = self.create_piece(x0, y0, imgtype[n], state_)
                    boards[n][i].append(img)

    def fill_canvas(self):
        """Attach event handlers and build the full cell grid."""
        self.bind_events()
        self.create_life_pieces()

    def motion_event(self, event):
        # NOTE(review): self.mouse_on_screen is first assigned in
        # enter_window_event/leave_window_event; this assumes Tk delivers
        # <Enter> before the first <Motion> -- confirm.
        if not self.mouse_on_screen:
            return
        x_old = self.last_x
        y_old = self.last_y
        x_new = event.x / self.size_per_coord
        y_new = event.y / self.size_per_coord
        # Only report when the pointer moved to a different cell.
        x_change = int(x_old) != int(x_new)
        y_change = int(y_old) != int(y_new)
        if x_change or y_change:
            self.generate_event("mouseexit", "%d %d" % (x_old, y_old))
            self.generate_event("mouseover", "%d %d" % (x_new, y_new))
            self.last_x = x_new
            self.last_y = y_new

    def enter_window_event(self, event):
        # Pointer entered the canvas: report the cell under it.
        x_new = event.x / self.size_per_coord
        y_new = event.y / self.size_per_coord
        self.generate_event("mouseover", "%d %d" % (x_new, y_new))
        self.last_x = x_new
        self.last_y = y_new
        self.mouse_on_screen = True

    def leave_window_event(self, event):
        # Pointer left the canvas: report the last known cell.
        self.generate_event("mouseexit", "%d %d" % (self.last_x, self.last_y))
        self.mouse_on_screen = False

    def alt_number_event(self, event):
        # Alt + digit: reported separately from plain digits.
        if event.char == event.keysym:
            if ord(event.char) >= ord('0') and ord(event.char) <= ord('9'):
                self.generate_event("alt_number", event.char)

    def key_event(self, event):
        # Classify key presses into number/letter/arrow/other events.
        # char == keysym holds for plain printable keys.
        if event.char == event.keysym:
            if ord(event.char) >= ord('0') and ord(event.char) <= ord('9'):
                self.generate_event("number", event.char)
            elif ord(event.char) >= ord('a') and ord(event.char) <= ord('z'):
                self.generate_event("letter", event.char)
            elif ord(event.char) >= ord('A') and ord(event.char) <= ord('Z'):
                self.generate_event("letter", event.char)
            else:
                self.generate_event("other", event.char)
        elif event.keysym == 'Up':
            self.generate_event("arrow", "u")
        elif event.keysym == 'Down':
            self.generate_event("arrow", "d")
        elif event.keysym == 'Left':
            self.generate_event("arrow", "l")
        elif event.keysym == 'Right':
            self.generate_event("arrow", "r")
        elif event.keysym == 'Multi_Key':
            return
        elif event.keysym == 'Caps_Lock':
            self.generate_event("other", "caps lock")
        elif event.keysym == 'Num_Lock':
            self.generate_event("other", "num lock")
        elif event.keysym == 'Shift_L' or event.keysym == 'Shift_R':
            self.generate_event("other", "shift")
        elif event.keysym == 'Control_L' or event.keysym == 'Control_R':
            self.generate_event("other", "control")
        elif event.keysym == 'Alt_L' or event.keysym == 'Alt_R':
            self.generate_event("other", "alt")
        else:
            self.generate_event("other", event.keysym)

    def click_event(self, event):
        # Left click: report the clicked cell coordinates.
        x = event.x / self.size_per_coord
        y = event.y / self.size_per_coord
        self.generate_event("click", "%d %d" % (x, y))

    def refresh_event(self):
        # Fire a periodic "alarm" event when the configured interval passed.
        current_time = self.milliseconds()
        threshold = current_time - self.timer - self.interval
        if threshold >= 0 and self.interval > 0:
            self.generate_event("alarm", "refresh")
            self.timer = current_time

    def generate_event(self, name, data):
        """Wrap *name*/*data* in an Event and enqueue it."""
        event = Event(name, data)
        self.event_queue.put(event)

    def bind_events(self):
        """Focus the canvas and register all Tk input handlers."""
        self.c.focus_set()
        self.c.bind("<Motion>", self.motion_event)
        self.c.bind("<Enter>", self.enter_window_event)
        self.c.bind("<Leave>", self.leave_window_event)
        self.c.bind("<Alt-Key>", self.alt_number_event)
        self.c.bind("<Key>", self.key_event)
        self.c.bind("<Button-1>", self.click_event)
class Event(object):
    """Simple named event with an opaque data payload.

    Produced by the UI classes in this module and consumed via get_event().
    """

    def __init__(self, name, data):
        self.name = name  # event category, e.g. "click", "letter", "alarm"
        self.data = data  # payload string; format depends on the category

    def __repr__(self):
        # Added for debuggability; does not affect existing callers.
        return '%s(name=%r, data=%r)' % (type(self).__name__, self.name, self.data)
class StockMarketUserInterface(object):
    """Fetches daily stock quotes (Alpha Vantage, with a legacy YQL helper)
    and plots return series with matplotlib.
    """

    def __init__(self, enable_cache=False):
        """:param enable_cache: cache fetched quote lists in '.stock_cache'."""
        if not have_mpl:
            # Bug fix: the old message named HouseMarketUserInterface here.
            raise Exception('Use of StockMarketUserInterface has been disabled.')
        self._enable_cache = enable_cache

    def _yql_query(self, q, _format, env):
        """Run a YQL query against Yahoo (legacy; unused by get_stock_quotes)."""
        req = {
            'q': q,
            'format': _format,
            'env': env
        }
        data = urllib.parse.urlencode(req)
        whole_url = YAHOO_URL + '?' + data
        request = urllib.request.Request(whole_url)
        handler = urllib.request.urlopen(request)
        response = json.loads(handler.read())
        return response

    def _av_query(self, symbol):
        """Fetch the full daily-adjusted time series for *symbol* from Alpha Vantage."""
        whole_url = ALPHA_VANTAGE_URL + "?function=TIME_SERIES_DAILY_ADJUSTED&apikey=Z2YF&symbol=%s&outputsize=full" % symbol
        request = urllib.request.Request(whole_url)
        handler = urllib.request.urlopen(request)
        response = json.loads(handler.read())
        if 'Error Message' in response:
            # Transient API failures sometimes clear on a second attempt.
            request = urllib.request.Request(whole_url)
            handler = urllib.request.urlopen(request)
            response = json.loads(handler.read())
        return response

    def _check_time_interval(self, start, end):
        """Validate that *start* and *end* are 'YYYY-MM-DD' date strings.

        Raises ValueError (from strptime) on malformed input. The previous
        datetime round-trip produced unused values and was removed.
        """
        _time.strptime(start, "%Y-%m-%d")
        _time.strptime(end, "%Y-%m-%d")

    def _load_cache(self, key):
        """Return the cached value for *key*, or None. Never raises."""
        try:
            # 'with' ensures the cache file is closed (the old code leaked
            # the file handle).
            with open(".stock_cache", "rb") as fp:
                db = _pickle.load(fp)
            return db.get(key, None)
        except Exception:
            return None

    def _store_cache(self, key, value):
        """Insert/replace *key* in the on-disk cache, preserving other entries."""
        db = {}
        try:
            with open(".stock_cache", "rb") as fp:
                try:
                    db = _pickle.load(fp)
                except Exception:
                    pass  # corrupt cache: start fresh
        except Exception:
            pass  # no cache file yet
        with open(".stock_cache", "wb+") as fp:
            db[key] = value
            _pickle.dump(db, fp)

    def _cache_hash(self, key_symbol, start, end):
        """Cache key for a (symbol, start, end) request."""
        return key_symbol + start + end

    def _av_rekey(self, dictionary):
        """Translate Alpha Vantage field names to the legacy Yahoo-style keys."""
        rekey = {
            'Adj_Close': '5. adjusted close',
            'open': '1. open',
            'high': '2. high',
            'low': '3. low',
            'close': '4. close',
            'volume': '6. volume'
        }
        new = {}
        for v, k in rekey.items():
            if k in dictionary:
                new[v] = float(dictionary[k])
        return new

    def get_stock_quotes(self, symbol, start, end):
        """Return the daily quotes for *symbol* between *start* and *end*.

        Dates are 'YYYY-MM-DD' strings; the result is a list of dicts with
        keys open/high/low/close/Adj_Close/volume, most recent day first.
        """
        self._check_time_interval(start, end)
        if self._enable_cache:
            cached = self._load_cache(self._cache_hash(symbol, start, end))
            if cached:
                return cached
        response = self._av_query(symbol)
        if 'Error Message' in response:
            raise Exception("No data available for quote symbol %s." % symbol)
        results = response['Time Series (Daily)']
        st = _time.strptime(start, "%Y-%m-%d")
        sp = _time.strptime(end, "%Y-%m-%d")
        # Keep only the days inside [start, end].
        quotes = [t for t in [(_time.strptime(x[0].split()[0], "%Y-%m-%d"), x[1]) for x in list(results.items())] if sp >= t[0] >= st]
        formatted_quotes = [self._av_rekey(x[1]) for x in sorted(quotes, key=lambda x: x[0], reverse=True)]
        if self._enable_cache:
            self._store_cache(self._cache_hash(symbol, start, end), formatted_quotes)
        return formatted_quotes

    def plot(self, prices, color, **kwargs):
        """Plot *prices* as a line over day indices; returns the line artists."""
        # NOTE(review): plt.arange relies on pyplot re-exporting numpy's
        # arange (old matplotlib behaviour) -- confirm against the pinned
        # matplotlib version.
        t = plt.arange(0, len(prices), 1)
        lines = plt.plot(t, prices, c=color)
        kwargs['linewidth'] = 2.0
        plt.setp(lines, **kwargs)
        return lines

    def show(self):
        """Label the axes and display the figure."""
        plt.ylabel('Returns')
        plt.xlabel('Day')
        plt.show()
class HouseMarketUserInterface(object):
    """Scatter/line plotting helper for house price vs. house size data."""

    def __init__(self):
        if not have_mpl:
            raise Exception('Use of HouseMarketUserInterface has been disabled.')
        self.max_x = 0  # largest x seen so far; controls the plotted range

    def plot_dot(self, x, y, color, **kwargs):
        """Plot data point(s); *x* may be a scalar or a list."""
        if isinstance(x, list):
            self.max_x = max(max(x), self.max_x)
        else:
            self.max_x = max(x, self.max_x)
        plt.plot(x, y, 'o', c=color, **kwargs)

    def plot_line(self, *args, **kwargs):
        """Plot the polynomial whose coefficients are *args* (constant term first)."""
        # NOTE(review): plt.arange relies on pyplot re-exporting numpy's
        # arange (old matplotlib behaviour) -- confirm.
        t = plt.arange(0.0, self.max_x, 0.01)
        func = lambda x: sum(args[i] * (x ** i) for i in range(len(args)))
        return plt.plot(t, func(t), **kwargs)

    def show(self):
        """Show the plot with the axes padded 10% beyond the observed data."""
        plt.ylabel('House Price')
        plt.xlabel('House Size (m^2)')
        orig_limit_x = plt.xlim()
        orig_limit_y = plt.ylim()
        # The unused 'a =' assignments of the return values were dropped.
        plt.xlim(orig_limit_x[0], self.max_x + 0.1 * self.max_x)
        plt.ylim(orig_limit_y[0] - 0.1 * orig_limit_y[0], orig_limit_y[1])
        plt.show()
# Single shared factory holding the hidden Tk main root used by all UIs.
_ui_factory = _Factory()
| true | true |
1c3c4e3803b9202adb0b6574ba964cbaae36a77b | 316 | py | Python | env/lib/python3.5/site-packages/pylint/test/functional/wrong_exception_operation.py | Udolf15/recommedMeMovies | be5ae74acd98e3f93beaaa5bb55623974fb24247 | [
"MIT"
] | 463 | 2015-01-15T08:17:42.000Z | 2022-03-28T15:10:20.000Z | env/lib/python3.5/site-packages/pylint/test/functional/wrong_exception_operation.py | Udolf15/recommedMeMovies | be5ae74acd98e3f93beaaa5bb55623974fb24247 | [
"MIT"
] | 52 | 2015-01-06T02:43:59.000Z | 2022-03-14T11:15:21.000Z | env/lib/python3.5/site-packages/pylint/test/functional/wrong_exception_operation.py | Udolf15/recommedMeMovies | be5ae74acd98e3f93beaaa5bb55623974fb24247 | [
"MIT"
] | 249 | 2015-01-07T22:49:49.000Z | 2022-03-18T02:32:06.000Z | # pylint: disable=missing-docstring, superfluous-parens
try:
1/0
except (ValueError | TypeError): # [wrong-exception-operation]
pass
try:
1/0
except (ValueError + TypeError): # [wrong-exception-operation]
pass
try:
1/0
except (ValueError < TypeError): # [wrong-exception-operation]
pass
| 16.631579 | 62 | 0.686709 |
try:
1/0
except (ValueError | TypeError):
pass
try:
1/0
except (ValueError + TypeError):
pass
try:
1/0
except (ValueError < TypeError):
pass
| true | true |
1c3c4f3ecb0e4c73eb385a69fc9f5d9d15933f65 | 2,679 | py | Python | src/training_utils.py | ArminKaramzade/SequenceMixup | 52eb053bd21f81db0aba0932da83dc06aaaee46f | [
"MIT"
] | null | null | null | src/training_utils.py | ArminKaramzade/SequenceMixup | 52eb053bd21f81db0aba0932da83dc06aaaee46f | [
"MIT"
] | null | null | null | src/training_utils.py | ArminKaramzade/SequenceMixup | 52eb053bd21f81db0aba0932da83dc06aaaee46f | [
"MIT"
] | null | null | null | import numpy as np
def pad_sequence(sequences, batch_first=True, padding_value=0, padding='post'):
    """Pad a list of variable-length tensors into one stacked tensor.

    :param sequences: non-empty list of tensors shaped (len_i, *trailing),
        all sharing the same trailing dimensions.
    :param batch_first: if True the result is (batch, max_len, *trailing),
        otherwise (max_len, batch, *trailing).
    :param padding_value: fill value for the padded positions.
    :param padding: 'post' pads after each sequence, 'pre' pads before it.
    :raises ValueError: if *padding* is neither 'post' nor 'pre'.
    """
    if padding not in ('post', 'pre'):
        raise ValueError("Padding must be 'post' or 'pre'")
    trailing_dims = sequences[0].size()[1:]
    max_len = max(s.size(0) for s in sequences)
    if batch_first:
        out_dims = (len(sequences), max_len) + trailing_dims
    else:
        out_dims = (max_len, len(sequences)) + trailing_dims
    # new_full preserves the dtype/device of the input tensors (replaces
    # the deprecated .data.new(...).fill_() idiom).
    out_tensor = sequences[0].new_full(out_dims, padding_value)
    for i, tensor in enumerate(sequences):
        length = tensor.size(0)
        if length == 0:
            # Nothing to copy; also avoids -0 slicing, which the old code
            # mishandled for 'pre' padding (it broadcast into the whole row).
            continue
        span = slice(None, length) if padding == 'post' else slice(-length, None)
        if batch_first:
            out_tensor[i, span, ...] = tensor
        else:
            out_tensor[span, i, ...] = tensor
    return out_tensor
def sort(c):
    """Sort a (sequences, labels) pair in lockstep by descending sequence length.

    Ties keep their original relative order (Python's sort is stable).
    """
    seqs, labels = c
    order = sorted(range(len(seqs)), key=lambda i: len(seqs[i]), reverse=True)
    return ([seqs[i] for i in order], [labels[i] for i in order])
def beta_lambdas_generator(alpha, beta, batch_size, length, repeat, rho):
    """Sample Beta(alpha, beta) mixing coefficients of shape
    (batch_size, length, *repeat).

    rho == 0: one lambda per batch element, constant along the length axis.
    rho > 0: lambdas vary along the length axis with autocorrelation -- each
    sample's Beta distribution is re-parameterised around a blend of the
    prior mean (weight rho) and the previous sample (weight 1 - rho).
    """
    def extend(a, repeat):
        # a.shape = (batch_size, length)
        # repeat = (n1, n2, ...)
        a = np.tile(a, repeat+(1, 1))  # ((repeat), batch_size, length)
        a = np.rollaxis(a, len(a.shape)-2, 0)  # (batch_size, (repeat), length)
        return np.rollaxis(a, len(a.shape)-1, 1)  # (batch_size, length, (repeat))

    def get_ab(alpha, beta, rho, x):
        # Method-of-moments Beta parameters: target mean c1 blends the prior
        # mean with the previous sample x; c2 is the scaled prior variance.
        c1 = rho * (alpha / (alpha + beta)) + (1 - rho) * (x)
        c2 = (rho**2) * (alpha*beta) / (((alpha + beta)**2) * (alpha + beta + 1))
        if c2 == 0:
            a = 1e9
        else:
            a = (c1 * (1 - c1) - c2) * c1 / c2
        if c1 == 0:
            b = 1e9
        else:
            b = a * (1. / c1 - 1)
        # Clamp so np.random.beta's parameters stay strictly positive.
        return max(1e-9, a), max(1e-9, b)

    if rho == 0:
        # One draw per batch element, repeated across the length axis.
        lambdas = np.random.beta(alpha, beta, (batch_size))
        lambdas = np.tile(lambdas, (length, 1))
        lambdas = np.rollaxis(lambdas, len(lambdas.shape)-1, 0)
        return extend(lambdas, repeat)
    lambdas = np.zeros((batch_size, length))
    for i in range(batch_size):
        for j in range(length):
            if j == 0:
                lambdas[i, j] = np.random.beta(alpha, beta)
            else:
                a, b = get_ab(alpha, beta, rho, lambdas[i, j-1])
                lambdas[i, j] = np.random.beta(a, b)
    return extend(lambdas, repeat)
def pad_sequence(sequences, batch_first=True, padding_value=0, padding='post'):
max_size = sequences[0].size()
trailing_dims = max_size[1:]
max_len = max([s.size(0) for s in sequences])
if batch_first:
out_dims = (len(sequences), max_len) + trailing_dims
else:
out_dims = (max_len, len(sequences)) + trailing_dims
out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
if padding == 'post':
for i, tensor in enumerate(sequences):
length = tensor.size(0)
if batch_first:
out_tensor[i, :length, ...] = tensor
else:
out_tensor[:length, i, ...] = tensor
elif padding == 'pre':
for i, tensor in enumerate(sequences):
length = tensor.size(0)
if batch_first:
out_tensor[i, -length:, ...] = tensor
else:
out_tensor[-length:, i, ...] = tensor
else:
raise ValueError("Padding must be 'post' or 'pre'")
return out_tensor
def sort(c):
a, b = c
idx = [i[0] for i in sorted(enumerate(a), key=lambda s: len(s[1]), reverse=True)]
return ([a[i] for i in idx], [b[i] for i in idx])
def beta_lambdas_generator(alpha, beta, batch_size, length, repeat, rho):
def extend(a, repeat):
a = np.tile(a, repeat+(1, 1))
a = np.rollaxis(a, len(a.shape)-2, 0)
return np.rollaxis(a, len(a.shape)-1, 1)
def get_ab(alpha, beta, rho, x):
c1 = rho * (alpha / (alpha + beta)) + (1 - rho) * (x)
c2 = (rho**2) * (alpha*beta) / (((alpha + beta)**2) * (alpha + beta + 1))
if c2 == 0:
a = 1e9
else:
a = (c1 * (1 - c1) - c2) * c1 / c2
if c1 == 0:
b = 1e9
else:
b = a * (1. / c1 - 1)
return max(1e-9, a), max(1e-9, b)
if rho == 0:
lambdas = np.random.beta(alpha, beta, (batch_size))
lambdas = np.tile(lambdas, (length, 1))
lambdas = np.rollaxis(lambdas, len(lambdas.shape)-1, 0)
return extend(lambdas, repeat)
lambdas = np.zeros((batch_size, length))
for i in range(batch_size):
for j in range(length):
if j == 0:
lambdas[i, j] = np.random.beta(alpha, beta)
else:
a, b = get_ab(alpha, beta, rho, lambdas[i, j-1])
lambdas[i, j] = np.random.beta(a, b)
return extend(lambdas, repeat) | true | true |
1c3c4f7d326d3e61a2360b50c58382ee9fd91e55 | 2,179 | py | Python | tests/models/v2/save_config_spec_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 5 | 2016-08-23T17:52:22.000Z | 2019-05-16T08:45:30.000Z | tests/models/v2/save_config_spec_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-11-10T05:30:21.000Z | 2019-04-05T15:03:37.000Z | tests/models/v2/save_config_spec_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 7 | 2016-08-25T16:11:44.000Z | 2021-02-22T05:31:25.000Z | #!/usr/bin/env python
# coding: utf-8
"""
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unittest
from netapp.santricity.models.v2.save_config_spec import SaveConfigSpec
class SaveConfigSpecTest(unittest.TestCase):
    """Smoke test for the auto-generated SaveConfigSpec model.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def test_save_config_spec(self):
        # The model must be instantiable with no arguments.
        instance = SaveConfigSpec()
        self.assertNotEqual(instance, None)
| 57.342105 | 845 | 0.773291 |
import unittest
from netapp.santricity.models.v2.save_config_spec import SaveConfigSpec
class SaveConfigSpecTest(unittest.TestCase):
def test_save_config_spec(self):
save_config_spec_obj = SaveConfigSpec()
self.assertNotEqual(save_config_spec_obj, None)
| true | true |
1c3c4fd2537cd0c79606063e10cad90a4c941a2f | 847 | py | Python | doc/extensions/yt_showfields.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 2 | 2021-03-02T18:59:49.000Z | 2021-03-02T18:59:50.000Z | doc/extensions/yt_showfields.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 4 | 2018-04-13T23:03:42.000Z | 2018-05-08T17:50:43.000Z | doc/extensions/yt_showfields.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 2 | 2020-05-16T15:29:37.000Z | 2020-06-22T10:17:08.000Z | import subprocess
import sys
from docutils.parsers.rst import Directive
def setup(app):
    """Sphinx extension entry point: register the yt_showfields directive."""
    app.add_directive('yt_showfields', ShowFields)
    # Stash the Sphinx app and its settings on the function object so the
    # directive machinery can reach them later.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    return dict(version='1.0',
                parallel_read_safe=True,
                parallel_write_safe=True)
class ShowFields(Directive):
    """Directive that runs show_fields.py and splices its output into the doc."""

    required_arguments = 0
    optional_arguments = 0
    parallel_read_safe = True
    parallel_write_safe = True

    def run(self):
        source_path = self.state_machine.document.attributes['source']
        # Generate the field listing with the current interpreter.
        output = subprocess.check_output(
            [sys.executable, './helper_scripts/show_fields.py']).decode('utf8')
        self.state_machine.insert_input(output.split('\n'), source_path)
        return []
| 25.666667 | 67 | 0.661157 | import subprocess
import sys
from docutils.parsers.rst import Directive
def setup(app):
app.add_directive('yt_showfields', ShowFields)
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
retdict = dict(
version='1.0',
parallel_read_safe=True,
parallel_write_safe=True
)
return retdict
class ShowFields(Directive):
required_arguments = 0
optional_arguments = 0
parallel_read_safe = True
parallel_write_safe = True
def run(self):
rst_file = self.state_machine.document.attributes['source']
lines = subprocess.check_output(
[sys.executable, './helper_scripts/show_fields.py'])
lines = lines.decode('utf8')
lines = lines.split('\n')
self.state_machine.insert_input(lines, rst_file)
return []
| true | true |
1c3c4ffd84971e02c8f9355aeda7355ea56191d6 | 1,723 | py | Python | src/Parser.py | blankettripod/Pastrel | 2e637a8f2801d784e449369218111924a3927a3a | [
"Apache-2.0"
] | null | null | null | src/Parser.py | blankettripod/Pastrel | 2e637a8f2801d784e449369218111924a3927a3a | [
"Apache-2.0"
] | null | null | null | src/Parser.py | blankettripod/Pastrel | 2e637a8f2801d784e449369218111924a3927a3a | [
"Apache-2.0"
] | null | null | null | import Utility
from Token import *
from Error import Error
from Node import Node
from rule import *
from ruleset import *
class Parser:
    """Shift/reduce parser driven by a set of Rule patterns.

    NOTE(review): the inline comment calls this an SLR(2) parser; as written
    it is a generic pattern-matching shift/reduce loop over RuleSet.Find
    results -- confirm against the rule definitions.
    """

    def __init__(self, tokens: list[Token], rules: list[Rule]):
        self.tokens = tokens
        self.rules = rules

    def parse(self) -> list[Node]:
        # slr(2) parser
        stack: list = []
        tokenIndex = 0
        while tokenIndex < len(self.tokens):
            # Shift: push the next token onto the stack.
            stackIndex = 0
            stack.append(self.tokens[tokenIndex])
            tokenIndex += 1
            # Reduce while any rule matches the current stack.
            while stackIndex < len(stack):
                results = self.findAll(stack)
                if results is None:
                    stackIndex += 1
                    continue
                final = results[0]
                for result in results:
                    if result.precedence > final.precedence:
                        final = result
                # this will return the result with the highest precedence
                stack = self.reduce(stack, stackIndex, final)
        return stack

    def findAll(self, stack):
        """Return all rule matches with precedence != -1, or None if nothing matches."""
        results = []
        for rule in self.rules:
            results.append(RuleSet.Find(stack, rule))
        output = []
        for result in results:
            if result[0].precedence != -1:
                output.append(result[0])
        if len(output) > 0:
            return output
        return None

    @staticmethod
    def reduce(stack, stackIndex, final) -> list:
        # Drop everything up to and including stackIndex, then push the
        # reduction result.
        stack = stack[stackIndex+1:]
        stack.append(final)
        return stack
'''
Should check all patterns and stack positions for the current stack plus two more.
All results should be gathered, and then the result with the highest precedence is chosen.
'''
| 24.971014 | 84 | 0.557168 | import Utility
from Token import *
from Error import Error
from Node import Node
from rule import *
from ruleset import *
class Parser:
def __init__(self, tokens: list[Token], rules: list[Rule]):
self.tokens = tokens
self.rules = rules
def parse(self) -> list[Node]:
stack: list = []
tokenIndex = 0
while tokenIndex < len(self.tokens):
stackIndex = 0
stack.append(self.tokens[tokenIndex])
tokenIndex += 1
while stackIndex < len(stack):
results = self.findAll(stack)
if results is None:
stackIndex += 1
continue
final = results[0]
for result in results:
if result.precedence > final.precedence:
final = result
stack = self.reduce(stack, stackIndex, final)
return stack
def findAll(self, stack):
results = []
for rule in self.rules:
results.append(RuleSet.Find(stack, rule))
output = []
for result in results:
if result[0].precedence != -1:
output.append(result[0])
if len(output) > 0:
return output
return None
@staticmethod
def reduce(stack, stackIndex, final) -> list:
stack = stack[stackIndex+1:]
stack.append(final)
return stack
| true | true |
1c3c51aab760ef6aa1d004dafc169d97a2ad0c8f | 23,067 | py | Python | fuzzinator/controller.py | pmatos/fuzzinator | c4af854110cdf2e70fd174b8bf2f6b737b53193a | [
"BSD-3-Clause"
] | null | null | null | fuzzinator/controller.py | pmatos/fuzzinator | c4af854110cdf2e70fd174b8bf2f6b737b53193a | [
"BSD-3-Clause"
] | null | null | null | fuzzinator/controller.py | pmatos/fuzzinator | c4af854110cdf2e70fd174b8bf2f6b737b53193a | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2016-2021 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import os
import shutil
import time
import traceback
from math import inf
from multiprocessing import Lock, Process, Queue
import psutil
from inators.imp import import_object
from .config import as_bool, as_int_or_inf, as_path, config_get_callable, config_get_fuzzers, config_get_kwargs
from .job import FuzzJob, ReduceJob, UpdateJob, ValidateJob
from .listener import ListenerManager
from .mongo_driver import MongoDriver
class Controller(object):
"""
Fuzzinator's main controller that orchestrates a fuzz session by scheduling
all related activities (e.g., keeps SUTs up-to-date, runs fuzzers and feeds
test cases to SUTs, or minimizes failure inducing test cases). All
configuration options of the framework must be encapsulated in a
:class:`configparser.ConfigParser` object.
The following config sections and options are recognized:
- Section ``fuzzinator``: Global settings of the framework.
- Option ``work_dir``: Pattern of work directory for temporary files,
which may contain the substring ``{uid}`` as a placeholder for a
unique string (replaced by the framework). (Optional, default:
``~/.fuzzinator/{uid}``)
- Option ``db_uri``: URI to a MongoDB database to store found issues and
execution statistics. (Optional, default:
``mongodb://localhost/fuzzinator``)
- Option ``db_server_selection_timeout``: Controls how long the database
driver will wait to find an available server (in milliseconds).
(Optional, default: 30000)
- Option ``cost_budget``: (Optional, default: number of cpus)
- Option ``validate_after_update``: Boolean to enable the validation
of valid issues of all SUTs after their update.
(Optional, default: ``False``)
- Sections ``sut.NAME``: Definitions of a SUT named *NAME*
- Option ``call``: Fully qualified name of a python callable that must
accept a ``test`` keyword argument representing the input to the SUT
and must return a dictionary object if the input triggered an issue
in the SUT, or a value considered false otherwise (which can be a
simple ``None``, but can also be a ``NonIssue`` in complex cases).
The returned issue dictionary (if any) *should* contain an ``'id'``
field that equals for issues that are not considered unique.
(Mandatory)
See package :mod:`fuzzinator.call` for potential callables.
- Option ``cost``: (Optional, default: 1)
- Option ``reduce``: Fully qualified name of a python callable that must
accept ``issue``, ``sut_call``, ``sut_call_kwargs``, ``listener``,
``ident``, ``work_dir`` keyword arguments representing an issue to be
reduced (and various other potentially needed objects), and must
return a tuple consisting of a reduced test case for the issue (or
``None`` if the issue's current test case could not be reduced) and a
(potentially empty) list of new issues that were discovered during
test case reduction (if any). (Optional, no reduction for this SUT if
option is missing.)
See package :mod:`fuzzinator.reduce` for potential callables.
- Option ``reduce_call``: Fully qualified name of a python callable that
acts as the SUT's ``call`` option during test case reduction.
(Optional, default: the value of option ``call``)
See package :mod:`fuzzinator.call` for potential callables.
- Option ``reduce_cost``: (Optional, default: the value of option
``cost``)
- Option ``validate_call``: Fully qualified name of a python callable
that acts as the SUT's ``call`` option during test case validation.
(Optional, default: the value of option ``reduce_call`` if defined,
otherwise the value of option ``call``)
See package :mod:`fuzzinator.call` for potential callables.
- Option ``validate_cost``: (Optional, default: the value of option
``cost``)
- Option ``update_condition``: Fully qualified name of a python callable
that must return ``True`` if and only if the SUT should be updated.
(Optional, SUT is never updated automatically if option is missing.)
See package :mod:`fuzzinator.update` for potential callables.
- Option ``update``: Fully qualified name of a python callable that
should perform the update of the SUT. (Optional, SUT is never updated
if option is missing.)
See package :mod:`fuzzinator.update` for potential callables.
- Option ``update_cost``: (Optional, default: the value of option
``fuzzinator:cost_budget``)
- Option ``validate_after_update``: Boolean to enable the validation
of the valid issues of the SUT after its update. (Optional, default:
the value of option ``fuzzinator:validate_after_update``)
- Option ``formatter``: Fully qualified name of a python callable that
formats the issue dictionary of the SUT and returns a custom string
representation. It must accept ``issue`` and ``format`` keyword
arguments representing an issue to be formatted and a formatting
instruction. If ``format`` is ``'long'`` or not specified, the issue
should be formatted in full, while if ``'short'`` is given, a
summary description (preferably a single line of text) should be
returned.
(Optional, default: :func:`fuzzinator.formatter.JsonFormatter`.)
See package :mod:`fuzzinator.formatter` for further potential
callables.
- Option ``tui_formatter``: Fully qualified name of a python
callable that formats the issue dictionary of the SUT to display
it in the TUI issue viewer interface.
(Optional, default: the value of option ``formatter``)
See package :mod:`fuzzinator.formatter` for further potential
callables.
- Option ``email_formatter``: Fully qualified name of a python
callable that formats the issue dictionary of the SUT to insert
it into an e-mail notification.
(Optional, default: the value of option ``formatter``)
See package :mod:`fuzzinator.formatter` for further potential
callables.
- Option ``exporter``: Fully qualified name of a python callable that
exports the issue dictionary in a custom SUT-specific format. It must
accept an ``issue`` keyword argument representing the issue to be
exported and its result must be writable to a file, i.e., it must be
either a string or a byte array. The export format does not
necessarily have to contain all elements of the issue dictionary
(e.g., it is often useful to only extract the test input that
triggered the issue). (Optional, no custom export for this SUT if
option is missing.)
See package :mod:`fuzzinator.exporter` for potential callables.
- Sections ``fuzz.NAME``: Definitions of a fuzz job named *NAME*
- Option ``sut``: Name of the SUT that describes the subject of
this fuzz job. (Mandatory)
- Option ``fuzzer``: Fully qualified name of a python callable that must
accept an ``index`` keyword argument representing a running counter
in the fuzz job and must return a test input (or ``None``, which
signals that the fuzzer is "exhausted" and cannot generate more test
cases in this fuzz job). The semantics of the generated test input is
not restricted by the framework, it is up to the configuration to
ensure that the SUT of the fuzz job can deal with the tests generated
by the fuzzer of the fuzz job. (Mandatory)
See package :mod:`fuzzinator.fuzzer` for potential callables.
- Option ``batch``: Number of times the fuzzer is requested to generate
a new test for the SUT. (Optional, default: 1)
- Option ``instances``: Number of instances of this fuzz job allowed to
run in parallel. (Optional, default: ``inf``)
- Option ``refresh``: Statistics update frequency in terms of executed
test cases. (Optional, default: ``batch`` size)
- Section ``listeners``: Definitions of custom event listeners.
This section is optional.
- Options ``OPT``: Fully qualified name of a python class that
executes custom actions for selected events.
See package :mod:`fuzzinator.listeners` for potential listeners.
- Callable options can be implemented as functions or classes with
``__call__`` method (the latter are instantiated first to get a callable
object). Both constructor calls (if any) and the "real" calls can be
given keyword arguments. These arguments have to be specified in
sections ``(sut|fuzz).NAME.OPT[.init]`` with appropriate names (where
the ``.init`` sections stand for the constructor arguments).
- All callables can be decorated according to python semantics. The
decorators must be callable classes themselves and have to be specified
in options ``OPT.decorate(N)`` with fully qualified name. Multiple
decorators can be applied to a callable ``OPT``, their order is
specified by an integer index in parentheses. Keyword arguments to be
passed to the decorators have to be listed in sections
``(sut|fuzz).NAME.OPT.decorate(N)``.
See packages :mod:`fuzzinator.call` and :mod:`fuzzinator.fuzzer` for
potential decorators.
"""
def __init__(self, config):
    """
    :param configparser.ConfigParser config: the configuration options of the
        fuzz session.

    :ivar fuzzinator.ListenerManager listener: a listener manager object that is
        called on various events during the fuzz session.
    """
    self.config = config
    # Expand the work directory template with this process' pid, then write it
    # back with '$' doubled — presumably to survive configparser interpolation
    # of later reads (NOTE(review): confirm interpolation style used).
    work_dir = self.config.get('fuzzinator', 'work_dir').format(uid=os.getpid())
    self.config.set('fuzzinator', 'work_dir', work_dir.replace('$', '$$'))
    self.work_dir = as_path(work_dir)
    self.fuzzers = config_get_fuzzers(self.config)
    # Maximum total 'cost' of jobs that run() allows to execute in parallel.
    self.capacity = int(self.config.get('fuzzinator', 'cost_budget'))
    self.validate_after_update = as_bool(self.config.get('fuzzinator', 'validate_after_update'))
    self.db = MongoDriver(self.config.get('fuzzinator', 'db_uri'),
                          int(self.config.get('fuzzinator', 'db_server_selection_timeout')))
    self.db.init_db(self.fuzzers)
    self.session_start = time.time()
    self.session_baseline = self.db.get_stats()
    # Aggregate listener; user-defined listeners from the [listeners] section
    # are instantiated and added to it.
    self.listener = ListenerManager()
    for name in config_get_kwargs(self.config, 'listeners'):
        entity = import_object(self.config.get('listeners', name))
        self.listener += entity(config=config, **config_get_kwargs(config, 'listeners.' + name + '.init'))
    # Cross-process channel for job add/cancel requests, consumed by run().
    self._shared_queue = Queue()
    self._shared_lock = Lock()
def run(self, *, max_cycles=None, validate=None, reduce=None):
    """
    Start the fuzz session.

    :param int max_cycles: maximum number to iterate through the fuzz jobs
        defined in the configuration (defaults to ``inf``).
    :param str validate: name of SUT to validate issues of at the start of
        the fuzz session (the empty string denotes all SUTs; defaults to no
        SUT).
    :param str reduce: name of SUT to reduce issues of at the start of the
        fuzz session (the empty string denotes all SUTs; defaults to no
        SUT).
    """
    max_cycles = max_cycles if max_cycles is not None else inf
    cycle = 0
    fuzz_idx = 0
    fuzz_names = list(self.fuzzers)
    load = 0  # aggregate cost of the currently running jobs
    job_id = 0  # monotonically increasing id assigned to every new job
    job_queue = []  # jobs waiting to be scheduled
    running_jobs = dict()  # job id -> {'job': ..., 'proc': ...}

    def _update_load():
        # Recompute the aggregate cost of live jobs, pruning finished
        # processes from running_jobs, and notify listeners on change.
        current_load = 0
        for ident in list(running_jobs):
            if not running_jobs[ident]['proc'].is_alive() or not psutil.pid_exists(running_jobs[ident]['proc'].pid):
                self.listener.on_job_removed(ident=ident)
                del running_jobs[ident]
            else:
                current_load += running_jobs[ident]['job'].cost

        nonlocal load
        if load != current_load:
            load = current_load
            self.listener.on_load_updated(load=load)

    def _poll_jobs():
        # Drain the cross-process request queue; a None job class marks a
        # cancellation request (see cancel_job), anything else an addition.
        with self._shared_lock:
            while not self._shared_queue.empty():
                job_class, job_kwargs, priority = self._shared_queue.get_nowait()
                if job_class is not None:
                    _add_job(job_class, job_kwargs, priority)
                else:
                    _cancel_job(**job_kwargs)

    def _add_job(job_class, job_kwargs, priority):
        # Instantiate the job, announce it through the listener, and enqueue
        # it; priority jobs get zero cost and jump to the queue front.
        nonlocal job_id
        next_job = job_class(id=job_id,
                             config=self.config,
                             db=self.db,
                             listener=self.listener,
                             **job_kwargs)
        job_id += 1

        if priority:
            next_job.cost = 0

        # Dispatch the type-specific "job added" notification.
        {
            FuzzJob:
                lambda: self.listener.on_fuzz_job_added(ident=next_job.id,
                                                        cost=next_job.cost,
                                                        sut=next_job.sut_name,
                                                        fuzzer=next_job.fuzzer_name,
                                                        batch=next_job.batch),
            ValidateJob:
                lambda: self.listener.on_validate_job_added(ident=next_job.id,
                                                            cost=next_job.cost,
                                                            sut=next_job.sut_name,
                                                            issue_id=next_job.issue['id']),
            ReduceJob:
                lambda: self.listener.on_reduce_job_added(ident=next_job.id,
                                                          cost=next_job.cost,
                                                          sut=next_job.sut_name,
                                                          issue_id=next_job.issue['id'],
                                                          size=len(str(next_job.issue['test']))),
            UpdateJob:
                lambda: self.listener.on_update_job_added(ident=next_job.id,
                                                          cost=next_job.cost,
                                                          sut=next_job.sut_name),
        }[job_class]()

        job_queue.insert(0 if priority else len(job_queue), next_job)

    def _cancel_job(ident):
        # Kill the job's process tree if it is already running, otherwise
        # drop it from the wait queue.
        if ident in running_jobs:
            Controller.kill_process_tree(running_jobs[ident]['proc'].pid)
        else:
            ident_idx = [job_idx for job_idx, job in enumerate(job_queue) if job.id == ident]
            if ident_idx:
                self.listener.on_job_removed(ident=ident)
                del job_queue[ident_idx[0]]

    # Optional bulk validation/reduction before fuzzing starts.
    if validate is not None:
        self.validate_all(sut_name=validate)

    if reduce is not None:
        self.reduce_all(sut_name=reduce)

    try:
        while True:
            # Update load and poll added jobs (if any).
            _poll_jobs()
            _update_load()

            if fuzz_idx == 0:
                cycle += 1
            if cycle > max_cycles or (not self.fuzzers and max_cycles != inf):
                # Session over: drain running jobs, then leave the loop.
                while load > 0:
                    time.sleep(1)
                    _poll_jobs()  # only to let running jobs cancelled; newly added jobs don't get scheduled
                    _update_load()
                break

            # Hunt for new issues only if there is no other work to do.
            if not job_queue:
                if not self.fuzzers:
                    # No fuzzers configured: only externally added jobs can
                    # arrive, so avoid busy-waiting.
                    time.sleep(1)
                    continue

                # Determine fuzz job to be queued and then update fuzz_idx
                # to point to the next job's parameters.
                fuzzer_name = fuzz_names[fuzz_idx]
                fuzz_section = 'fuzz.' + fuzzer_name
                fuzz_idx = (fuzz_idx + 1) % len(self.fuzzers)

                # Skip fuzz job if limit on parallel instances is reached.
                instances = as_int_or_inf(self.config.get(fuzz_section, 'instances', fallback='inf'))
                if instances <= sum(1 for job in running_jobs.values() if isinstance(job['job'], FuzzJob) and job['job'].fuzzer_name == fuzzer_name):
                    continue

                # Before queueing a new fuzz job, check if we are working
                # with the latest version of the SUT and queue an update if
                # needed. update_condition is also used as a context manager
                # here (NOTE(review): looks like it serializes the check —
                # confirm against config_get_callable's contract).
                sut_name = self.config.get(fuzz_section, 'sut')
                update_condition, update_condition_kwargs = config_get_callable(self.config, 'sut.' + sut_name, 'update_condition')
                if update_condition:
                    with update_condition:
                        if update_condition(**update_condition_kwargs):
                            self.add_update_job(sut_name)

                self.add_fuzz_job(fuzzer_name)

                # Poll newly added job(s). Looping ensures that jobs will
                # eventually arrive.
                # (Unfortunately, multiprocessing.Queue.empty() is unreliable.)
                while not job_queue:
                    _poll_jobs()

            # Perform next job as soon as there is enough capacity for it.
            while True:
                if not job_queue:
                    next_job = None
                    break
                if load + job_queue[0].cost <= self.capacity:
                    next_job = job_queue.pop(0)
                    break
                time.sleep(1)
                _poll_jobs()
                _update_load()

            if not next_job:
                continue

            proc = Process(target=self._run_job, args=(next_job,))
            running_jobs[next_job.id] = dict(job=next_job, proc=proc)
            self.listener.on_job_activated(ident=next_job.id)
            proc.start()
    except KeyboardInterrupt:
        pass
    except Exception as e:
        self.listener.warning(ident=None, msg='Exception in the main controller loop: {exception}\n{trace}'.format(exception=e, trace=traceback.format_exc()))
    finally:
        # Make sure no runaway children or scratch files survive the session.
        Controller.kill_process_tree(os.getpid(), kill_root=False)
        if os.path.exists(self.work_dir):
            shutil.rmtree(self.work_dir, ignore_errors=True)
def _run_job(self, job):
try:
for issue in job.run():
# Automatic reduction and/or validation if the job found something new
if not self.add_reduce_job(issue=issue):
self.add_validate_job(issue=issue)
except Exception as e:
self.listener.warning(ident=job.id, msg='Exception in {job}: {exception}\n{trace}'.format(
job=repr(job),
exception=e,
trace=traceback.format_exc()))
def add_fuzz_job(self, fuzzer_name, priority=False):
    """Queue a new fuzz job for *fuzzer_name*; always returns True.

    Exists for the sake of completeness and consistency with the other
    ``add_*_job`` methods; the UI is not expected to queue fuzz jobs this
    way (the main loop in run() schedules them itself).
    """
    job_kwargs = {
        'fuzzer_name': fuzzer_name,
        'subconfig_id': self.fuzzers[fuzzer_name]['subconfig'],
    }
    with self._shared_lock:
        self._shared_queue.put((FuzzJob, job_kwargs, priority))
    return True
def add_validate_job(self, issue, priority=False):
    """Queue a validation job for *issue*.

    Returns False (without queueing) when the issue's SUT is not defined
    in the configuration, True otherwise.
    """
    # Without a 'sut.NAME' section there is no way to re-run the issue.
    if not self.config.has_section('sut.' + issue['sut']):
        return False
    with self._shared_lock:
        self._shared_queue.put((ValidateJob, {'issue': issue}, priority))
    return True
def add_reduce_job(self, issue, priority=False):
    """Queue a reduction job for *issue*.

    Returns False (without queueing) when the issue's SUT has no 'reduce'
    option configured, True otherwise.
    """
    if not self.config.has_option('sut.' + issue['sut'], 'reduce'):
        return False
    with self._shared_lock:
        self._shared_queue.put((ReduceJob, {'issue': issue}, priority))
    return True
def add_update_job(self, sut_name, priority=False):
    """Queue an update job for *sut_name*.

    Returns False (without queueing) when the SUT has no 'update' option
    configured. Optionally queues validation of all of the SUT's issues
    afterwards, depending on the 'validate_after_update' setting (SUT-level
    setting wins over the global one).
    """
    sut_section = 'sut.' + sut_name
    if not self.config.has_option(sut_section, 'update'):
        return False
    with self._shared_lock:
        self._shared_queue.put((UpdateJob, {'sut_name': sut_name}, priority))
    if as_bool(self.config.get(sut_section, 'validate_after_update', fallback=self.validate_after_update)):
        self.validate_all(sut_name)
    return True
def validate_all(self, sut_name=None):
    """Queue validation jobs for every not-yet-invalid issue of *sut_name*
    (or of all configured SUTs when *sut_name* is None or empty)."""
    if sut_name:
        sut_names = [sut_name]
    else:
        # Top-level 'sut.NAME' sections only; skip 'sut.NAME.option'
        # subsections (exactly one dot in the section name).
        sut_names = [section.split('.', maxsplit=1)[1]
                     for section in self.config.sections()
                     if section.startswith('sut.') and section.count('.') == 1]
    for issue in self.db.find_issues_by_suts(sut_names):
        if not issue.get('invalid'):
            self.add_validate_job(issue)
def reduce_all(self, sut_name=None):
    """Queue reduction jobs for every issue of *sut_name* (or of all
    configured SUTs when *sut_name* is None or empty) that is not already
    reported, reduced, or invalid."""
    if sut_name:
        sut_names = [sut_name]
    else:
        # Top-level 'sut.NAME' sections only (exactly one dot).
        sut_names = [section.split('.', maxsplit=1)[1]
                     for section in self.config.sections()
                     if section.startswith('sut.') and section.count('.') == 1]
    for issue in self.db.find_issues_by_suts(sut_names):
        if not issue.get('reported') and not issue.get('reduced') and not issue.get('invalid'):
            self.add_reduce_job(issue)
def cancel_job(self, ident):
    """Request cancellation of the job with id *ident*; always returns True.

    The (None, kwargs, None) triple is the sentinel the main loop's job
    poller interprets as a cancellation request rather than a new job.
    """
    cancel_message = (None, {'ident': ident}, None)
    with self._shared_lock:
        self._shared_queue.put(cancel_message)
    return True
@staticmethod
def kill_process_tree(pid, kill_root=True):
    """Terminate, then kill, the whole process tree rooted at *pid*.

    First politely terminates every descendant (and the root itself unless
    ``kill_root`` is False), waits up to one second, then force-kills the
    survivors. psutil errors for processes that disappear meanwhile are
    ignored.
    """
    try:
        root_proc = psutil.Process(pid)
        targets = root_proc.children(recursive=True)
        if kill_root:
            targets.append(root_proc)
        # Polite phase: request termination from every target.
        for target in targets:
            try:
                target.terminate()
            except psutil.Error:
                pass
        # Forceful phase: kill whatever outlived the grace period.
        _, survivors = psutil.wait_procs(targets, timeout=1)
        for target in survivors:
            try:
                target.kill()
            except psutil.Error:
                pass
    except psutil.NoSuchProcess:
        # Root process already gone; nothing to clean up.
        pass
| 45.497041 | 178 | 0.592621 |
import os
import shutil
import time
import traceback
from math import inf
from multiprocessing import Lock, Process, Queue
import psutil
from inators.imp import import_object
from .config import as_bool, as_int_or_inf, as_path, config_get_callable, config_get_fuzzers, config_get_kwargs
from .job import FuzzJob, ReduceJob, UpdateJob, ValidateJob
from .listener import ListenerManager
from .mongo_driver import MongoDriver
class Controller(object):
    """Main controller of a fuzz session: schedules fuzz/validate/reduce/update
    jobs as subprocesses within a configured cost budget and forwards their
    events to the registered listeners."""

    def __init__(self, config):
        """
        :param configparser.ConfigParser config: the configuration options of
            the fuzz session.
        """
        self.config = config
        # Expand the work directory template with this process' pid; '$' is
        # doubled when written back — presumably to survive configparser
        # interpolation of later reads (NOTE(review): confirm).
        work_dir = self.config.get('fuzzinator', 'work_dir').format(uid=os.getpid())
        self.config.set('fuzzinator', 'work_dir', work_dir.replace('$', '$$'))
        self.work_dir = as_path(work_dir)
        self.fuzzers = config_get_fuzzers(self.config)
        # Maximum total 'cost' of jobs allowed to run in parallel.
        self.capacity = int(self.config.get('fuzzinator', 'cost_budget'))
        self.validate_after_update = as_bool(self.config.get('fuzzinator', 'validate_after_update'))
        self.db = MongoDriver(self.config.get('fuzzinator', 'db_uri'),
                              int(self.config.get('fuzzinator', 'db_server_selection_timeout')))
        self.db.init_db(self.fuzzers)
        self.session_start = time.time()
        self.session_baseline = self.db.get_stats()
        # Aggregate listener; user-defined listeners from [listeners] added.
        self.listener = ListenerManager()
        for name in config_get_kwargs(self.config, 'listeners'):
            entity = import_object(self.config.get('listeners', name))
            self.listener += entity(config=config, **config_get_kwargs(config, 'listeners.' + name + '.init'))
        # Cross-process channel for job add/cancel requests (see run()).
        self._shared_queue = Queue()
        self._shared_lock = Lock()

    def run(self, *, max_cycles=None, validate=None, reduce=None):
        """Start the fuzz session.

        :param int max_cycles: maximum number of iterations through the
            configured fuzz jobs (defaults to ``inf``).
        :param str validate: name of SUT whose issues to validate first
            (empty string: all SUTs; defaults to none).
        :param str reduce: name of SUT whose issues to reduce first
            (empty string: all SUTs; defaults to none).
        """
        max_cycles = max_cycles if max_cycles is not None else inf
        cycle = 0
        fuzz_idx = 0
        fuzz_names = list(self.fuzzers)
        load = 0  # aggregate cost of currently running jobs
        job_id = 0  # monotonically increasing id for new jobs
        job_queue = []  # jobs waiting to be scheduled
        running_jobs = dict()  # job id -> {'job': ..., 'proc': ...}

        def _update_load():
            # Recompute aggregate cost of live jobs, pruning dead processes.
            current_load = 0
            for ident in list(running_jobs):
                if not running_jobs[ident]['proc'].is_alive() or not psutil.pid_exists(running_jobs[ident]['proc'].pid):
                    self.listener.on_job_removed(ident=ident)
                    del running_jobs[ident]
                else:
                    current_load += running_jobs[ident]['job'].cost

            nonlocal load
            if load != current_load:
                load = current_load
                self.listener.on_load_updated(load=load)

        def _poll_jobs():
            # Drain the cross-process request queue; a None job class marks
            # a cancellation request (see cancel_job).
            with self._shared_lock:
                while not self._shared_queue.empty():
                    job_class, job_kwargs, priority = self._shared_queue.get_nowait()
                    if job_class is not None:
                        _add_job(job_class, job_kwargs, priority)
                    else:
                        _cancel_job(**job_kwargs)

        def _add_job(job_class, job_kwargs, priority):
            # Instantiate the job, notify listeners, and enqueue it;
            # priority jobs get zero cost and jump to the queue front.
            nonlocal job_id
            next_job = job_class(id=job_id,
                                 config=self.config,
                                 db=self.db,
                                 listener=self.listener,
                                 **job_kwargs)
            job_id += 1

            if priority:
                next_job.cost = 0

            # Type-specific "job added" notification.
            {
                FuzzJob:
                    lambda: self.listener.on_fuzz_job_added(ident=next_job.id,
                                                            cost=next_job.cost,
                                                            sut=next_job.sut_name,
                                                            fuzzer=next_job.fuzzer_name,
                                                            batch=next_job.batch),
                ValidateJob:
                    lambda: self.listener.on_validate_job_added(ident=next_job.id,
                                                                cost=next_job.cost,
                                                                sut=next_job.sut_name,
                                                                issue_id=next_job.issue['id']),
                ReduceJob:
                    lambda: self.listener.on_reduce_job_added(ident=next_job.id,
                                                              cost=next_job.cost,
                                                              sut=next_job.sut_name,
                                                              issue_id=next_job.issue['id'],
                                                              size=len(str(next_job.issue['test']))),
                UpdateJob:
                    lambda: self.listener.on_update_job_added(ident=next_job.id,
                                                              cost=next_job.cost,
                                                              sut=next_job.sut_name),
            }[job_class]()

            job_queue.insert(0 if priority else len(job_queue), next_job)

        def _cancel_job(ident):
            # Kill a running job's process tree, or drop a queued job.
            if ident in running_jobs:
                Controller.kill_process_tree(running_jobs[ident]['proc'].pid)
            else:
                ident_idx = [job_idx for job_idx, job in enumerate(job_queue) if job.id == ident]
                if ident_idx:
                    self.listener.on_job_removed(ident=ident)
                    del job_queue[ident_idx[0]]

        # Optional bulk validation/reduction before fuzzing starts.
        if validate is not None:
            self.validate_all(sut_name=validate)

        if reduce is not None:
            self.reduce_all(sut_name=reduce)

        try:
            while True:
                _poll_jobs()
                _update_load()

                if fuzz_idx == 0:
                    cycle += 1
                if cycle > max_cycles or (not self.fuzzers and max_cycles != inf):
                    # Session over: drain running jobs, then leave the loop.
                    while load > 0:
                        time.sleep(1)
                        _poll_jobs()
                        _update_load()
                    break

                # Hunt for new issues only if there is no other work to do.
                if not job_queue:
                    if not self.fuzzers:
                        # No fuzzers configured: avoid busy-waiting.
                        time.sleep(1)
                        continue

                    # Determine fuzz job to be queued and then update fuzz_idx
                    # to point to the next job's parameters.
                    fuzzer_name = fuzz_names[fuzz_idx]
                    fuzz_section = 'fuzz.' + fuzzer_name
                    fuzz_idx = (fuzz_idx + 1) % len(self.fuzzers)

                    # Skip the fuzz job if its parallel-instance limit is hit.
                    instances = as_int_or_inf(self.config.get(fuzz_section, 'instances', fallback='inf'))
                    if instances <= sum(1 for job in running_jobs.values() if isinstance(job['job'], FuzzJob) and job['job'].fuzzer_name == fuzzer_name):
                        continue

                    # Queue a SUT update first if the update condition holds.
                    sut_name = self.config.get(fuzz_section, 'sut')
                    update_condition, update_condition_kwargs = config_get_callable(self.config, 'sut.' + sut_name, 'update_condition')
                    if update_condition:
                        with update_condition:
                            if update_condition(**update_condition_kwargs):
                                self.add_update_job(sut_name)

                    self.add_fuzz_job(fuzzer_name)

                    # Loop until the newly added job(s) actually arrive
                    # (multiprocessing.Queue.empty() is unreliable).
                    while not job_queue:
                        _poll_jobs()

                # Perform next job as soon as there is enough capacity for it.
                while True:
                    if not job_queue:
                        next_job = None
                        break
                    if load + job_queue[0].cost <= self.capacity:
                        next_job = job_queue.pop(0)
                        break
                    time.sleep(1)
                    _poll_jobs()
                    _update_load()

                if not next_job:
                    continue

                proc = Process(target=self._run_job, args=(next_job,))
                running_jobs[next_job.id] = dict(job=next_job, proc=proc)
                self.listener.on_job_activated(ident=next_job.id)
                proc.start()
        except KeyboardInterrupt:
            pass
        except Exception as e:
            self.listener.warning(ident=None, msg='Exception in the main controller loop: {exception}\n{trace}'.format(exception=e, trace=traceback.format_exc()))
        finally:
            # Make sure no children or scratch files survive the session.
            Controller.kill_process_tree(os.getpid(), kill_root=False)
            if os.path.exists(self.work_dir):
                shutil.rmtree(self.work_dir, ignore_errors=True)

    def _run_job(self, job):
        """Execute *job* and queue reduction (or validation) of its issues;
        exceptions are reported through the listener (runs in a child
        process)."""
        try:
            for issue in job.run():
                # Prefer reduction; fall back to validation when the SUT
                # has no reducer configured.
                if not self.add_reduce_job(issue=issue):
                    self.add_validate_job(issue=issue)
        except Exception as e:
            self.listener.warning(ident=job.id, msg='Exception in {job}: {exception}\n{trace}'.format(
                job=repr(job),
                exception=e,
                trace=traceback.format_exc()))

    def add_fuzz_job(self, fuzzer_name, priority=False):
        """Queue a new fuzz job; always returns True. Exists for consistency
        with the other add_*_job methods (the main loop queues fuzz jobs
        itself)."""
        with self._shared_lock:
            self._shared_queue.put((FuzzJob, dict(fuzzer_name=fuzzer_name, subconfig_id=self.fuzzers[fuzzer_name]['subconfig']), priority))
        return True

    def add_validate_job(self, issue, priority=False):
        """Queue validation of *issue*; returns False if its SUT is not
        configured."""
        if not self.config.has_section('sut.' + issue['sut']):
            return False
        with self._shared_lock:
            self._shared_queue.put((ValidateJob, dict(issue=issue), priority))
        return True

    def add_reduce_job(self, issue, priority=False):
        """Queue reduction of *issue*; returns False if its SUT has no
        'reduce' option configured."""
        if not self.config.has_option('sut.' + issue['sut'], 'reduce'):
            return False
        with self._shared_lock:
            self._shared_queue.put((ReduceJob, dict(issue=issue), priority))
        return True

    def add_update_job(self, sut_name, priority=False):
        """Queue an update of *sut_name* (returns False if no 'update' option
        is configured), optionally followed by re-validation of its issues."""
        if not self.config.has_option('sut.' + sut_name, 'update'):
            return False
        with self._shared_lock:
            self._shared_queue.put((UpdateJob, dict(sut_name=sut_name), priority))
        if as_bool(self.config.get('sut.' + sut_name, 'validate_after_update', fallback=self.validate_after_update)):
            self.validate_all(sut_name)
        return True

    def validate_all(self, sut_name=None):
        """Queue validation jobs for all non-invalid issues of *sut_name*
        (or of every configured SUT when falsy)."""
        # Top-level 'sut.NAME' sections have exactly one dot in their name.
        sut_name = [sut_name] if sut_name else [section.split('.', maxsplit=1)[1] for section in self.config.sections() if section.startswith('sut.') and section.count('.') == 1]
        for issue in self.db.find_issues_by_suts(sut_name):
            if not issue.get('invalid'):
                self.add_validate_job(issue)

    def reduce_all(self, sut_name=None):
        """Queue reduction jobs for all unreported, unreduced, non-invalid
        issues of *sut_name* (or of every configured SUT when falsy)."""
        sut_name = [sut_name] if sut_name else [section.split('.', maxsplit=1)[1] for section in self.config.sections() if section.startswith('sut.') and section.count('.') == 1]
        for issue in self.db.find_issues_by_suts(sut_name):
            if not issue.get('reported') and not issue.get('reduced') and not issue.get('invalid'):
                self.add_reduce_job(issue)

    def cancel_job(self, ident):
        """Request cancellation of job *ident*; the (None, kwargs, None)
        triple is the cancellation sentinel understood by run()."""
        with self._shared_lock:
            self._shared_queue.put((None, dict(ident=ident), None))
        return True

    @staticmethod
    def kill_process_tree(pid, kill_root=True):
        """Terminate, then kill, the process tree rooted at *pid* (the root
        itself only when ``kill_root``); psutil errors for processes that
        disappear meanwhile are ignored."""
        try:
            root_proc = psutil.Process(pid)
            children = root_proc.children(recursive=True)
            if kill_root:
                children.append(root_proc)
            # Polite phase: request termination.
            for proc in children:
                try:
                    proc.terminate()
                except psutil.Error:
                    pass
            # Forceful phase: kill survivors after a one-second grace period.
            _, alive = psutil.wait_procs(children, timeout=1)
            for proc in alive:
                try:
                    proc.kill()
                except psutil.Error:
                    pass
        except psutil.NoSuchProcess:
            pass
1c3c522b701a45cbf827f98dfc6a46b019906c0d | 27,084 | py | Python | dace/codegen/targets/framecode.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | [
"BSD-3-Clause"
] | 1 | 2021-07-26T07:58:06.000Z | 2021-07-26T07:58:06.000Z | dace/codegen/targets/framecode.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | [
"BSD-3-Clause"
] | null | null | null | dace/codegen/targets/framecode.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from typing import Optional, Set, Tuple
import collections
import copy
import dace
import functools
import re
from dace.codegen import control_flow as cflow
from dace.codegen import dispatcher as disp
from dace.codegen.prettycode import CodeIOStream
from dace.codegen.targets.common import codeblock_to_cpp, sym2cpp
from dace.codegen.targets.cpp import unparse_interstate_edge
from dace.codegen.targets.target import TargetCodeGenerator
from dace.sdfg import SDFG, SDFGState, ScopeSubgraphView
from dace.sdfg import nodes
from dace.sdfg.infer_types import set_default_schedule_and_storage_types
from dace import dtypes, data, config
from typing import Any, List
from dace.frontend.python import wrappers
import networkx as nx
import numpy as np
def _get_or_eval_sdfg_first_arg(func, sdfg):
if callable(func):
return func(sdfg)
return func
class DaCeCodeGenerator(object):
""" DaCe code generator class that writes the generated code for SDFG
state machines, and uses a dispatcher to generate code for
individual states based on the target. """
def __init__(self, *args, **kwargs):
    """Set up the frame-code generator's streams, state-struct fields, and
    target dispatcher (positional/keyword arguments are accepted but
    unused)."""
    # Streams collecting SDFG-level init/exit code emitted during codegen.
    self._initcode = CodeIOStream()
    self._exitcode = CodeIOStream()
    # Field declarations injected into the generated state struct.
    self.statestruct: List[str] = []
    # Environments used by the generated code (populated later).
    self.environments: List[Any] = []
    # The dispatcher routes code generation to registered targets; this
    # frame-code generator handles state-level dispatch itself.
    self._dispatcher = disp.TargetDispatcher(self)
    self._dispatcher.register_state_dispatcher(self)
##################################################################
# Target registry
@property
def dispatcher(self):
    """The target dispatcher that routes code generation to registered
    targets."""
    return self._dispatcher
##################################################################
# Code generation
def generate_constants(self, sdfg: SDFG, callsite_stream: CodeIOStream):
    """ Emit the SDFG's constants as C++ ``constexpr`` definitions.
        :param sdfg: The SDFG whose ``constants_prop`` entries are emitted.
        :param callsite_stream: Stream to write the definitions to.
    """
    # Write constants
    for cstname, (csttype, cstval) in sdfg.constants_prop.items():
        if isinstance(csttype, data.Array):
            # Array constant: emit a flat brace-initialized C array, with
            # elements in C (row-major) order.
            const_str = "constexpr " + csttype.dtype.ctype + \
                " " + cstname + "[" + str(cstval.size) + "] = {"
            it = np.nditer(cstval, order='C')
            # All but the last element get a trailing comma; the iterator
            # is advanced manually alongside the counter.
            for i in range(cstval.size - 1):
                const_str += str(it[0]) + ", "
                it.iternext()
            # NOTE(review): assumes cstval has at least one element — an
            # empty array constant would fail here; confirm upstream
            # guarantees non-empty constants.
            const_str += str(it[0]) + "};\n"
            callsite_stream.write(const_str, sdfg)
        else:
            # Scalar constant: render the value via the symbolic-to-C++
            # converter.
            callsite_stream.write(
                "constexpr %s %s = %s;\n" %
                (csttype.dtype.ctype, cstname, sym2cpp(cstval)), sdfg)
def generate_fileheader(self,
                        sdfg: SDFG,
                        global_stream: CodeIOStream,
                        backend: str = 'frame'):
    """ Generate a header in every output file that includes custom types
        and constants.
        :param sdfg: The input SDFG.
        :param global_stream: Stream to write to (global).
        :param backend: Whose backend this header belongs to.
    """
    # Hash file include
    if backend == 'frame':
        global_stream.write('#include "../../include/hash.h"\n', sdfg)

    #########################################################
    # Environment-based includes
    for env in self.environments:
        if len(env.headers) > 0:
            global_stream.write(
                "\n".join("#include \"" + h + "\"" for h in env.headers),
                sdfg)

    #########################################################
    # Custom types
    datatypes = set()
    # Types of this SDFG (all arrays of the SDFG and its nested SDFGs)
    for _, arrname, arr in sdfg.arrays_recursive():
        if arr is not None:
            datatypes.add(arr.dtype)

    # Emit unique definitions (only dtypes that know how to emit one)
    wrote_something = False
    for typ in datatypes:
        if hasattr(typ, 'emit_definition'):
            if not wrote_something:
                global_stream.write("", sdfg)
                wrote_something = True
            global_stream.write(typ.emit_definition(), sdfg)
    if wrote_something:
        global_stream.write("", sdfg)

    #########################################################
    # Write constants
    self.generate_constants(sdfg, global_stream)

    #########################################################
    # Write state struct (fields were collected in self.statestruct)
    structstr = '\n'.join(self.statestruct)
    global_stream.write(
        f'''
struct {sdfg.name}_t {{
    {structstr}
}};

''', sdfg)

    # Global code blocks: backend-agnostic (None) first, then backend-specific
    for sd in sdfg.all_sdfgs_recursive():
        if None in sd.global_code:
            global_stream.write(codeblock_to_cpp(sd.global_code[None]), sd)
        if backend in sd.global_code:
            global_stream.write(codeblock_to_cpp(sd.global_code[backend]),
                                sd)
def generate_header(self, sdfg: SDFG, global_stream: CodeIOStream,
                    callsite_stream: CodeIOStream):
    """ Generate the header of the frame-code. Code exists in a separate
        function for overriding purposes.
        :param sdfg: The input SDFG.
        :param global_stream: Stream to write to (global).
        :param callsite_stream: Stream to write to (at call site).
    """
    # Write frame code - header
    global_stream.write(
        '/* DaCe AUTO-GENERATED FILE. DO NOT MODIFY */\n' +
        '#include <dace/dace.h>\n', sdfg)

    # Write header required by environments
    for env in self.environments:
        self.statestruct.extend(env.state_fields)

    # Instrumentation preamble
    # NOTE(review): "> 1" presumably because the instrumentation registry
    # always contains a default entry — confirm against TargetDispatcher.
    if len(self._dispatcher.instrumentation) > 1:
        self.statestruct.append('dace::perf::Report report;')
        # Reset report if written every invocation
        if config.Config.get_bool('instrumentation',
                                  'report_each_invocation'):
            callsite_stream.write('__state->report.reset();', sdfg)

    self.generate_fileheader(sdfg, global_stream, 'frame')
def generate_footer(self, sdfg: SDFG, global_stream: CodeIOStream,
                    callsite_stream: CodeIOStream):
    """ Generate the footer of the frame-code. Code exists in a separate
        function for overriding purposes.
        :param sdfg: The input SDFG.
        :param global_stream: Stream to write to (global).
        :param callsite_stream: Stream to write to (at call site).
    """
    import dace.library
    fname = sdfg.name
    params = sdfg.signature()
    paramnames = sdfg.signature(False, for_call=True)
    initparams = sdfg.signature(with_arrays=False)
    initparamnames = sdfg.signature(False, for_call=True, with_arrays=False)

    # Invoke all instrumentation providers
    for instr in self._dispatcher.instrumentation.values():
        if instr is not None:
            instr.on_sdfg_end(sdfg, callsite_stream, global_stream)

    # Instrumentation saving (per-invocation mode)
    if (config.Config.get_bool('instrumentation', 'report_each_invocation')
            and len(self._dispatcher.instrumentation) > 1):
        callsite_stream.write(
            '''__state->report.save("{path}/perf", __HASH_{name});'''
            .format(path=sdfg.build_folder.replace('\\', '/'),
                    name=sdfg.name), sdfg)

    # Write closing brace of program
    callsite_stream.write('}', sdfg)

    # Write awkward footer to avoid 'extern "C"' issues
    params_comma = (', ' + params) if params else ''
    initparams_comma = (', ' + initparams) if initparams else ''
    paramnames_comma = (', ' + paramnames) if paramnames else ''
    initparamnames_comma = (', ' + initparamnames) if initparamnames else ''
    callsite_stream.write(
        f'''
DACE_EXPORTED void __program_{fname}({fname}_t *__state{params_comma})
{{
    __program_{fname}_internal(__state{paramnames_comma});
}}''', sdfg)

    # Declarations of per-target init/exit entry points
    for target in self._dispatcher.used_targets:
        if target.has_initializer:
            callsite_stream.write(
                'DACE_EXPORTED int __dace_init_%s(%s_t *__state%s);\n' %
                (target.target_name, sdfg.name, initparams_comma), sdfg)
        if target.has_finalizer:
            callsite_stream.write(
                'DACE_EXPORTED int __dace_exit_%s(%s_t *__state);\n' %
                (target.target_name, sdfg.name), sdfg)

    # __dace_init: allocate the state struct and run all initializers
    callsite_stream.write(
        f"""
DACE_EXPORTED {sdfg.name}_t *__dace_init_{sdfg.name}({initparams})
{{
    int __result = 0;
    {sdfg.name}_t *__state = new {sdfg.name}_t;

""", sdfg)

    for target in self._dispatcher.used_targets:
        if target.has_initializer:
            callsite_stream.write(
                '__result |= __dace_init_%s(__state%s);' %
                (target.target_name, initparamnames_comma), sdfg)
    for env in self.environments:
        init_code = _get_or_eval_sdfg_first_arg(env.init_code, sdfg)
        if init_code:
            callsite_stream.write("{ // Environment: " + env.__name__,
                                  sdfg)
            callsite_stream.write(init_code)
            callsite_stream.write("}")

    for sd in sdfg.all_sdfgs_recursive():
        if None in sd.init_code:
            callsite_stream.write(codeblock_to_cpp(sd.init_code[None]), sd)
        callsite_stream.write(codeblock_to_cpp(sd.init_code['frame']), sd)

    callsite_stream.write(self._initcode.getvalue(), sdfg)

    # Fail init (return nullptr) if any target initializer reported an error
    callsite_stream.write(
        f"""
    if (__result) {{
        delete __state;
        return nullptr;
    }}
    return __state;
}}

DACE_EXPORTED void __dace_exit_{sdfg.name}({sdfg.name}_t *__state)
{{
""", sdfg)

    # Instrumentation saving (end-of-session mode)
    if (not config.Config.get_bool('instrumentation',
                                   'report_each_invocation')
            and len(self._dispatcher.instrumentation) > 1):
        callsite_stream.write(
            '__state->report.save("%s/perf", __HASH_%s);' %
            (sdfg.build_folder.replace('\\', '/'), sdfg.name), sdfg)

    callsite_stream.write(self._exitcode.getvalue(), sdfg)

    for sd in sdfg.all_sdfgs_recursive():
        if None in sd.exit_code:
            callsite_stream.write(codeblock_to_cpp(sd.exit_code[None]), sd)
        callsite_stream.write(codeblock_to_cpp(sd.exit_code['frame']), sd)

    for target in self._dispatcher.used_targets:
        if target.has_finalizer:
            callsite_stream.write(
                '__dace_exit_%s(__state);' % target.target_name, sdfg)
    # Environments finalize in reverse order of initialization
    for env in reversed(self.environments):
        finalize_code = _get_or_eval_sdfg_first_arg(env.finalize_code, sdfg)
        if finalize_code:
            callsite_stream.write("{ // Environment: " + env.__name__,
                                  sdfg)
            callsite_stream.write(finalize_code)
            callsite_stream.write("}")

    callsite_stream.write('delete __state;\n}\n', sdfg)
def generate_state(self,
                   sdfg,
                   state,
                   global_stream,
                   callsite_stream,
                   generate_state_footer=True):
    """ Generate the code for a single SDFG state: allocate its transients,
        dispatch its (possibly concurrent) dataflow subgraphs to the
        registered targets, and optionally emit the deallocation footer.
        :param sdfg: The SDFG containing the state.
        :param state: The state to generate code for.
        :param global_stream: Stream to write to (global).
        :param callsite_stream: Stream to write to (at call site).
        :param generate_state_footer: If True, also emits transient
            deallocation and end-of-state instrumentation.
    """
    sid = sdfg.node_id(state)

    # Emit internal transient array allocation
    # Don't allocate transients shared with another state
    data_to_allocate = (set(state.top_level_transients()) -
                        set(sdfg.shared_transients()))
    allocated = set()
    for node in state.data_nodes():
        if node.data not in data_to_allocate or node.data in allocated:
            continue
        allocated.add(node.data)
        self._dispatcher.dispatch_allocate(sdfg, state, sid, node,
                                           global_stream, callsite_stream)

    callsite_stream.write('\n')

    # Emit internal transient array allocation for nested SDFGs
    # TODO: Replace with global allocation management
    gpu_persistent_subgraphs = [
        state.scope_subgraph(node) for node in state.nodes()
        if isinstance(node, dace.nodes.MapEntry)
        and node.map.schedule == dace.ScheduleType.GPU_Persistent
    ]
    nested_allocated = set()
    for sub_graph in gpu_persistent_subgraphs:
        for nested_sdfg in [
                n.sdfg for n in sub_graph.nodes()
                if isinstance(n, nodes.NestedSDFG)
        ]:
            nested_shared_transients = set(nested_sdfg.shared_transients())
            for nested_state in nested_sdfg.nodes():
                nested_sid = nested_sdfg.node_id(nested_state)
                nested_to_allocate = (
                    set(nested_state.top_level_transients()) -
                    nested_shared_transients)
                nodes_to_allocate = [
                    n for n in nested_state.data_nodes()
                    if n.data in nested_to_allocate
                    and n.data not in nested_allocated
                ]
                for nested_node in nodes_to_allocate:
                    nested_allocated.add(nested_node.data)
                    self._dispatcher.dispatch_allocate(
                        nested_sdfg, nested_state, nested_sid, nested_node,
                        global_stream, callsite_stream)

    callsite_stream.write('\n')

    # Invoke all instrumentation providers
    for instr in self._dispatcher.instrumentation.values():
        if instr is not None:
            instr.on_state_begin(sdfg, state, callsite_stream,
                                 global_stream)

    #####################
    # Create dataflow graph for state's children.

    # DFG to code scheme: Only generate code for nodes whose all
    # dependencies have been executed (topological sort).
    # For different connected components, run them concurrently.

    components = dace.sdfg.concurrent_subgraphs(state)

    if len(components) == 1:
        self._dispatcher.dispatch_subgraph(sdfg,
                                           state,
                                           sid,
                                           global_stream,
                                           callsite_stream,
                                           skip_entry_node=False)
    else:
        # Multiple independent components: optionally wrap each one in an
        # OpenMP section so they may execute concurrently.
        if config.Config.get_bool('compiler', 'cpu', 'openmp_sections'):
            callsite_stream.write("#pragma omp parallel sections\n{")
        for c in components:
            if config.Config.get_bool('compiler', 'cpu', 'openmp_sections'):
                callsite_stream.write("#pragma omp section\n{")
            self._dispatcher.dispatch_subgraph(sdfg,
                                               c,
                                               sid,
                                               global_stream,
                                               callsite_stream,
                                               skip_entry_node=False)
            if config.Config.get_bool('compiler', 'cpu', 'openmp_sections'):
                callsite_stream.write("} // End omp section")
        if config.Config.get_bool('compiler', 'cpu', 'openmp_sections'):
            callsite_stream.write("} // End omp sections")

    #####################
    # Write state footer

    if generate_state_footer:
        # Emit internal transient array deallocation for nested SDFGs
        # TODO: Replace with global allocation management
        gpu_persistent_subgraphs = [
            state.scope_subgraph(node) for node in state.nodes()
            if isinstance(node, dace.nodes.MapEntry)
            and node.map.schedule == dace.ScheduleType.GPU_Persistent
        ]
        nested_deallocated = set()
        for sub_graph in gpu_persistent_subgraphs:
            for nested_sdfg in [
                    n.sdfg for n in sub_graph.nodes()
                    if isinstance(n, nodes.NestedSDFG)
            ]:
                nested_shared_transients = \
                    set(nested_sdfg.shared_transients())
                for nested_state in nested_sdfg:
                    nested_sid = nested_sdfg.node_id(nested_state)
                    nested_to_allocate = (
                        set(nested_state.top_level_transients()) -
                        nested_shared_transients)
                    nodes_to_deallocate = [
                        n for n in nested_state.data_nodes()
                        if n.data in nested_to_allocate
                        and n.data not in nested_deallocated
                    ]
                    for nested_node in nodes_to_deallocate:
                        nested_deallocated.add(nested_node.data)
                        self._dispatcher.dispatch_deallocate(
                            nested_sdfg, nested_state, nested_sid,
                            nested_node, global_stream, callsite_stream)

        # Emit internal transient array deallocation
        # (skips non-transient arrays even if they appeared allocatable)
        deallocated = set()
        for node in state.data_nodes():
            if (node.data not in data_to_allocate
                    or node.data in deallocated
                    or (node.data in sdfg.arrays
                        and sdfg.arrays[node.data].transient == False)):
                continue
            deallocated.add(node.data)
            self._dispatcher.dispatch_deallocate(sdfg, state, sid, node,
                                                 global_stream,
                                                 callsite_stream)

        # Invoke all instrumentation providers
        for instr in self._dispatcher.instrumentation.values():
            if instr is not None:
                instr.on_state_end(sdfg, state, callsite_stream,
                                   global_stream)
def generate_states(self, sdfg, global_stream, callsite_stream):
    """ Generate the code for all states of an SDFG by building a control
        flow tree and emitting it as C++.
        :param sdfg: The SDFG to generate state code for.
        :param global_stream: Stream to write to (global).
        :param callsite_stream: Stream to write to (at call site).
        :return: The set of states for which code was actually generated.
    """
    states_generated = set()

    # Create closure + function for state dispatcher
    def dispatch_state(state: SDFGState) -> str:
        stream = CodeIOStream()
        self._dispatcher.dispatch_state(sdfg, state, global_stream, stream)
        states_generated.add(state)  # For sanity check
        return stream.getvalue()

    # Handle specialized control flow
    if config.Config.get_bool('optimizer', 'detect_control_flow'):
        # Avoid import loop
        from dace.transformation import helpers as xfh

        # Clean up the state machine by separating combined condition and assignment
        # edges.
        xfh.split_interstate_edges(sdfg)

        cft = cflow.structured_control_flow_tree(sdfg, dispatch_state)
    else:
        # If disabled, generate entire graph as general control flow block
        states_topological = list(sdfg.topological_sort(sdfg.start_state))
        last = states_topological[-1]
        cft = cflow.GeneralBlock(dispatch_state, [
            cflow.SingleState(dispatch_state, s, s is last)
            for s in states_topological
        ], [])

    callsite_stream.write(
        cft.as_cpp(self.dispatcher.defined_vars, sdfg.symbols), sdfg)

    # Write exit label (jump target used to leave the state machine early)
    callsite_stream.write(f'__state_exit_{sdfg.sdfg_id}:;', sdfg)

    return states_generated
    def generate_code(
        self,
        sdfg: SDFG,
        schedule: Optional[dtypes.ScheduleType],
        sdfg_id: str = ""
    ) -> Tuple[str, str, Set[TargetCodeGenerator], Set[str]]:
        """ Generate frame code for a given SDFG, calling registered targets'
            code generation callbacks for them to generate their own code.

            :param sdfg: The SDFG to generate code for.
            :param schedule: The schedule the SDFG is currently located, or
                             None if the SDFG is top-level.
            :param sdfg_id: An optional string id given to the SDFG label
            :return: A tuple of the generated global frame code, local frame
                     code, and a set of targets that have been used in the
                     generation of this SDFG.
        """
        if len(sdfg_id) == 0 and sdfg.sdfg_id != 0:
            sdfg_id = '_%d' % sdfg.sdfg_id
        global_stream = CodeIOStream()
        callsite_stream = CodeIOStream()
        # Only the top-level SDFG gets a full header/footer and C function
        # wrapper; nested SDFGs return their raw stream contents.
        is_top_level = sdfg.parent is None
        # Generate code
        ###########################
        # Keep track of allocated variables
        allocated = set()
        # Add symbol mappings to allocated variables
        if sdfg.parent_nsdfg_node is not None:
            allocated |= sdfg.parent_nsdfg_node.symbol_mapping.keys()
        # Invoke all instrumentation providers
        for instr in self._dispatcher.instrumentation.values():
            if instr is not None:
                instr.on_sdfg_begin(sdfg, callsite_stream, global_stream)
        # Allocate outer-level transients
        shared_transients = sdfg.shared_transients()
        for state in sdfg.nodes():
            for node in state.data_nodes():
                if (node.data in shared_transients
                        and node.data not in allocated):
                    self._dispatcher.dispatch_allocate(sdfg, state, None, node,
                                                       global_stream,
                                                       callsite_stream)
                    allocated.add(node.data)
        # Allocate inter-state variables
        global_symbols = copy.deepcopy(sdfg.symbols)
        global_symbols.update(
            {aname: arr.dtype
             for aname, arr in sdfg.arrays.items()})
        interstate_symbols = {}
        for e in sdfg.edges():
            symbols = e.data.new_symbols(global_symbols)
            # Inferred symbols only take precedence if global symbol not defined
            symbols = {
                k: v if k not in global_symbols else global_symbols[k]
                for k, v in symbols.items()
            }
            interstate_symbols.update(symbols)
            global_symbols.update(symbols)
        for isvarName, isvarType in interstate_symbols.items():
            # Skip symbols that have been declared as outer-level transients
            if isvarName in allocated:
                continue
            isvar = data.Scalar(isvarType)
            callsite_stream.write(
                '%s;\n' % (isvar.as_arg(with_types=True, name=isvarName)), sdfg)
            self.dispatcher.defined_vars.add(isvarName, isvarType,
                                             isvarType.ctype)
        callsite_stream.write('\n', sdfg)
        #######################################################################
        # Generate actual program body
        states_generated = self.generate_states(sdfg, global_stream,
                                                callsite_stream)
        #######################################################################
        # Sanity check
        if len(states_generated) != len(sdfg.nodes()):
            raise RuntimeError(
                "Not all states were generated in SDFG {}!"
                "\n Generated: {}\n Missing: {}".format(
                    sdfg.label, [s.label for s in states_generated],
                    [s.label for s in (set(sdfg.nodes()) - states_generated)]))
        # Deallocate transients
        shared_transients = sdfg.shared_transients()
        deallocated = set()
        for state in sdfg.nodes():
            for node in state.data_nodes():
                if (node.data in shared_transients
                        and node.data not in deallocated):
                    self._dispatcher.dispatch_deallocate(
                        sdfg, state, None, node, global_stream, callsite_stream)
                    deallocated.add(node.data)
        # Now that we have all the information about dependencies, generate
        # header and footer
        if is_top_level:
            # Let each target append code to frame code state before generating
            # header and footer
            for target in self._dispatcher.used_targets:
                target.on_target_used()
            header_stream = CodeIOStream()
            header_global_stream = CodeIOStream()
            footer_stream = CodeIOStream()
            footer_global_stream = CodeIOStream()
            # Get all environments used in the generated code, including
            # dependent environments
            import dace.library  # Avoid import loops
            self.environments = dace.library.get_environments_and_dependencies(
                self._dispatcher.used_environments)
            self.generate_header(sdfg, header_global_stream, header_stream)
            # Open program function
            params = sdfg.signature()
            if params:
                params = ', ' + params
            function_signature = (
                'void __program_%s_internal(%s_t *__state%s)\n{\n' %
                (sdfg.name, sdfg.name, params))
            self.generate_footer(sdfg, footer_global_stream, footer_stream)
            header_global_stream.write(global_stream.getvalue())
            header_global_stream.write(footer_global_stream.getvalue())
            generated_header = header_global_stream.getvalue()
            all_code = CodeIOStream()
            all_code.write(function_signature)
            all_code.write(header_stream.getvalue())
            all_code.write(callsite_stream.getvalue())
            all_code.write(footer_stream.getvalue())
            generated_code = all_code.getvalue()
        else:
            generated_header = global_stream.getvalue()
            generated_code = callsite_stream.getvalue()
        # Clean up generated code: drop empty ';' lines and labels that no
        # goto references.
        gotos = re.findall(r'goto (.*);', generated_code)
        clean_code = ''
        for line in generated_code.split('\n'):
            # Empty line with semicolon
            if re.match(r'^\s*;\s*', line):
                continue
            # Label that might be unused
            # NOTE(review): the pattern only matches labels followed by a
            # '////' marker comment — presumably emitted by the control-flow
            # generator; confirm before changing.
            label = re.findall(
                r'^\s*([a-zA-Z_][a-zA-Z_0-9]*):\s*[;]?\s*////.*$', line)
            if len(label) > 0:
                if label[0] not in gotos:
                    continue
            clean_code += line + '\n'
        # Return the generated global and local code strings
        return (generated_header, clean_code, self._dispatcher.used_targets,
                self._dispatcher.used_environments)
| 42.384977 | 88 | 0.553796 |
from typing import Optional, Set, Tuple
import collections
import copy
import dace
import functools
import re
from dace.codegen import control_flow as cflow
from dace.codegen import dispatcher as disp
from dace.codegen.prettycode import CodeIOStream
from dace.codegen.targets.common import codeblock_to_cpp, sym2cpp
from dace.codegen.targets.cpp import unparse_interstate_edge
from dace.codegen.targets.target import TargetCodeGenerator
from dace.sdfg import SDFG, SDFGState, ScopeSubgraphView
from dace.sdfg import nodes
from dace.sdfg.infer_types import set_default_schedule_and_storage_types
from dace import dtypes, data, config
from typing import Any, List
from dace.frontend.python import wrappers
import networkx as nx
import numpy as np
def _get_or_eval_sdfg_first_arg(func, sdfg):
if callable(func):
return func(sdfg)
return func
class DaCeCodeGenerator(object):
def __init__(self, *args, **kwargs):
self._dispatcher = disp.TargetDispatcher(self)
self._dispatcher.register_state_dispatcher(self)
self._initcode = CodeIOStream()
self._exitcode = CodeIOStream()
self.statestruct: List[str] = []
self.environments: List[Any] = []
# Create dataflow graph for state's children.
components = dace.sdfg.concurrent_subgraphs(state)
if len(components) == 1:
self._dispatcher.dispatch_subgraph(sdfg,
state,
sid,
global_stream,
callsite_stream,
skip_entry_node=False)
else:
if config.Config.get_bool('compiler', 'cpu', 'openmp_sections'):
callsite_stream.write("#pragma omp parallel sections\n{")
for c in components:
if config.Config.get_bool('compiler', 'cpu', 'openmp_sections'):
callsite_stream.write("#pragma omp section\n{")
self._dispatcher.dispatch_subgraph(sdfg,
c,
sid,
global_stream,
callsite_stream,
skip_entry_node=False)
if config.Config.get_bool('compiler', 'cpu', 'openmp_sections'):
callsite_stream.write("} // End omp section")
if config.Config.get_bool('compiler', 'cpu', 'openmp_sections'):
callsite_stream.write("} // End omp sections")
e(node, dace.nodes.MapEntry)
and node.map.schedule == dace.ScheduleType.GPU_Persistent
]
nested_deallocated = set()
for sub_graph in gpu_persistent_subgraphs:
for nested_sdfg in [
n.sdfg for n in sub_graph.nodes()
if isinstance(n, nodes.NestedSDFG)
]:
nested_shared_transients = \
set(nested_sdfg.shared_transients())
for nested_state in nested_sdfg:
nested_sid = nested_sdfg.node_id(nested_state)
nested_to_allocate = (
set(nested_state.top_level_transients()) -
nested_shared_transients)
nodes_to_deallocate = [
n for n in nested_state.data_nodes()
if n.data in nested_to_allocate
and n.data not in nested_deallocated
]
for nested_node in nodes_to_deallocate:
nested_deallocated.add(nested_node.data)
self._dispatcher.dispatch_deallocate(
nested_sdfg, nested_state, nested_sid,
nested_node, global_stream, callsite_stream)
deallocated = set()
for node in state.data_nodes():
if (node.data not in data_to_allocate
or node.data in deallocated
or (node.data in sdfg.arrays
and sdfg.arrays[node.data].transient == False)):
continue
deallocated.add(node.data)
self._dispatcher.dispatch_deallocate(sdfg, state, sid, node,
global_stream,
callsite_stream)
for instr in self._dispatcher.instrumentation.values():
if instr is not None:
instr.on_state_end(sdfg, state, callsite_stream,
global_stream)
def generate_states(self, sdfg, global_stream, callsite_stream):
states_generated = set()
def dispatch_state(state: SDFGState) -> str:
stream = CodeIOStream()
self._dispatcher.dispatch_state(sdfg, state, global_stream, stream)
states_generated.add(state)
return stream.getvalue()
if config.Config.get_bool('optimizer', 'detect_control_flow'):
from dace.transformation import helpers as xfh
xfh.split_interstate_edges(sdfg)
cft = cflow.structured_control_flow_tree(sdfg, dispatch_state)
else:
states_topological = list(sdfg.topological_sort(sdfg.start_state))
last = states_topological[-1]
cft = cflow.GeneralBlock(dispatch_state, [
cflow.SingleState(dispatch_state, s, s is last)
for s in states_topological
], [])
callsite_stream.write(
cft.as_cpp(self.dispatcher.defined_vars, sdfg.symbols), sdfg)
callsite_stream.write(f'__state_exit_{sdfg.sdfg_id}:;', sdfg)
return states_generated
def generate_code(
self,
sdfg: SDFG,
schedule: Optional[dtypes.ScheduleType],
sdfg_id: str = ""
) -> Tuple[str, str, Set[TargetCodeGenerator], Set[str]]:
if len(sdfg_id) == 0 and sdfg.sdfg_id != 0:
sdfg_id = '_%d' % sdfg.sdfg_id
global_stream = CodeIOStream()
callsite_stream = CodeIOStream()
is_top_level = sdfg.parent is None
shared_transients = sdfg.shared_transients()
for state in sdfg.nodes():
for node in state.data_nodes():
if (node.data in shared_transients
and node.data not in allocated):
self._dispatcher.dispatch_allocate(sdfg, state, None, node,
global_stream,
callsite_stream)
allocated.add(node.data)
global_symbols = copy.deepcopy(sdfg.symbols)
global_symbols.update(
{aname: arr.dtype
for aname, arr in sdfg.arrays.items()})
interstate_symbols = {}
for e in sdfg.edges():
symbols = e.data.new_symbols(global_symbols)
symbols = {
k: v if k not in global_symbols else global_symbols[k]
for k, v in symbols.items()
}
interstate_symbols.update(symbols)
global_symbols.update(symbols)
for isvarName, isvarType in interstate_symbols.items():
if isvarName in allocated:
continue
isvar = data.Scalar(isvarType)
callsite_stream.write(
'%s;\n' % (isvar.as_arg(with_types=True, name=isvarName)), sdfg)
self.dispatcher.defined_vars.add(isvarName, isvarType,
isvarType.ctype)
callsite_stream.write('\n', sdfg)
| true | true |
1c3c5264750eed447ffd84dd0522f448f45fe831 | 69 | py | Python | double3/double3sdk/tilt/tilt.py | CLOMING/winter2021_double | 9b920baaeb3736a785a6505310b972c49b5b21e9 | [
"Apache-2.0"
] | null | null | null | double3/double3sdk/tilt/tilt.py | CLOMING/winter2021_double | 9b920baaeb3736a785a6505310b972c49b5b21e9 | [
"Apache-2.0"
] | null | null | null | double3/double3sdk/tilt/tilt.py | CLOMING/winter2021_double | 9b920baaeb3736a785a6505310b972c49b5b21e9 | [
"Apache-2.0"
] | null | null | null | from double3sdk.double_api import _DoubleAPI
class _Tilt:
pass
| 11.5 | 44 | 0.782609 | from double3sdk.double_api import _DoubleAPI
class _Tilt:
pass
| true | true |
1c3c53632c876445c634eb0c6daafc59ef50abc0 | 17,388 | py | Python | tests/test_filtering.py | nextfit/tortoise-orm | 06bcecaf769d12c21c0da950950f030c8e74659a | [
"Apache-2.0"
] | null | null | null | tests/test_filtering.py | nextfit/tortoise-orm | 06bcecaf769d12c21c0da950950f030c8e74659a | [
"Apache-2.0"
] | null | null | null | tests/test_filtering.py | nextfit/tortoise-orm | 06bcecaf769d12c21c0da950950f030c8e74659a | [
"Apache-2.0"
] | null | null | null | from pypika.functions import Coalesce, Count, Length, Lower, Trim, Upper
from tests.testmodels import Event, IntFields, Reporter, Team, Tournament
from tortoise.contrib import test
from tortoise.exceptions import FieldError
from tortoise.filters.q import Q
from tortoise.query.expressions import F
class TestFiltering(test.TortoiseTransactionedTestModelsTestCase):
    async def test_filtering(self):
        """Broad smoke test: Q objects, M2M lookups, string lookups, distinct."""
        tournament = Tournament(name="Tournament")
        await tournament.save()
        second_tournament = Tournament(name="Tournament 2")
        await second_tournament.save()
        event_first = Event(name="1", tournament=tournament)
        await event_first.save()
        event_second = Event(name="2", tournament=second_tournament)
        await event_second.save()
        event_third = Event(name="3", tournament=tournament)
        await event_third.save()
        event_forth = Event(name="4", tournament=second_tournament)
        await event_forth.save()
        team_first = Team(name="First")
        await team_first.save()
        team_second = Team(name="Second")
        await team_second.save()
        await team_first.events.add(event_first)
        await event_second.participants.add(team_second)
        # OR of an __in lookup with an exact match, then exclude one participant.
        found_events = (
            await Event.filter(Q(id__in=[event_first.id, event_second.id]) | Q(name="3"))
            .filter(participants__not=team_second.id)
            .order_by("name", "tournament_id")
            .distinct()
        )
        self.assertEqual(len(found_events), 2)
        self.assertEqual(found_events[0].id, event_first.id)
        self.assertEqual(found_events[1].id, event_third.id)
        # These two only assert that the queries build and execute.
        await Team.filter(events__tournament_id=tournament.id).order_by("-events__name")
        await Tournament.filter(events__name__in=["1", "3"]).distinct()
        # Case-insensitive contains / exact lookups.
        teams = await Team.filter(name__icontains="CON")
        self.assertEqual(len(teams), 1)
        self.assertEqual(teams[0].name, "Second")
        teams = await Team.filter(name__iexact="SeCoNd")
        self.assertEqual(len(teams), 1)
        self.assertEqual(teams[0].name, "Second")
        # Two-hop relation traversal: tournament -> events -> participants.
        tournaments = await Tournament.filter(events__participants__name__startswith="Fir")
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0], tournament)
    async def test_q_object_backward_related_query(self):
        """Filtering by a reverse FK accepts both a bare pk and a Q object."""
        await Tournament.create(name="0")
        tournament = await Tournament.create(name="Tournament")
        event = await Event.create(name="1", tournament=tournament)
        fetched_tournament = await Tournament.filter(events=event.id).first()
        self.assertEqual(fetched_tournament.id, tournament.id)
        fetched_tournament = await Tournament.filter(Q(events=event.id)).first()
        self.assertEqual(fetched_tournament.id, tournament.id)

    async def test_q_object_related_query(self):
        """Filtering by a forward FK accepts an instance, Q(instance) and Q(pk)."""
        tournament_first = await Tournament.create(name="0")
        tournament_second = await Tournament.create(name="1")
        event = await Event.create(name="1", tournament=tournament_second)
        await Event.create(name="1", tournament=tournament_first)
        fetched_event = await Event.filter(tournament=tournament_second).first()
        self.assertEqual(fetched_event.id, event.id)
        fetched_event = await Event.filter(Q(tournament=tournament_second)).first()
        self.assertEqual(fetched_event.id, event.id)
        fetched_event = await Event.filter(Q(tournament=tournament_second.id)).first()
        self.assertEqual(fetched_event.id, event.id)
    async def test_null_filter(self):
        """filter(fk=None) matches rows whose nullable FK is unset."""
        tournament = await Tournament.create(name="Tournament")
        reporter = await Reporter.create(name="John")
        await Event.create(name="2", tournament=tournament, reporter=reporter)
        event = await Event.create(name="1", tournament=tournament)
        fetched_events = await Event.filter(reporter=None)
        self.assertEqual(len(fetched_events), 1)
        self.assertEqual(fetched_events[0].id, event.id)

    async def test_exclude(self):
        """exclude() drops rows matching the given lookup."""
        await Tournament.create(name="0")
        tournament = await Tournament.create(name="1")
        tournaments = await Tournament.exclude(name="0")
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].name, tournament.name)

    async def test_exclude_with_filter(self):
        """exclude() composes with a subsequent filter() call."""
        await Tournament.create(name="0")
        tournament = await Tournament.create(name="1")
        await Tournament.create(name="2")
        tournaments = await Tournament.exclude(name="0").filter(id=tournament.id)
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].name, tournament.name)

    async def test_filter_null_on_related(self):
        """A None lookup works across an M2M -> FK chain (events__reporter)."""
        tournament = await Tournament.create(name="Tournament")
        reporter = await Reporter.create(name="John")
        event_first = await Event.create(name="1", tournament=tournament, reporter=reporter)
        event_second = await Event.create(name="2", tournament=tournament)
        team_first = await Team.create(name="1")
        team_second = await Team.create(name="2")
        await event_first.participants.add(team_first)
        await event_second.participants.add(team_second)
        fetched_teams = await Team.filter(events__reporter=None)
        self.assertEqual(len(fetched_teams), 1)
        self.assertEqual(fetched_teams[0].id, team_second.id)
async def test_filter_or(self):
await Tournament.create(name="0")
await Tournament.create(name="1")
await Tournament.create(name="2")
tournaments = await Tournament.filter(Q(name="1") | Q(name="2"))
self.assertEqual(len(tournaments), 2)
self.assertSetEqual({t.name for t in tournaments}, {"1", "2"})
async def test_filter_not(self):
await Tournament.create(name="0")
await Tournament.create(name="1")
tournaments = await Tournament.filter(~Q(name="1"))
self.assertEqual(len(tournaments), 1)
self.assertEqual(tournaments[0].name, "0")
    async def test_filter_not_with_or(self):
        """``Q | ~Q`` mixes a positive and a negated branch in one filter."""
        await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        tournaments = await Tournament.filter(Q(name="1") | ~Q(name="2"))
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"0", "1"})
async def test_filter_with_f_expression(self):
await IntFields.create(intnum=1, intnum_null=1)
await IntFields.create(intnum=2, intnum_null=1)
self.assertEqual(await IntFields.filter(intnum=F("intnum_null")).count(), 1)
self.assertEqual(await IntFields.filter(intnum__gte=F("intnum_null")).count(), 2)
self.assertEqual(
await IntFields.filter(intnum=F("intnum_null") + F("intnum_null")).count(), 1
)
    async def test_filter_by_aggregation_field(self):
        """An annotated Count() can be used directly as a filter field."""
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Event.create(name="2", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(events_count=1)
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament.id)

    async def test_filter_by_aggregation_field_with_and(self):
        """Annotation filter ANDed with a plain field filter (kwargs form)."""
        tournament = await Tournament.create(name="0")
        tournament_second = await Tournament.create(name="1")
        await Event.create(name="1", tournament=tournament)
        await Event.create(name="2", tournament=tournament_second)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            events_count=1, name="0"
        )
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament.id)

    async def test_filter_by_aggregation_field_with_and_as_one_node(self):
        """Same AND condition expressed as a single Q node."""
        tournament = await Tournament.create(name="0")
        tournament_second = await Tournament.create(name="1")
        await Event.create(name="1", tournament=tournament)
        await Event.create(name="2", tournament=tournament_second)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(events_count=1, name="0")
        )
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament.id)

    async def test_filter_by_aggregation_field_with_and_as_two_nodes(self):
        """Same AND condition expressed as two Q nodes joined with ``&``."""
        tournament = await Tournament.create(name="0")
        tournament_second = await Tournament.create(name="1")
        await Event.create(name="1", tournament=tournament)
        await Event.create(name="2", tournament=tournament_second)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(events_count=1) & Q(name="0")
        )
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament.id)

    async def test_filter_by_aggregation_field_with_or(self):
        """OR between an annotation lookup and a plain field lookup."""
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(events_count=1) | Q(name="2")
        )
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"0", "2"})

    async def test_filter_by_aggregation_field_with_or_reversed(self):
        """Operand order of the OR must not change the result."""
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(name="2") | Q(events_count=1)
        )
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"0", "2"})

    async def test_filter_by_aggregation_field_with_or_as_one_node(self):
        """OR expressed inside one Q node via ``join_type=Q.OR``."""
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(events_count=1, name="2", join_type=Q.OR)
        )
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"0", "2"})

    async def test_filter_by_aggregation_field_with_not(self):
        """Negating a combined annotation+field Q node."""
        tournament = await Tournament.create(name="0")
        tournament_second = await Tournament.create(name="1")
        await Event.create(name="1", tournament=tournament)
        await Event.create(name="2", tournament=tournament_second)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            ~Q(events_count=1, name="0")
        )
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament_second.id)

    async def test_filter_by_aggregation_field_with_or_not(self):
        """Negation of an OR of annotation/field lookups."""
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            ~(Q(events_count=1) | Q(name="2"))
        )
        self.assertEqual(len(tournaments), 1)
        self.assertSetEqual({t.name for t in tournaments}, {"1"})

    async def test_filter_by_aggregation_field_with_or_not_reversed(self):
        """Negated OR with the operands swapped."""
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            ~(Q(name="2") | Q(events_count=1))
        )
        self.assertEqual(len(tournaments), 1)
        self.assertSetEqual({t.name for t in tournaments}, {"1"})
    async def test_filter_by_aggregation_field_trim(self):
        """Filter on a Trim() annotation; the stored name keeps its spaces."""
        await Tournament.create(name=" 1 ")
        await Tournament.create(name="2 ")
        tournaments = await Tournament.annotate(trimmed_name=Trim("name")).filter(trimmed_name="1")
        self.assertEqual(len(tournaments), 1)
        self.assertSetEqual({(t.name, t.trimmed_name) for t in tournaments}, {(" 1 ", "1")})

    async def test_filter_by_aggregation_field_length(self):
        """Comparison lookup (gte) against a Length() annotation."""
        await Tournament.create(name="12345")
        await Tournament.create(name="123")
        await Tournament.create(name="1234")
        tournaments = await Tournament.annotate(name_len=Length("name")).filter(name_len__gte=4)
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"1234", "12345"})

    async def test_filter_by_aggregation_field_coalesce(self):
        """Coalesce() substitutes the default for NULL desc values."""
        await Tournament.create(name="1", desc="demo")
        await Tournament.create(name="2")
        tournaments = await Tournament.annotate(clean_desc=Coalesce("desc", "demo")).filter(
            clean_desc="demo"
        )
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual(
            {(t.name, t.clean_desc) for t in tournaments}, {("1", "demo"), ("2", "demo")}
        )

    async def test_filter_by_aggregation_field_coalesce_numeric(self):
        """Coalesce() with a numeric default plus an __in lookup."""
        await IntFields.create(intnum=1, intnum_null=10)
        await IntFields.create(intnum=4)
        ints = await IntFields.annotate(clean_intnum_null=Coalesce("intnum_null", 0)).filter(
            clean_intnum_null__in=(0, 10)
        )
        self.assertEqual(len(ints), 2)
        self.assertSetEqual(
            {(i.intnum_null, i.clean_intnum_null) for i in ints}, {(None, 0), (10, 10)}
        )

    async def test_filter_by_aggregation_field_comparison_coalesce_numeric(self):
        """Greater-than lookup against a numeric Coalesce() annotation."""
        await IntFields.create(intnum=3, intnum_null=10)
        await IntFields.create(intnum=1, intnum_null=4)
        await IntFields.create(intnum=2)
        ints = await IntFields.annotate(clean_intnum_null=Coalesce("intnum_null", 0)).filter(
            clean_intnum_null__gt=0
        )
        self.assertEqual(len(ints), 2)
        self.assertSetEqual({i.clean_intnum_null for i in ints}, {10, 4})

    async def test_filter_by_aggregation_field_comparison_length(self):
        """Two annotations (Length + Count) filtered together in one query."""
        t1 = await Tournament.create(name="Tournament")
        await Event.create(name="event1", tournament=t1)
        await Event.create(name="event2", tournament=t1)
        t2 = await Tournament.create(name="contest")
        await Event.create(name="event3", tournament=t2)
        await Tournament.create(name="Championship")
        t4 = await Tournament.create(name="local")
        await Event.create(name="event4", tournament=t4)
        await Event.create(name="event5", tournament=t4)
        tournaments = await Tournament.annotate(
            name_len=Length("name"), event_count=Count("events")
        ).filter(name_len__gt=5, event_count=2)
        self.assertEqual(len(tournaments), 1)
        self.assertSetEqual({t.name for t in tournaments}, {"Tournament"})
    async def test_filter_by_annotation_lower(self):
        """Lower() annotation is populated on the fetched instances."""
        await Tournament.create(name="Tournament")
        await Tournament.create(name="NEW Tournament")
        tournaments = await Tournament.annotate(name_lower=Lower("name"))
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name_lower for t in tournaments}, {"tournament", "new tournament"})

    async def test_filter_by_annotation_upper(self):
        """Upper() annotation is populated on the fetched instances."""
        await Tournament.create(name="ToUrnAmEnT")
        await Tournament.create(name="new TOURnament")
        tournaments = await Tournament.annotate(name_upper=Upper("name"))
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name_upper for t in tournaments}, {"TOURNAMENT", "NEW TOURNAMENT"})
async def test_values_select_relation(self):
with self.assertRaises(FieldError):
tournament = await Tournament.create(name="New Tournament")
await Event.create(name="Test", tournament_id=tournament.id)
await Event.all().values("tournament")
    async def test_values_select_relation_field(self):
        """values() can traverse a FK with the double-underscore syntax."""
        tournament = await Tournament.create(name="New Tournament")
        await Event.create(name="Test", tournament_id=tournament.id)
        event_tournaments = await Event.all().values("tournament__name")
        self.assertEqual(event_tournaments[0]["tournament__name"], tournament.name)

    async def test_values_select_relation_field_name_override(self):
        """A keyword argument renames the related column in the result dict."""
        tournament = await Tournament.create(name="New Tournament")
        await Event.create(name="Test", tournament_id=tournament.id)
        event_tournaments = await Event.all().values(tour="tournament__name")
        self.assertEqual(event_tournaments[0]["tour"], tournament.name)

    async def test_values_list_select_relation_field(self):
        """values_list() also supports related-field traversal."""
        tournament = await Tournament.create(name="New Tournament")
        await Event.create(name="Test", tournament_id=tournament.id)
        event_tournaments = await Event.all().values_list("tournament__name")
        self.assertEqual(event_tournaments[0][0], tournament.name)
| 45.518325 | 100 | 0.679549 | from pypika.functions import Coalesce, Count, Length, Lower, Trim, Upper
from tests.testmodels import Event, IntFields, Reporter, Team, Tournament
from tortoise.contrib import test
from tortoise.exceptions import FieldError
from tortoise.filters.q import Q
from tortoise.query.expressions import F
class TestFiltering(test.TortoiseTransactionedTestModelsTestCase):
async def test_filtering(self):
tournament = Tournament(name="Tournament")
await tournament.save()
second_tournament = Tournament(name="Tournament 2")
await second_tournament.save()
event_first = Event(name="1", tournament=tournament)
await event_first.save()
event_second = Event(name="2", tournament=second_tournament)
await event_second.save()
event_third = Event(name="3", tournament=tournament)
await event_third.save()
event_forth = Event(name="4", tournament=second_tournament)
await event_forth.save()
team_first = Team(name="First")
await team_first.save()
team_second = Team(name="Second")
await team_second.save()
await team_first.events.add(event_first)
await event_second.participants.add(team_second)
found_events = (
await Event.filter(Q(id__in=[event_first.id, event_second.id]) | Q(name="3"))
.filter(participants__not=team_second.id)
.order_by("name", "tournament_id")
.distinct()
)
self.assertEqual(len(found_events), 2)
self.assertEqual(found_events[0].id, event_first.id)
self.assertEqual(found_events[1].id, event_third.id)
await Team.filter(events__tournament_id=tournament.id).order_by("-events__name")
await Tournament.filter(events__name__in=["1", "3"]).distinct()
teams = await Team.filter(name__icontains="CON")
self.assertEqual(len(teams), 1)
self.assertEqual(teams[0].name, "Second")
teams = await Team.filter(name__iexact="SeCoNd")
self.assertEqual(len(teams), 1)
self.assertEqual(teams[0].name, "Second")
tournaments = await Tournament.filter(events__participants__name__startswith="Fir")
self.assertEqual(len(tournaments), 1)
self.assertEqual(tournaments[0], tournament)
async def test_q_object_backward_related_query(self):
await Tournament.create(name="0")
tournament = await Tournament.create(name="Tournament")
event = await Event.create(name="1", tournament=tournament)
fetched_tournament = await Tournament.filter(events=event.id).first()
self.assertEqual(fetched_tournament.id, tournament.id)
fetched_tournament = await Tournament.filter(Q(events=event.id)).first()
self.assertEqual(fetched_tournament.id, tournament.id)
async def test_q_object_related_query(self):
tournament_first = await Tournament.create(name="0")
tournament_second = await Tournament.create(name="1")
event = await Event.create(name="1", tournament=tournament_second)
await Event.create(name="1", tournament=tournament_first)
fetched_event = await Event.filter(tournament=tournament_second).first()
self.assertEqual(fetched_event.id, event.id)
fetched_event = await Event.filter(Q(tournament=tournament_second)).first()
self.assertEqual(fetched_event.id, event.id)
fetched_event = await Event.filter(Q(tournament=tournament_second.id)).first()
self.assertEqual(fetched_event.id, event.id)
async def test_null_filter(self):
tournament = await Tournament.create(name="Tournament")
reporter = await Reporter.create(name="John")
await Event.create(name="2", tournament=tournament, reporter=reporter)
event = await Event.create(name="1", tournament=tournament)
fetched_events = await Event.filter(reporter=None)
self.assertEqual(len(fetched_events), 1)
self.assertEqual(fetched_events[0].id, event.id)
    # exclude(): rows matching the kwargs are removed from the result set.
    async def test_exclude(self):
        await Tournament.create(name="0")
        tournament = await Tournament.create(name="1")
        tournaments = await Tournament.exclude(name="0")
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].name, tournament.name)
    # exclude() and filter() can be chained on the same queryset.
    async def test_exclude_with_filter(self):
        await Tournament.create(name="0")
        tournament = await Tournament.create(name="1")
        await Tournament.create(name="2")
        tournaments = await Tournament.exclude(name="0").filter(id=tournament.id)
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].name, tournament.name)
    # Filtering a related path against None matches rows whose relation is unset.
    async def test_filter_null_on_related(self):
        tournament = await Tournament.create(name="Tournament")
        reporter = await Reporter.create(name="John")
        event_first = await Event.create(name="1", tournament=tournament, reporter=reporter)
        event_second = await Event.create(name="2", tournament=tournament)
        team_first = await Team.create(name="1")
        team_second = await Team.create(name="2")
        await event_first.participants.add(team_first)
        await event_second.participants.add(team_second)
        fetched_teams = await Team.filter(events__reporter=None)
        self.assertEqual(len(fetched_teams), 1)
        self.assertEqual(fetched_teams[0].id, team_second.id)
    # Q objects combined with | produce an OR filter.
    async def test_filter_or(self):
        await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        tournaments = await Tournament.filter(Q(name="1") | Q(name="2"))
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"1", "2"})
    # ~Q negates a single condition.
    async def test_filter_not(self):
        await Tournament.create(name="0")
        await Tournament.create(name="1")
        tournaments = await Tournament.filter(~Q(name="1"))
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].name, "0")
    # OR of a positive and a negated Q.
    async def test_filter_not_with_or(self):
        await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        tournaments = await Tournament.filter(Q(name="1") | ~Q(name="2"))
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"0", "1"})
    # F expressions compare one column against another (and support arithmetic).
    async def test_filter_with_f_expression(self):
        await IntFields.create(intnum=1, intnum_null=1)
        await IntFields.create(intnum=2, intnum_null=1)
        self.assertEqual(await IntFields.filter(intnum=F("intnum_null")).count(), 1)
        self.assertEqual(await IntFields.filter(intnum__gte=F("intnum_null")).count(), 2)
        self.assertEqual(
            await IntFields.filter(intnum=F("intnum_null") + F("intnum_null")).count(), 1
        )
    # annotate(Count) values can be used directly as filter() targets.
    async def test_filter_by_aggregation_field(self):
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Event.create(name="2", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(events_count=1)
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament.id)
    # Aggregate filter ANDed with a plain column filter via kwargs.
    async def test_filter_by_aggregation_field_with_and(self):
        tournament = await Tournament.create(name="0")
        tournament_second = await Tournament.create(name="1")
        await Event.create(name="1", tournament=tournament)
        await Event.create(name="2", tournament=tournament_second)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            events_count=1, name="0"
        )
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament.id)
    # Same AND expressed as a single Q node with two kwargs.
    async def test_filter_by_aggregation_field_with_and_as_one_node(self):
        tournament = await Tournament.create(name="0")
        tournament_second = await Tournament.create(name="1")
        await Event.create(name="1", tournament=tournament)
        await Event.create(name="2", tournament=tournament_second)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(events_count=1, name="0")
        )
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament.id)
    # Same AND expressed as two Q nodes joined with &.
    async def test_filter_by_aggregation_field_with_and_as_two_nodes(self):
        tournament = await Tournament.create(name="0")
        tournament_second = await Tournament.create(name="1")
        await Event.create(name="1", tournament=tournament)
        await Event.create(name="2", tournament=tournament_second)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(events_count=1) & Q(name="0")
        )
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament.id)
    # OR between an aggregate condition and a plain column condition.
    async def test_filter_by_aggregation_field_with_or(self):
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(events_count=1) | Q(name="2")
        )
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"0", "2"})
    # Operand order of the OR must not change the result.
    async def test_filter_by_aggregation_field_with_or_reversed(self):
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(name="2") | Q(events_count=1)
        )
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"0", "2"})
    # OR expressed inside one Q node via join_type=Q.OR.
    async def test_filter_by_aggregation_field_with_or_as_one_node(self):
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            Q(events_count=1, name="2", join_type=Q.OR)
        )
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"0", "2"})
    # Negation of an ANDed aggregate condition.
    async def test_filter_by_aggregation_field_with_not(self):
        tournament = await Tournament.create(name="0")
        tournament_second = await Tournament.create(name="1")
        await Event.create(name="1", tournament=tournament)
        await Event.create(name="2", tournament=tournament_second)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            ~Q(events_count=1, name="0")
        )
        self.assertEqual(len(tournaments), 1)
        self.assertEqual(tournaments[0].id, tournament_second.id)
    # Negation applied to a whole OR expression.
    async def test_filter_by_aggregation_field_with_or_not(self):
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            ~(Q(events_count=1) | Q(name="2"))
        )
        self.assertEqual(len(tournaments), 1)
        self.assertSetEqual({t.name for t in tournaments}, {"1"})
    # Same negated OR with the operands reversed.
    async def test_filter_by_aggregation_field_with_or_not_reversed(self):
        tournament = await Tournament.create(name="0")
        await Tournament.create(name="1")
        await Tournament.create(name="2")
        await Event.create(name="1", tournament=tournament)
        tournaments = await Tournament.annotate(events_count=Count("events")).filter(
            ~(Q(name="2") | Q(events_count=1))
        )
        self.assertEqual(len(tournaments), 1)
        self.assertSetEqual({t.name for t in tournaments}, {"1"})
    # Trim() annotation strips surrounding whitespace and is filterable.
    async def test_filter_by_aggregation_field_trim(self):
        await Tournament.create(name=" 1 ")
        await Tournament.create(name="2 ")
        tournaments = await Tournament.annotate(trimmed_name=Trim("name")).filter(trimmed_name="1")
        self.assertEqual(len(tournaments), 1)
        self.assertSetEqual({(t.name, t.trimmed_name) for t in tournaments}, {(" 1 ", "1")})
    # Length() annotation supports comparison lookups (__gte).
    async def test_filter_by_aggregation_field_length(self):
        await Tournament.create(name="12345")
        await Tournament.create(name="123")
        await Tournament.create(name="1234")
        tournaments = await Tournament.annotate(name_len=Length("name")).filter(name_len__gte=4)
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name for t in tournaments}, {"1234", "12345"})
    # Coalesce() substitutes a default for a NULL text column.
    async def test_filter_by_aggregation_field_coalesce(self):
        await Tournament.create(name="1", desc="demo")
        await Tournament.create(name="2")
        tournaments = await Tournament.annotate(clean_desc=Coalesce("desc", "demo")).filter(
            clean_desc="demo"
        )
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual(
            {(t.name, t.clean_desc) for t in tournaments}, {("1", "demo"), ("2", "demo")}
        )
    # Coalesce() with a numeric default, filtered via __in.
    async def test_filter_by_aggregation_field_coalesce_numeric(self):
        await IntFields.create(intnum=1, intnum_null=10)
        await IntFields.create(intnum=4)
        ints = await IntFields.annotate(clean_intnum_null=Coalesce("intnum_null", 0)).filter(
            clean_intnum_null__in=(0, 10)
        )
        self.assertEqual(len(ints), 2)
        self.assertSetEqual(
            {(i.intnum_null, i.clean_intnum_null) for i in ints}, {(None, 0), (10, 10)}
        )
    # Comparison lookup (__gt) on a numeric Coalesce annotation.
    async def test_filter_by_aggregation_field_comparison_coalesce_numeric(self):
        await IntFields.create(intnum=3, intnum_null=10)
        await IntFields.create(intnum=1, intnum_null=4)
        await IntFields.create(intnum=2)
        ints = await IntFields.annotate(clean_intnum_null=Coalesce("intnum_null", 0)).filter(
            clean_intnum_null__gt=0
        )
        self.assertEqual(len(ints), 2)
        self.assertSetEqual({i.clean_intnum_null for i in ints}, {10, 4})
    # A function annotation (Length) and an aggregate (Count) mixed in one filter.
    async def test_filter_by_aggregation_field_comparison_length(self):
        t1 = await Tournament.create(name="Tournament")
        await Event.create(name="event1", tournament=t1)
        await Event.create(name="event2", tournament=t1)
        t2 = await Tournament.create(name="contest")
        await Event.create(name="event3", tournament=t2)
        await Tournament.create(name="Championship")
        t4 = await Tournament.create(name="local")
        await Event.create(name="event4", tournament=t4)
        await Event.create(name="event5", tournament=t4)
        tournaments = await Tournament.annotate(
            name_len=Length("name"), event_count=Count("events")
        ).filter(name_len__gt=5, event_count=2)
        self.assertEqual(len(tournaments), 1)
        self.assertSetEqual({t.name for t in tournaments}, {"Tournament"})
    # Lower() annotation lower-cases the column value.
    async def test_filter_by_annotation_lower(self):
        await Tournament.create(name="Tournament")
        await Tournament.create(name="NEW Tournament")
        tournaments = await Tournament.annotate(name_lower=Lower("name"))
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name_lower for t in tournaments}, {"tournament", "new tournament"})
    # Upper() annotation upper-cases the column value.
    async def test_filter_by_annotation_upper(self):
        await Tournament.create(name="ToUrnAmEnT")
        await Tournament.create(name="new TOURnament")
        tournaments = await Tournament.annotate(name_upper=Upper("name"))
        self.assertEqual(len(tournaments), 2)
        self.assertSetEqual({t.name_upper for t in tournaments}, {"TOURNAMENT", "NEW TOURNAMENT"})
    # values("tournament") on a bare relation is rejected with FieldError.
    async def test_values_select_relation(self):
        with self.assertRaises(FieldError):
            tournament = await Tournament.create(name="New Tournament")
            await Event.create(name="Test", tournament_id=tournament.id)
            await Event.all().values("tournament")
    # A related field can be selected with the double-underscore path.
    async def test_values_select_relation_field(self):
        tournament = await Tournament.create(name="New Tournament")
        await Event.create(name="Test", tournament_id=tournament.id)
        event_tournaments = await Event.all().values("tournament__name")
        self.assertEqual(event_tournaments[0]["tournament__name"], tournament.name)
    # values(alias=path) renames the selected related field in the result dicts.
    async def test_values_select_relation_field_name_override(self):
        tournament = await Tournament.create(name="New Tournament")
        await Event.create(name="Test", tournament_id=tournament.id)
        event_tournaments = await Event.all().values(tour="tournament__name")
        self.assertEqual(event_tournaments[0]["tour"], tournament.name)
    # values_list() also supports related-field paths.
    async def test_values_list_select_relation_field(self):
        tournament = await Tournament.create(name="New Tournament")
        await Event.create(name="Test", tournament_id=tournament.id)
        event_tournaments = await Event.all().values_list("tournament__name")
        self.assertEqual(event_tournaments[0][0], tournament.name)
| true | true |
1c3c53cbb02869747faafe0d29281a71dde13196 | 895 | py | Python | isi_sdk_9_0_0/test/test_progress_global.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_9_0_0/test/test_progress_global.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_9_0_0/test/test_progress_global.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_9_0_0
from isi_sdk_9_0_0.models.progress_global import ProgressGlobal # noqa: E501
from isi_sdk_9_0_0.rest import ApiException
class TestProgressGlobal(unittest.TestCase):
    """ProgressGlobal unit test stubs (auto-generated by swagger-codegen)."""
    def setUp(self):
        # No fixtures needed for the generated stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testProgressGlobal(self):
        """Test ProgressGlobal"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_9_0_0.models.progress_global.ProgressGlobal()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| 21.829268 | 85 | 0.702793 |
from __future__ import absolute_import
import unittest
import isi_sdk_9_0_0
from isi_sdk_9_0_0.models.progress_global import ProgressGlobal
from isi_sdk_9_0_0.rest import ApiException
class TestProgressGlobal(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testProgressGlobal(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
1c3c546677148ccdcdf143e4d5d7a475beb9fc59 | 2,394 | py | Python | dbscan/dbscan_ex_1.py | RoteKekse/FDAsandbox | 4668ea3b7adf4908175719caf1fded808f012b85 | [
"MIT"
] | null | null | null | dbscan/dbscan_ex_1.py | RoteKekse/FDAsandbox | 4668ea3b7adf4908175719caf1fded808f012b85 | [
"MIT"
] | null | null | null | dbscan/dbscan_ex_1.py | RoteKekse/FDAsandbox | 4668ea3b7adf4908175719caf1fded808f012b85 | [
"MIT"
] | null | null | null | print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
# #############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
import matplotlib.pyplot as plt
plt.title('Cluster Beispiel')
plt.plot(X[:,0],X[:,1],'o')
plt.show()
# #############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
# #############################################################################
# Plot result
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show() | 35.205882 | 79 | 0.630326 | print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
| true | true |
1c3c55c6da3aec9ad7024f900efa4d0038a651ad | 2,799 | py | Python | code.py | shambhavit14/Olympic-hero | 58aa58a46daeaf4486fcdf9fc4aa9e026f683f92 | [
"MIT"
] | null | null | null | code.py | shambhavit14/Olympic-hero | 58aa58a46daeaf4486fcdf9fc4aa9e026f683f92 | [
"MIT"
] | null | null | null | code.py | shambhavit14/Olympic-hero | 58aa58a46daeaf4486fcdf9fc4aa9e026f683f92 | [
"MIT"
] | null | null | null | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
# NOTE(review): `path` is not defined in this file; it is presumably injected
# by the grading environment -- confirm.
data = pd.read_csv(path)
data.rename(columns = {'Total':'Total_Medals'},inplace = True)
data.head(10)
#Code starts here
# --------------
#Code starts here
# Label each country by the season in which it won more medals ('Both' on a tie).
data['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'] , 'Both' , (np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')))
# The season that is "better" for the largest number of countries.
better_event = data['Better_Event'].value_counts().idxmax()
# --------------
#Code starts here
z = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
# Drops the final row -- presumably an aggregate totals row; TODO confirm.
top_countries = z[0:-1]
def top_ten(dff, colm):
    """Return the Country_Name values of the 10 largest rows of *dff* by *colm*.

    Fixes the original implementation, which ignored its *dff* parameter and
    read the module-level ``top_countries`` instead (all existing callers pass
    ``top_countries``, so results are unchanged). Fewer than 10 rows yields all
    of them, as ``nlargest`` does.
    """
    largest = dff.nlargest(10, colm)
    return largest['Country_Name'].tolist()
top_10_summer = top_ten(top_countries , 'Total_Summer')
top_10_winter = top_ten(top_countries , 'Total_Winter')
top_10 = top_ten(top_countries , 'Total_Medals')
# Countries appearing in all three top-10 lists.
common = ['United States', 'Sweden', 'Germany', 'Soviet Union']
# --------------
#Code starts here
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
summer_df.plot.bar('Country_Name','Total_Summer')
# NOTE(review): the next two plots also use 'Total_Summer'; this looks like a
# copy-paste slip ('Total_Winter' / 'Total_Medals' were probably intended) -- confirm.
winter_df.plot.bar('Country_Name','Total_Summer')
top_df.plot.bar('Country_Name','Total_Summer')
# --------------
#Code starts here
# Fraction of each season's medals that were gold; the country with the
# highest ratio is reported. (Assignments on these filtered frames may raise
# pandas SettingWithCopyWarning.)
summer_df['Golden_Ratio'] = summer_df['Gold_Summer']/ summer_df['Total_Summer']
summer_max_ratio = summer_df['Golden_Ratio'].max()
summer_country_gold = summer_df[summer_df['Golden_Ratio'] == summer_max_ratio]['Country_Name'].iloc[0]
winter_df['Golden_Ratio'] = winter_df['Gold_Winter']/ winter_df['Total_Winter']
winter_max_ratio = winter_df['Golden_Ratio'].max()
winter_country_gold = winter_df[winter_df['Golden_Ratio'] == winter_max_ratio]['Country_Name'].iloc[0]
top_df['Golden_Ratio'] = top_df['Gold_Total']/ top_df['Total_Medals']
top_max_ratio = top_df['Golden_Ratio'].max()
top_country_gold = top_df[top_df['Golden_Ratio'] == top_max_ratio]['Country_Name'].iloc[0]
# --------------
#Code starts here
# Excludes the final row -- presumably an aggregate totals row; TODO confirm.
data_1 = data[0:-1]
# Weighted points: gold=3, silver=2, bronze=1.
data_1['Total_Points'] = data['Gold_Total']*3 + data['Silver_Total']*2 + data['Bronze_Total']
most_points = data_1['Total_Points'].max()
best_country = data_1[data_1['Total_Points'] == most_points]['Country_Name'].iloc[0]
# --------------
#Code starts here
# Stacked medal breakdown for the best country.
best = data[data['Country_Name'] == best_country]
best = best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar(stacked = True)
plt.xlabel('United States')
plt.ylabel('Medals')
plt.xticks(rotation = 45)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv(path)
data.rename(columns = {'Total':'Total_Medals'},inplace = True)
data.head(10)
data['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'] , 'Both' , (np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')))
better_event = data['Better_Event'].value_counts().idxmax()
z = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries = z[0:-1]
def top_ten(dff, colm):
num = 0
country_list = []
a = top_countries.nlargest(10,colm)
for i in range(len(a['Country_Name'])):
b = a['Country_Name'].iloc[num]
country_list.append(b)
num +=1
return country_list
top_10_summer = top_ten(top_countries , 'Total_Summer')
top_10_winter = top_ten(top_countries , 'Total_Winter')
top_10 = top_ten(top_countries , 'Total_Medals')
common = ['United States', 'Sweden', 'Germany', 'Soviet Union']
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
summer_df.plot.bar('Country_Name','Total_Summer')
winter_df.plot.bar('Country_Name','Total_Summer')
top_df.plot.bar('Country_Name','Total_Summer')
summer_df['Golden_Ratio'] = summer_df['Gold_Summer']/ summer_df['Total_Summer']
summer_max_ratio = summer_df['Golden_Ratio'].max()
summer_country_gold = summer_df[summer_df['Golden_Ratio'] == summer_max_ratio]['Country_Name'].iloc[0]
winter_df['Golden_Ratio'] = winter_df['Gold_Winter']/ winter_df['Total_Winter']
winter_max_ratio = winter_df['Golden_Ratio'].max()
winter_country_gold = winter_df[winter_df['Golden_Ratio'] == winter_max_ratio]['Country_Name'].iloc[0]
top_df['Golden_Ratio'] = top_df['Gold_Total']/ top_df['Total_Medals']
top_max_ratio = top_df['Golden_Ratio'].max()
top_country_gold = top_df[top_df['Golden_Ratio'] == top_max_ratio]['Country_Name'].iloc[0]
data_1 = data[0:-1]
data_1['Total_Points'] = data['Gold_Total']*3 + data['Silver_Total']*2 + data['Bronze_Total']
most_points = data_1['Total_Points'].max()
best_country = data_1[data_1['Total_Points'] == most_points]['Country_Name'].iloc[0]
best = data[data['Country_Name'] == best_country]
best = best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar(stacked = True)
plt.xlabel('United States')
plt.ylabel('Medals')
plt.xticks(rotation = 45)
| true | true |
1c3c560a73b1a038c13899cb31cf26c009c09c4e | 425 | py | Python | Portfolio_Django/Portfolio_Django/asgi.py | adityagirigoudar/adityagirigoudar.github.io | 3e8631a528d5ad01d80bb0d563267385c196af31 | [
"Apache-2.0"
] | null | null | null | Portfolio_Django/Portfolio_Django/asgi.py | adityagirigoudar/adityagirigoudar.github.io | 3e8631a528d5ad01d80bb0d563267385c196af31 | [
"Apache-2.0"
] | null | null | null | Portfolio_Django/Portfolio_Django/asgi.py | adityagirigoudar/adityagirigoudar.github.io | 3e8631a528d5ad01d80bb0d563267385c196af31 | [
"Apache-2.0"
] | null | null | null | """
ASGI config for Portfolio_Django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Make sure Django can locate the settings module before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Portfolio_Django.settings')
# Module-level ASGI callable exposed to ASGI servers.
application = get_asgi_application()
| 25 | 79 | 0.764706 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Portfolio_Django.settings')
application = get_asgi_application()
| true | true |
1c3c56332fed0fe4a3ce8df20169d07642f8eaa9 | 2,695 | py | Python | betellib.py | hippke/betelbot | 8cd9292f272d504627c1def7bf99ace8d1a6a4cc | [
"MIT"
] | 48 | 2020-01-29T16:10:06.000Z | 2022-03-26T21:55:19.000Z | betellib.py | hippke/betelbot | 8cd9292f272d504627c1def7bf99ace8d1a6a4cc | [
"MIT"
] | 3 | 2020-01-29T21:28:06.000Z | 2020-06-11T09:11:07.000Z | betellib.py | hippke/betelbot | 8cd9292f272d504627c1def7bf99ace8d1a6a4cc | [
"MIT"
] | 5 | 2020-02-10T05:35:44.000Z | 2021-01-26T18:05:21.000Z | import os
import requests
import numpy as np
from twython import Twython
from bs4 import BeautifulSoup
from astropy.stats import biweight_location
consumer_key = os.environ.get('consumer_key')
consumer_secret = os.environ.get('consumer_secret')
access_token = os.environ.get('access_token')
access_token_secret = os.environ.get('access_token_secret')
def tweet(text, image):
    """Post *text* to Twitter with the picture at path *image* attached.

    Credentials come from the module-level environment-derived constants.
    """
    print('Tweeting...')
    twitter = Twython(consumer_key, consumer_secret, access_token, access_token_secret)
    # Close the media file deterministically (the original leaked the handle).
    with open(image, 'rb') as media_file:
        response = twitter.upload_media(media=media_file)
    twitter.update_status(status=text, media_ids=[response['media_id']])
    print("Done.")
def build_string(days_ago, mag):
    """Compose the nightly status tweet comparing last night's magnitude
    against the robust mean of the five preceding nights.

    Parameters
    ----------
    days_ago : numpy array of observation ages, in days.
    mag : numpy array of visual magnitudes aligned with *days_ago*.

    Returns the tweet text, or a placeholder string when either window
    contains no observations.
    """
    print('Building string...')
    # Split observations into "last night" (<1 day) and nights 1-6 days ago.
    data_last24hrs = np.where(days_ago < 1)
    data_last1_6_days = np.where((days_ago < 6) & (days_ago > 1))
    n_obs_last24hrs = np.size(mag[data_last24hrs])
    n_obs_last1_6_days = np.size(mag[data_last1_6_days])
    # Guard BEFORE computing statistics: the original computed
    # biweight_location/std of possibly-empty arrays first, producing
    # NaNs and runtime warnings before returning this same string.
    if n_obs_last24hrs < 1 or n_obs_last1_6_days < 1:
        return "No new observations last night"
    mean_last24hrs = biweight_location(mag[data_last24hrs])
    mean_last1_6_days = biweight_location(mag[data_last1_6_days])
    # Combined standard error of the two window means.
    stdev = np.std(mag[data_last24hrs]) / np.sqrt(n_obs_last24hrs) \
        + np.std(mag[data_last1_6_days]) / np.sqrt(n_obs_last1_6_days)
    diff = mean_last24hrs - mean_last1_6_days
    sigma = diff / stdev
    # Larger visual magnitude means fainter, hence 'dimmer'.
    if diff > 0:
        changeword = 'dimmer'
    else:
        changeword = 'brighter'
    mag_text = "My visual mag from last night was " + \
        str(format(mean_last24hrs, '.2f')) + \
        ' (robust mean of ' + \
        str(n_obs_last24hrs) + \
        ' observations). '
    change_text = 'That is ' + \
        format(abs(diff), '.2f') + \
        ' mag ' + \
        changeword + \
        ' than the robust mean of the 5 previous nights (n=' + \
        str(n_obs_last1_6_days) + \
        ', ' + \
        format(abs(sigma), '.1f') + \
        'σ). #Betelgeuse'
    text = mag_text + change_text
    print(text)
    return text
def get_mags_from_AAVSO(url):
    """Scrape (julian_date, magnitude) pairs from an AAVSO results page.

    Returns two parallel numpy arrays (dates, mags). Table rows whose 4th
    and 6th newline-separated fields do not parse as floats are skipped, as
    are rows failing the sanity filter (mag < 3 and date > 1000000).
    """
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'html.parser')
    rows = soup.select('tbody tr')
    dates = []
    mags = []
    for row in rows:
        fields = row.text.split('\n')
        try:
            date = float(fields[3])
            mag = float(fields[5])
        except (ValueError, IndexError):
            # Header/malformed rows: skip just these parse failures instead
            # of the original bare `except: pass`, which hid every error.
            continue
        print(date, mag)
        # Sanity filter: plausible magnitude and Julian date only.
        if mag < 3 and date > 1000000:
            dates.append(date)
            mags.append(mag)
    return np.array(dates), np.array(mags)
| 30.977011 | 87 | 0.605566 | import os
import requests
import numpy as np
from twython import Twython
from bs4 import BeautifulSoup
from astropy.stats import biweight_location
consumer_key = os.environ.get('consumer_key')
consumer_secret = os.environ.get('consumer_secret')
access_token = os.environ.get('access_token')
access_token_secret = os.environ.get('access_token_secret')
def tweet(text, image):
print('Tweeting...')
twitter = Twython(consumer_key, consumer_secret, access_token, access_token_secret)
response = twitter.upload_media(media=open(image, 'rb'))
twitter.update_status(status=text, media_ids=[response['media_id']])
print("Done.")
def build_string(days_ago, mag):
print('Building string...')
data_last24hrs = np.where(days_ago<1)
data_last1_6_days = np.where((days_ago<6) & (days_ago>1))
n_obs_last24hrs = np.size(mag[data_last24hrs])
n_obs_last1_6_days = np.size(mag[data_last1_6_days])
mean_last24hrs = biweight_location(mag[data_last24hrs])
mean_last1_6_days = biweight_location(mag[data_last1_6_days])
stdev = np.std(mag[data_last24hrs]) / np.sqrt(n_obs_last24hrs) \
+ np.std(mag[data_last1_6_days]) / np.sqrt(n_obs_last1_6_days)
diff = mean_last24hrs - mean_last1_6_days
sigma = diff / stdev
if n_obs_last24hrs < 1 or n_obs_last1_6_days < 1:
return "No new observations last night"
else:
if diff > 0:
changeword = 'dimmer'
else:
changeword = 'brighter'
mag_text = "My visual mag from last night was " + \
str(format(mean_last24hrs, '.2f')) + \
' (robust mean of ' + \
str(n_obs_last24hrs) + \
' observations). '
change_text = 'That is ' + \
format(abs(diff), '.2f') + \
' mag ' + \
changeword + \
' than the robust mean of the 5 previous nights (n=' + \
str(n_obs_last1_6_days) + \
', ' + \
format(abs(sigma), '.1f') + \
'σ). #Betelgeuse'
text = mag_text + change_text
print(text)
return text
def get_mags_from_AAVSO(url):
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
rows = soup.select('tbody tr')
dates = []
mags = []
for row in rows:
string = '' + row.text
string = string.split('\n')
try:
date = float(string[3])
mag = float(string[5])
print(date, mag)
if mag < 3 and date > 1000000:
dates.append(date)
mags.append(mag)
except:
pass
return np.array(dates), np.array(mags)
| true | true |
1c3c567cb1666d4b50d5ad26b75b422c2ec15f81 | 1,449 | py | Python | mirumon/services/wmi_api/hardware.py | mirumon/mirumon-windows-client | 8d21726288478174467a436fe94e1c1831bda9e7 | [
"MIT"
] | 1 | 2021-09-15T09:17:32.000Z | 2021-09-15T09:17:32.000Z | mirumon/services/wmi_api/hardware.py | mirumon/mirumon-windows-client | 8d21726288478174467a436fe94e1c1831bda9e7 | [
"MIT"
] | 6 | 2019-09-14T18:15:19.000Z | 2019-12-03T22:27:45.000Z | mirumon/services/wmi_api/hardware.py | mirumon/mirumon-win-client | 8d21726288478174467a436fe94e1c1831bda9e7 | [
"MIT"
] | null | null | null | from typing import Any, List
import wmi
from mirumon.schemas.computer.hardware import (
HardwareModel,
MotherBoardModel,
NetworkAdapterModel,
PhysicalDiskModel,
ProcessorModel,
VideoControllerModel,
)
def get_motherboard(computer: wmi.WMI, *_: Any) -> MotherBoardModel:
    """Map the first Win32_BaseBoard entry of *computer* to a MotherBoardModel."""
    board = computer.Win32_BaseBoard()[0]
    return MotherBoardModel.from_orm(board)
def get_cpu(computer: wmi.WMI, *_: Any) -> List[ProcessorModel]:
    """Collect every Win32_Processor entry of *computer* as a ProcessorModel."""
    return list(map(ProcessorModel.from_orm, computer.Win32_Processor()))
def get_gpu(computer: wmi.WMI, *_: Any) -> List[VideoControllerModel]:
    """Collect every Win32_VideoController entry as a VideoControllerModel."""
    return list(map(VideoControllerModel.from_orm, computer.Win32_VideoController()))
def get_network_adapters(computer: wmi.WMI, *_: Any) -> List[NetworkAdapterModel]:
    """Collect the IP-enabled Win32_NetworkAdapterConfiguration entries."""
    adapters = computer.Win32_NetworkAdapterConfiguration(IPEnabled=1)
    return list(map(NetworkAdapterModel.from_orm, adapters))
def get_physical_disks(computer: wmi.WMI, *_: Any) -> List[PhysicalDiskModel]:
    """Collect every Win32_DiskDrive entry as a PhysicalDiskModel."""
    return list(map(PhysicalDiskModel.from_orm, computer.Win32_DiskDrive()))
def get_hardware(computer: wmi.WMI, *_: Any) -> HardwareModel:
    """Aggregate all per-component WMI queries into a single HardwareModel."""
    board = get_motherboard(computer)
    cpus = get_cpu(computer)
    gpus = get_gpu(computer)
    nics = get_network_adapters(computer)
    physical_disks = get_physical_disks(computer)
    return HardwareModel(
        motherboard=board,
        cpu=cpus,
        gpu=gpus,
        network=nics,
        disks=physical_disks,
    )
| 27.865385 | 86 | 0.717736 | from typing import Any, List
import wmi
from mirumon.schemas.computer.hardware import (
HardwareModel,
MotherBoardModel,
NetworkAdapterModel,
PhysicalDiskModel,
ProcessorModel,
VideoControllerModel,
)
def get_motherboard(computer: wmi.WMI, *_: Any) -> MotherBoardModel:
mother = computer.Win32_BaseBoard()[0]
return MotherBoardModel.from_orm(mother)
def get_cpu(computer: wmi.WMI, *_: Any) -> List[ProcessorModel]:
return [ProcessorModel.from_orm(cpu) for cpu in computer.Win32_Processor()]
def get_gpu(computer: wmi.WMI, *_: Any) -> List[VideoControllerModel]:
return [
VideoControllerModel.from_orm(gpu) for gpu in computer.Win32_VideoController()
]
def get_network_adapters(computer: wmi.WMI, *_: Any) -> List[NetworkAdapterModel]:
return [
NetworkAdapterModel.from_orm(interface)
for interface in computer.Win32_NetworkAdapterConfiguration(IPEnabled=1)
]
def get_physical_disks(computer: wmi.WMI, *_: Any) -> List[PhysicalDiskModel]:
return [
PhysicalDiskModel.from_orm(physical_disk)
for physical_disk in computer.Win32_DiskDrive()
]
def get_hardware(computer: wmi.WMI, *_: Any) -> HardwareModel:
return HardwareModel(
motherboard=get_motherboard(computer),
cpu=get_cpu(computer),
gpu=get_gpu(computer),
network=get_network_adapters(computer),
disks=get_physical_disks(computer),
)
| true | true |
1c3c579051c05fdef3ca697113d59e6cca957f96 | 609 | py | Python | Resene naloge/euler32.py | CadezDavid/ProjectEuler | 9e11aa5782fb600c98eba9e04766b3bd79acea0e | [
"MIT"
] | null | null | null | Resene naloge/euler32.py | CadezDavid/ProjectEuler | 9e11aa5782fb600c98eba9e04766b3bd79acea0e | [
"MIT"
] | null | null | null | Resene naloge/euler32.py | CadezDavid/ProjectEuler | 9e11aa5782fb600c98eba9e04766b3bd79acea0e | [
"MIT"
] | null | null | null | from itertools import permutations
def je_pandigital(stevilo):
    """Return True if *stevilo* is an int whose digits are exactly 1-9, once each.

    The original only required nine distinct digits, which wrongly accepted
    numbers containing 0 (e.g. 102345678); comparing against the digit set
    '123456789' fixes that.
    """
    return (
        isinstance(stevilo, int)
        and len(str(stevilo)) == 9
        and set(str(stevilo)) == set('123456789')
    )
def tuple_to_str(tuple):
    """Concatenate the elements of *tuple*, in order, into a single string."""
    return ''.join(str(element) for element in tuple)
# All 9-digit pandigital strings (permutations of the digits 1-9).
seznam_pandigital_st = set([tuple_to_str(tuple) for tuple in permutations('123456789', 9)])
vsota = set()
# Check the two possible digit splits of a pandigital identity a*b=c
# (1x4-digit and 2x3-digit factors) and collect each distinct product once.
for stevilo in seznam_pandigital_st:
    if int(stevilo[0]) * int(stevilo[1:5]) == int(stevilo[5:]) or \
        int(stevilo[:2]) * int(stevilo[2:5]) == int(stevilo[5:]) :
        vsota.add(int(stevilo[5:]))
def je_pandigital(stevilo):
return len(str(stevilo)) == 9 and len(set(str(stevilo))) == 9 and isinstance(stevilo, int)
def tuple_to_str(tuple):
string = ''
for indeks in range(len(tuple)):
string += str(tuple[indeks])
return string
seznam_pandigital_st = set([tuple_to_str(tuple) for tuple in permutations('123456789', 9)])
vsota = set()
for stevilo in seznam_pandigital_st:
if int(stevilo[0]) * int(stevilo[1:5]) == int(stevilo[5:]) or \
int(stevilo[:2]) * int(stevilo[2:5]) == int(stevilo[5:]) :
vsota.add(int(stevilo[5:])) | true | true |
1c3c58028e0d35172b0466f5d0bbf2eaa391023d | 15,535 | py | Python | docker_ws/src/keras-YOLOv3-model-set/yolo4/models/layers.py | Pomiculture/GAN-Vitis-AI | 148da346c3ec882f24a98b8231800a94c54cc709 | [
"Apache-2.0"
] | 2 | 2021-08-10T13:38:19.000Z | 2021-09-27T03:06:40.000Z | docker_ws/src/keras-YOLOv3-model-set/yolo4/models/layers.py | Pomiculture/GAN-Vitis-AI | 148da346c3ec882f24a98b8231800a94c54cc709 | [
"Apache-2.0"
] | null | null | null | docker_ws/src/keras-YOLOv3-model-set/yolo4/models/layers.py | Pomiculture/GAN-Vitis-AI | 148da346c3ec882f24a98b8231800a94c54cc709 | [
"Apache-2.0"
] | 2 | 2021-12-30T09:04:13.000Z | 2021-12-30T09:04:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Common layer definition for YOLOv4 models building
"""
from functools import wraps, reduce
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, Concatenate, MaxPooling2D, BatchNormalization, Activation, UpSampling2D, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.regularizers import l2
from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization
def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    Reference: https://mathieularose.com/function-composition-in-python/
    """
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')

    def _chain(inner, outer):
        # Feed the output of the already-composed chain into the next function.
        return lambda *args, **kwargs: outer(inner(*args, **kwargs))

    return reduce(_chain, funcs)
@wraps(YoloConv2D)
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for YoloConv2D."""
    # Darknet convention: stride-2 downsampling convs use 'valid' padding,
    # every other conv keeps spatial dims with 'same'.  Caller kwargs win.
    padding = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    conv_kwargs = dict({'padding': padding}, **kwargs)
    return YoloConv2D(*args, **conv_kwargs)
@wraps(YoloDepthwiseConv2D)
def DarknetDepthwiseConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for YoloDepthwiseConv2D."""
    # Same padding rule as DarknetConv2D: 'valid' only for stride-2 convs.
    # Explicit caller kwargs override the computed default.
    padding = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    conv_kwargs = dict({'padding': padding}, **kwargs)
    return YoloDepthwiseConv2D(*args, **conv_kwargs)
def Darknet_Depthwise_Separable_Conv2D_BN_Leaky(filters, kernel_size=(3, 3), block_id_str=None, **kwargs):
    """Depthwise Separable Convolution2D with Darknet padding rules.

    Builds: depthwise conv -> BN -> LeakyReLU -> 1x1 pointwise conv -> BN
    -> LeakyReLU.  `filters` is the pointwise output channel count; extra
    **kwargs (e.g. strides) are forwarded to the depthwise conv only.
    `block_id_str` seeds the layer names; a fresh Keras uid is used when
    not given.
    """
    if not block_id_str:
        block_id_str = str(K.get_uid())
    # bias-free convs: the following BatchNormalization supplies the shift
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(
        DarknetDepthwiseConv2D(kernel_size, name='conv_dw_' + block_id_str, **no_bias_kwargs),
        CustomBatchNormalization(name='conv_dw_%s_bn' % block_id_str),
        LeakyReLU(alpha=0.1, name='conv_dw_%s_leaky_relu' % block_id_str),
        YoloConv2D(filters, (1,1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%s' % block_id_str),
        CustomBatchNormalization(name='conv_pw_%s_bn' % block_id_str),
        LeakyReLU(alpha=0.1, name='conv_pw_%s_leaky_relu' % block_id_str))
def Depthwise_Separable_Conv2D_BN_Leaky(filters, kernel_size=(3, 3), block_id_str=None):
    """Depthwise Separable Convolution2D (stride 1, 'same' padding).

    Same layer stack as the Darknet_* variant above but without the
    Darknet stride/padding handling or extra kwargs: depthwise conv -> BN
    -> LeakyReLU -> 1x1 pointwise conv -> BN -> LeakyReLU.
    """
    if not block_id_str:
        block_id_str = str(K.get_uid())
    return compose(
        YoloDepthwiseConv2D(kernel_size, padding='same', name='conv_dw_' + block_id_str),
        CustomBatchNormalization(name='conv_dw_%s_bn' % block_id_str),
        LeakyReLU(alpha=0.1, name='conv_dw_%s_leaky_relu' % block_id_str),
        YoloConv2D(filters, (1,1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%s' % block_id_str),
        CustomBatchNormalization(name='conv_pw_%s_bn' % block_id_str),
        LeakyReLU(alpha=0.1, name='conv_pw_%s_leaky_relu' % block_id_str))
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by CustomBatchNormalization and LeakyReLU."""
    # Default to a bias-free conv (BN supplies the shift); caller kwargs win.
    conv_kwargs = dict({'use_bias': False}, **kwargs)
    return compose(
        DarknetConv2D(*args, **conv_kwargs),
        CustomBatchNormalization(),
        LeakyReLU(alpha=0.1))
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    return x * K.tanh(K.softplus(x))
def DarknetConv2D_BN_Mish(*args, **kwargs):
    """Darknet Convolution2D followed by CustomBatchNormalization and Mish."""
    # Default to a bias-free conv (BN supplies the shift); caller kwargs win.
    conv_kwargs = dict({'use_bias': False}, **kwargs)
    return compose(
        DarknetConv2D(*args, **conv_kwargs),
        CustomBatchNormalization(),
        Activation(mish))
def Spp_Conv2D_BN_Leaky(x, num_filters):
    """YOLOv4-style Spatial Pyramid Pooling block.

    Max-pools `x` with 5x5, 9x9 and 13x13 windows at stride 1 and 'same'
    padding (spatial dims unchanged), concatenates the three pooled maps
    with the input, then fuses channels down with a 1x1 Conv2D_BN_Leaky.
    """
    y1 = MaxPooling2D(pool_size=(5,5), strides=(1,1), padding='same')(x)
    y2 = MaxPooling2D(pool_size=(9,9), strides=(1,1), padding='same')(x)
    y3 = MaxPooling2D(pool_size=(13,13), strides=(1,1), padding='same')(x)
    y = compose(
            Concatenate(),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)))([y3, y2, y1, x])
    return y
def make_yolo_head(x, num_filters):
    '''5 alternating Conv2D_BN_Leaky layers: 1x1-3x3-1x1-3x3-1x1.

    (The old docstring claimed "6 layers followed by a Conv2D_linear" --
    the prediction conv is actually added by the caller.)
    '''
    x = compose(
            DarknetConv2D_BN_Leaky(num_filters, (1,1)),
            DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)),
            DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
    return x
def make_yolo_spp_head(x, num_filters):
    '''Like make_yolo_head, but with an SPP block after the third conv:
    1x1-3x3-1x1 Conv2D_BN_Leaky -> SPP -> 3x3-1x1 Conv2D_BN_Leaky.
    '''
    x = compose(
            DarknetConv2D_BN_Leaky(num_filters, (1,1)),
            DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
    x = Spp_Conv2D_BN_Leaky(x, num_filters)
    x = compose(
            DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
    return x
def make_yolo_depthwise_separable_head(x, num_filters, block_id_str=None):
    '''Lite variant of make_yolo_head: the two 3x3 convs are replaced by
    depthwise-separable blocks (1x1 - DSconv3x3 - 1x1 - DSconv3x3 - 1x1).
    '''
    if not block_id_str:
        block_id_str = str(K.get_uid())
    x = compose(
            DarknetConv2D_BN_Leaky(num_filters, (1,1)),
            Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters*2, kernel_size=(3, 3), block_id_str=block_id_str+'_1'),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)),
            Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters*2, kernel_size=(3, 3), block_id_str=block_id_str+'_2'),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
    return x
def make_yolo_spp_depthwise_separable_head(x, num_filters, block_id_str=None):
    '''Lite variant of make_yolo_spp_head: the two 3x3 convs are replaced
    by depthwise-separable blocks, with an SPP block in the middle.
    '''
    if not block_id_str:
        block_id_str = str(K.get_uid())
    x = compose(
            DarknetConv2D_BN_Leaky(num_filters, (1,1)),
            Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters*2, kernel_size=(3, 3), block_id_str=block_id_str+'_1'),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
    x = Spp_Conv2D_BN_Leaky(x, num_filters)
    x = compose(
            Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters*2, kernel_size=(3, 3), block_id_str=block_id_str+'_2'),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
    return x
def yolo4_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes):
    """Build the YOLOv4 PANet neck and prediction convs.

    feature_maps: (f1, f2, f3) backbone outputs, coarsest (f1) to finest (f3).
    feature_channel_nums: channel counts matching (f1, f2, f3).
    Returns (y1, y2, y3) raw prediction tensors, each with
    num_anchors*(num_classes+5) output channels.

    Flow: SPP head on f1, top-down upsample merges f1->f2->f3, then
    bottom-up downsample merges f3->f2->f1.
    """
    f1, f2, f3 = feature_maps
    f1_channel_num, f2_channel_num, f3_channel_num = feature_channel_nums
    #feature map 1 head (19x19 for 608 input)
    x1 = make_yolo_spp_head(f1, f1_channel_num//2)
    #upsample fpn merge for feature map 1 & 2
    x1_upsample = compose(
            DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1)),
            UpSampling2D(2))(x1)
    x2 = DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1))(f2)
    x2 = Concatenate()([x2, x1_upsample])
    #feature map 2 head (38x38 for 608 input)
    x2 = make_yolo_head(x2, f2_channel_num//2)
    #upsample fpn merge for feature map 2 & 3
    x2_upsample = compose(
            DarknetConv2D_BN_Leaky(f3_channel_num//2, (1,1)),
            UpSampling2D(2))(x2)
    x3 = DarknetConv2D_BN_Leaky(f3_channel_num//2, (1,1))(f3)
    x3 = Concatenate()([x3, x2_upsample])
    #feature map 3 head & output (76x76 for 608 input)
    #x3, y3 = make_last_layers(x3, f3_channel_num//2, num_anchors*(num_classes+5))
    x3 = make_yolo_head(x3, f3_channel_num//2)
    y3 = compose(
            DarknetConv2D_BN_Leaky(f3_channel_num, (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_3'))(x3)
    #downsample fpn merge for feature map 3 & 2
    x3_downsample = compose(
            ZeroPadding2D(((1,0),(1,0))),
            DarknetConv2D_BN_Leaky(f2_channel_num//2, (3,3), strides=(2,2)))(x3)
    x2 = Concatenate()([x3_downsample, x2])
    #feature map 2 output (38x38 for 608 input)
    #x2, y2 = make_last_layers(x2, 256, num_anchors*(num_classes+5))
    x2 = make_yolo_head(x2, f2_channel_num//2)
    y2 = compose(
            DarknetConv2D_BN_Leaky(f2_channel_num, (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2'))(x2)
    #downsample fpn merge for feature map 2 & 1
    x2_downsample = compose(
            ZeroPadding2D(((1,0),(1,0))),
            DarknetConv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2)))(x2)
    x1 = Concatenate()([x2_downsample, x1])
    #feature map 1 output (19x19 for 608 input)
    #x1, y1 = make_last_layers(x1, f1_channel_num//2, num_anchors*(num_classes+5))
    x1 = make_yolo_head(x1, f1_channel_num//2)
    y1 = compose(
            DarknetConv2D_BN_Leaky(f1_channel_num, (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1'))(x1)
    return y1, y2, y3
def yolo4lite_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes):
    """Lite version of yolo4_predictions: same PANet topology, with the
    3x3 convs replaced by depthwise-separable blocks.

    Returns (y1, y2, y3) raw prediction tensors, each with
    num_anchors*(num_classes+5) output channels.
    """
    f1, f2, f3 = feature_maps
    f1_channel_num, f2_channel_num, f3_channel_num = feature_channel_nums
    #feature map 1 head (13 x 13 x f1_channel_num//2 for 416 input)
    x1 = make_yolo_spp_depthwise_separable_head(f1, f1_channel_num//2, block_id_str='pred_1')
    #upsample fpn merge for feature map 1 & 2
    x1_upsample = compose(
            DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1)),
            UpSampling2D(2))(x1)
    x2 = DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1))(f2)
    x2 = Concatenate()([x2, x1_upsample])
    #feature map 2 head (26 x 26 x f2_channel_num//2 for 416 input)
    x2 = make_yolo_depthwise_separable_head(x2, f2_channel_num//2, block_id_str='pred_2')
    #upsample fpn merge for feature map 2 & 3
    x2_upsample = compose(
            DarknetConv2D_BN_Leaky(f3_channel_num//2, (1,1)),
            UpSampling2D(2))(x2)
    x3 = DarknetConv2D_BN_Leaky(f3_channel_num//2, (1,1))(f3)
    x3 = Concatenate()([x3, x2_upsample])
    #feature map 3 head & output (52 x 52 x f3_channel_num for 416 input)
    x3 = make_yolo_depthwise_separable_head(x3, f3_channel_num//2, block_id_str='pred_3')
    y3 = compose(
            Depthwise_Separable_Conv2D_BN_Leaky(f3_channel_num, (3,3), block_id_str='pred_3_3'),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_3'))(x3)
    #downsample fpn merge for feature map 3 & 2
    x3_downsample = compose(
            ZeroPadding2D(((1,0),(1,0))),
            Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f2_channel_num//2, (3,3), strides=(2,2), block_id_str='pred_3_4'))(x3)
    x2 = Concatenate()([x3_downsample, x2])
    #feature map 2 output (26 x 26 x f2_channel_num for 416 input)
    x2 = make_yolo_depthwise_separable_head(x2, f2_channel_num//2, block_id_str='pred_4')
    y2 = compose(
            Depthwise_Separable_Conv2D_BN_Leaky(f2_channel_num, (3,3), block_id_str='pred_4_3'),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2'))(x2)
    #downsample fpn merge for feature map 2 & 1
    x2_downsample = compose(
            ZeroPadding2D(((1,0),(1,0))),
            Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2), block_id_str='pred_4_4'))(x2)
    x1 = Concatenate()([x2_downsample, x1])
    #feature map 1 output (13 x 13 x f1_channel_num for 416 input)
    #x1, y1 = make_depthwise_separable_last_layers(x1, f1_channel_num//2, num_anchors*(num_classes+5))
    x1 = make_yolo_depthwise_separable_head(x1, f1_channel_num//2, block_id_str='pred_5')
    y1 = compose(
            Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num, (3,3), block_id_str='pred_5_3'),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1'))(x1)
    return y1, y2, y3
def tiny_yolo4_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes, use_spp):
    """Two-scale (tiny) YOLOv4 neck + prediction convs.

    feature_maps: (f1, f2) backbone outputs, coarse (f1) and fine (f2).
    use_spp: insert an SPP block after the f1 head conv.
    Returns (y1, y2) raw prediction tensors with
    num_anchors*(num_classes+5) output channels each.
    """
    f1, f2 = feature_maps
    f1_channel_num, f2_channel_num = feature_channel_nums
    #feature map 1 head (13 x 13 x f1_channel_num//2 for 416 input)
    x1 = DarknetConv2D_BN_Leaky(f1_channel_num//2, (1,1))(f1)
    if use_spp:
        x1 = Spp_Conv2D_BN_Leaky(x1, f1_channel_num//2)
    #upsample fpn merge for feature map 1 & 2
    x1_upsample = compose(
            DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1)),
            UpSampling2D(2))(x1)
    x2 = compose(
            Concatenate(),
            #Depthwise_Separable_Conv2D_BN_Leaky(filters=f2_channel_num, kernel_size=(3, 3), block_id_str='15'),
            DarknetConv2D_BN_Leaky(f2_channel_num, (3,3)))([x1_upsample, f2])
    #feature map 2 output (26 x 26 x f2_channel_num for 416 input)
    y2 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2')(x2)
    #downsample fpn merge for feature map 2 & 1
    x2_downsample = compose(
            ZeroPadding2D(((1,0),(1,0))),
            #Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2), block_id_str='16'),
            DarknetConv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2)))(x2)
    x1 = compose(
            Concatenate(),
            #Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='17'),
            DarknetConv2D_BN_Leaky(f1_channel_num, (3,3)))([x2_downsample, x1])
    #feature map 1 output (13 x 13 x f1_channel_num for 416 input)
    y1 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1')(x1)
    return y1, y2
def tiny_yolo4lite_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes, use_spp):
    """Lite version of tiny_yolo4_predictions: the 3x3 convs are replaced
    by depthwise-separable blocks.

    Returns (y1, y2) raw prediction tensors with
    num_anchors*(num_classes+5) output channels each.
    """
    f1, f2 = feature_maps
    f1_channel_num, f2_channel_num = feature_channel_nums
    #feature map 1 head (13 x 13 x f1_channel_num//2 for 416 input)
    x1 = DarknetConv2D_BN_Leaky(f1_channel_num//2, (1,1))(f1)
    if use_spp:
        x1 = Spp_Conv2D_BN_Leaky(x1, f1_channel_num//2)
    #upsample fpn merge for feature map 1 & 2
    x1_upsample = compose(
            DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1)),
            UpSampling2D(2))(x1)
    x2 = compose(
            Concatenate(),
            #DarknetConv2D_BN_Leaky(f2_channel_num, (3,3)),
            Depthwise_Separable_Conv2D_BN_Leaky(filters=f2_channel_num, kernel_size=(3, 3), block_id_str='pred_1'))([x1_upsample, f2])
    #feature map 2 output (26 x 26 x f2_channel_num for 416 input)
    y2 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2')(x2)
    #downsample fpn merge for feature map 2 & 1
    x2_downsample = compose(
            ZeroPadding2D(((1,0),(1,0))),
            #DarknetConv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2)),
            Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2), block_id_str='pred_2'))(x2)
    x1 = compose(
            Concatenate(),
            #DarknetConv2D_BN_Leaky(f1_channel_num, (3,3)),
            Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='pred_3'))([x2_downsample, x1])
    #feature map 1 output (13 x 13 x f1_channel_num for 416 input)
    y1 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1')(x1)
    return y1, y2
| 42.561644 | 147 | 0.683618 |
from functools import wraps, reduce
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, Concatenate, MaxPooling2D, BatchNormalization, Activation, UpSampling2D, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.regularizers import l2
from common.backbones.layers import YoloConv2D, YoloDepthwiseConv2D, CustomBatchNormalization
def compose(*funcs):
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
@wraps(YoloConv2D)
def DarknetConv2D(*args, **kwargs):
darknet_conv_kwargs = {'padding': 'valid' if kwargs.get('strides')==(2,2) else 'same'}
darknet_conv_kwargs.update(kwargs)
return YoloConv2D(*args, **darknet_conv_kwargs)
@wraps(YoloDepthwiseConv2D)
def DarknetDepthwiseConv2D(*args, **kwargs):
darknet_conv_kwargs = {'padding': 'valid' if kwargs.get('strides')==(2,2) else 'same'}
darknet_conv_kwargs.update(kwargs)
return YoloDepthwiseConv2D(*args, **darknet_conv_kwargs)
def Darknet_Depthwise_Separable_Conv2D_BN_Leaky(filters, kernel_size=(3, 3), block_id_str=None, **kwargs):
if not block_id_str:
block_id_str = str(K.get_uid())
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetDepthwiseConv2D(kernel_size, name='conv_dw_' + block_id_str, **no_bias_kwargs),
CustomBatchNormalization(name='conv_dw_%s_bn' % block_id_str),
LeakyReLU(alpha=0.1, name='conv_dw_%s_leaky_relu' % block_id_str),
YoloConv2D(filters, (1,1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%s' % block_id_str),
CustomBatchNormalization(name='conv_pw_%s_bn' % block_id_str),
LeakyReLU(alpha=0.1, name='conv_pw_%s_leaky_relu' % block_id_str))
def Depthwise_Separable_Conv2D_BN_Leaky(filters, kernel_size=(3, 3), block_id_str=None):
if not block_id_str:
block_id_str = str(K.get_uid())
return compose(
YoloDepthwiseConv2D(kernel_size, padding='same', name='conv_dw_' + block_id_str),
CustomBatchNormalization(name='conv_dw_%s_bn' % block_id_str),
LeakyReLU(alpha=0.1, name='conv_dw_%s_leaky_relu' % block_id_str),
YoloConv2D(filters, (1,1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%s' % block_id_str),
CustomBatchNormalization(name='conv_pw_%s_bn' % block_id_str),
LeakyReLU(alpha=0.1, name='conv_pw_%s_leaky_relu' % block_id_str))
def DarknetConv2D_BN_Leaky(*args, **kwargs):
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
CustomBatchNormalization(),
LeakyReLU(alpha=0.1))
def mish(x):
return x * K.tanh(K.softplus(x))
def DarknetConv2D_BN_Mish(*args, **kwargs):
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
CustomBatchNormalization(),
Activation(mish))
def Spp_Conv2D_BN_Leaky(x, num_filters):
y1 = MaxPooling2D(pool_size=(5,5), strides=(1,1), padding='same')(x)
y2 = MaxPooling2D(pool_size=(9,9), strides=(1,1), padding='same')(x)
y3 = MaxPooling2D(pool_size=(13,13), strides=(1,1), padding='same')(x)
y = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))([y3, y2, y1, x])
return y
def make_yolo_head(x, num_filters):
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
return x
def make_yolo_spp_head(x, num_filters):
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
x = Spp_Conv2D_BN_Leaky(x, num_filters)
x = compose(
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
return x
def make_yolo_depthwise_separable_head(x, num_filters, block_id_str=None):
if not block_id_str:
block_id_str = str(K.get_uid())
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters*2, kernel_size=(3, 3), block_id_str=block_id_str+'_1'),
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters*2, kernel_size=(3, 3), block_id_str=block_id_str+'_2'),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
return x
def make_yolo_spp_depthwise_separable_head(x, num_filters, block_id_str=None):
if not block_id_str:
block_id_str = str(K.get_uid())
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters*2, kernel_size=(3, 3), block_id_str=block_id_str+'_1'),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
x = Spp_Conv2D_BN_Leaky(x, num_filters)
x = compose(
Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters*2, kernel_size=(3, 3), block_id_str=block_id_str+'_2'),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
return x
def yolo4_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes):
f1, f2, f3 = feature_maps
f1_channel_num, f2_channel_num, f3_channel_num = feature_channel_nums
x1 = make_yolo_spp_head(f1, f1_channel_num//2)
x1_upsample = compose(
DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1)),
UpSampling2D(2))(x1)
x2 = DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1))(f2)
x2 = Concatenate()([x2, x1_upsample])
x2 = make_yolo_head(x2, f2_channel_num//2)
x2_upsample = compose(
DarknetConv2D_BN_Leaky(f3_channel_num//2, (1,1)),
UpSampling2D(2))(x2)
x3 = DarknetConv2D_BN_Leaky(f3_channel_num//2, (1,1))(f3)
x3 = Concatenate()([x3, x2_upsample])
x3 = make_yolo_head(x3, f3_channel_num//2)
y3 = compose(
DarknetConv2D_BN_Leaky(f3_channel_num, (3,3)),
DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_3'))(x3)
x3_downsample = compose(
ZeroPadding2D(((1,0),(1,0))),
DarknetConv2D_BN_Leaky(f2_channel_num//2, (3,3), strides=(2,2)))(x3)
x2 = Concatenate()([x3_downsample, x2])
x2 = make_yolo_head(x2, f2_channel_num//2)
y2 = compose(
DarknetConv2D_BN_Leaky(f2_channel_num, (3,3)),
DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2'))(x2)
x2_downsample = compose(
ZeroPadding2D(((1,0),(1,0))),
DarknetConv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2)))(x2)
x1 = Concatenate()([x2_downsample, x1])
x1 = make_yolo_head(x1, f1_channel_num//2)
y1 = compose(
DarknetConv2D_BN_Leaky(f1_channel_num, (3,3)),
DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1'))(x1)
return y1, y2, y3
def yolo4lite_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes):
f1, f2, f3 = feature_maps
f1_channel_num, f2_channel_num, f3_channel_num = feature_channel_nums
x1 = make_yolo_spp_depthwise_separable_head(f1, f1_channel_num//2, block_id_str='pred_1')
x1_upsample = compose(
DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1)),
UpSampling2D(2))(x1)
x2 = DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1))(f2)
x2 = Concatenate()([x2, x1_upsample])
x2 = make_yolo_depthwise_separable_head(x2, f2_channel_num//2, block_id_str='pred_2')
x2_upsample = compose(
DarknetConv2D_BN_Leaky(f3_channel_num//2, (1,1)),
UpSampling2D(2))(x2)
x3 = DarknetConv2D_BN_Leaky(f3_channel_num//2, (1,1))(f3)
x3 = Concatenate()([x3, x2_upsample])
x3 = make_yolo_depthwise_separable_head(x3, f3_channel_num//2, block_id_str='pred_3')
y3 = compose(
Depthwise_Separable_Conv2D_BN_Leaky(f3_channel_num, (3,3), block_id_str='pred_3_3'),
DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_3'))(x3)
x3_downsample = compose(
ZeroPadding2D(((1,0),(1,0))),
Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f2_channel_num//2, (3,3), strides=(2,2), block_id_str='pred_3_4'))(x3)
x2 = Concatenate()([x3_downsample, x2])
x2 = make_yolo_depthwise_separable_head(x2, f2_channel_num//2, block_id_str='pred_4')
y2 = compose(
Depthwise_Separable_Conv2D_BN_Leaky(f2_channel_num, (3,3), block_id_str='pred_4_3'),
DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2'))(x2)
x2_downsample = compose(
ZeroPadding2D(((1,0),(1,0))),
Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2), block_id_str='pred_4_4'))(x2)
x1 = Concatenate()([x2_downsample, x1])
x1 = make_yolo_depthwise_separable_head(x1, f1_channel_num//2, block_id_str='pred_5')
y1 = compose(
Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num, (3,3), block_id_str='pred_5_3'),
DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1'))(x1)
return y1, y2, y3
def tiny_yolo4_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes, use_spp):
f1, f2 = feature_maps
f1_channel_num, f2_channel_num = feature_channel_nums
x1 = DarknetConv2D_BN_Leaky(f1_channel_num//2, (1,1))(f1)
if use_spp:
x1 = Spp_Conv2D_BN_Leaky(x1, f1_channel_num//2)
x1_upsample = compose(
DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1)),
UpSampling2D(2))(x1)
x2 = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(f2_channel_num, (3,3)))([x1_upsample, f2])
y2 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2')(x2)
x2_downsample = compose(
ZeroPadding2D(((1,0),(1,0))),
DarknetConv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2)))(x2)
x1 = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(f1_channel_num, (3,3)))([x2_downsample, x1])
y1 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1')(x1)
return y1, y2
def tiny_yolo4lite_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes, use_spp):
f1, f2 = feature_maps
f1_channel_num, f2_channel_num = feature_channel_nums
x1 = DarknetConv2D_BN_Leaky(f1_channel_num//2, (1,1))(f1)
if use_spp:
x1 = Spp_Conv2D_BN_Leaky(x1, f1_channel_num//2)
x1_upsample = compose(
DarknetConv2D_BN_Leaky(f2_channel_num//2, (1,1)),
UpSampling2D(2))(x1)
x2 = compose(
Concatenate(),
Depthwise_Separable_Conv2D_BN_Leaky(filters=f2_channel_num, kernel_size=(3, 3), block_id_str='pred_1'))([x1_upsample, f2])
y2 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2')(x2)
x2_downsample = compose(
ZeroPadding2D(((1,0),(1,0))),
Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2), block_id_str='pred_2'))(x2)
x1 = compose(
Concatenate(),
Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='pred_3'))([x2_downsample, x1])
y1 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1')(x1)
return y1, y2
| true | true |
1c3c5856bfe266954a44b3aa2ab48c0742a210dc | 18,067 | py | Python | mklib/tasks.py | ActiveState/mk | 2d0afd81ce4e8a3f39885cae5a50ded7bece7f76 | [
"MIT"
] | 2 | 2015-12-21T22:35:16.000Z | 2017-08-29T14:47:38.000Z | mklib/tasks.py | ActiveState/mk | 2d0afd81ce4e8a3f39885cae5a50ded7bece7f76 | [
"MIT"
] | 1 | 2020-05-14T12:59:01.000Z | 2020-12-11T18:34:07.000Z | mklib/tasks.py | ActiveState/mk | 2d0afd81ce4e8a3f39885cae5a50ded7bece7f76 | [
"MIT"
] | null | null | null | # Copyright (c) 2005-2007 ActiveState Software Ltd.
"""Task classes."""
import sys
import os
from os.path import exists, dirname, abspath, normpath, join
import logging
import types
from mklib.common import *
class TaskType(type):
    """Metaclass for Task's to catch definitions in Makefiles and handle
    accordingly.

    When a Task subclass is created outside this module, the metaclass
    looks for a `_mk_makefile_` global in the defining frame.  If found,
    it normalizes the class's `deps`/`results`/`pairs` attributes into
    list-producing descriptors and registers the class on that Makefile;
    classes defined outside a makefile are left untouched.
    """
    def __init__(cls, name, bases, dct):
        super(TaskType, cls).__init__(name, bases, dct)
        # Classes defined in this module (Task, Alias, TaskGroup, ...)
        # are skipped: only user Makefile definitions get processed.
        if dct["__module__"] != __name__:
            # sys._getframe(1) is the frame executing the class statement,
            # i.e. the Makefile module body.
            frame = sys._getframe(1)
            try:
                makefile = frame.f_globals["_mk_makefile_"]
            except KeyError:
                # This isn't defined in a makefile. Don't do anything
                # special with it.
                return
            # Normalizing descriptors for <task>.deps and <task>.results
            # and <taskgroup>.pairs.
            if issubclass(cls, TaskGroup):
                if dct.get("results"):
                    raise IllegalMakefileError(
                        "TaskGroup %s (in %s) has a `results' attribute. "
                        "This is not allowed on TaskGroup classes. "
                        "Results are defined by the output of `pairs'."
                        % (cls.__name__, makefile.path))
                if dct.get("make"):
                    raise IllegalMakefileError(
                        "TaskGroup %s (in %s) has a `make' attribute. "
                        "This is not allowed on TaskGroup classes. "
                        % (cls.__name__, makefile.path))
                cls.pairs = PairsListAccessor("pairs",
                    getattr(cls, "pairs", None))
                cls.deps = TaskGroupDepsListAccessor("deps",
                    getattr(cls, "deps", None))
                cls.results = TaskGroupResultsListAccessor()
            elif issubclass(cls, Alias):
                # An Alias may only have deps: its results are undefined.
                if dct.get("results"):
                    raise IllegalMakefileError(
                        "Alias %s (in %s) has a `results' attribute. "
                        "This is not allowed on Alias classes. "
                        % (cls.__name__, makefile.path))
                cls.deps = TaskOrFileListAccessor("deps",
                    getattr(cls, "deps", None))
            else:
                cls.deps = TaskOrFileListAccessor("deps",
                    getattr(cls, "deps", None))
                cls.results = FileListAccessor("results",
                    getattr(cls, "results", None))
            # Defining "default" on a Task is only good for that class
            # -- *not* for subclasses.
            cls.default = dct.get("default", False)
            # Register this on the Makefile.
            makefile.define_task(cls, name, bases, dct)
            if issubclass(cls, TaskGroup):
                log_makefile_defn("TaskGroup", name, frame)
            else:
                log_makefile_defn("Task", name, frame)
class Task(object):
    """Base class for a Makefile.py task.

    Typically a specific task is a subclass of Task. For example:

        from mklib.tasks import Task
        class install(Task):
            deps = ["build"]
            def results(self):
                yield os.path.join(self.cfg.prefix, "bin", "foo")
            def make(self):
                ...

    See mk's main documentation for more details.
    """
    __metaclass__ = TaskType
    default = False # set to true to set this as the default task
    def __init__(self, makefile, cfg):
        # The task name is simply the (Makefile-defined) class name.
        self.name = self.__class__.__name__
        self.makefile = makefile
        self.dir = makefile.dir # for convenience
        # nsname is the namespace-qualified name, e.g. "sub:build".
        if makefile.ns:
            self.nsname = "%s:%s" % (':'.join(makefile.ns), self.name)
        else:
            self.nsname = self.name
        self.cfg = cfg
        self.log = logging.getLogger("mk.task." + self.name)
    def __repr__(self):
        # A task with no results is "virtual" (always out-of-date).
        if not self.results:
            return "<Task '%s' (virtual)>" % self.nsname
        else:
            return "<Task '%s'>" % self.nsname
    def __str__(self):
        return "task `%s'" % self.nsname
    def id(self):
        # Use for tracking by the TaskMaster
        return ("task", self.nsname)
    @property
    def name_tuple(self):
        """A tuple of the task namespace and name suitable as a sorting key."""
        if self.makefile.ns:
            rv = (tuple(self.makefile.ns), self.name)
        else:
            rv = (None, self.name)
        return rv
    #TODO: is this necessary? Not really anymore.
    def doc(self):
        """Return documentation for this task, if any."""
        return self.__doc__
    # The presence of a make()' implementation on a Task class
    # indicates if there is anything to execute to do this task. For
    # example, typically an "all" task will not have a "make" method.
    # Instead it will just have a number of dependencies.
    #def make(self):
    #    ...
    #TODO: Add this when/if add support for Task sub-classes.
    #def is_complete(self):
    #    """Is this task complete.
    #
    #    This is *not* meant to reflect the state of `self.results' for
    #    this task, just the task itself. This means that for the base
    #    class "Task" (a virtual task) this is always False.
    #    """
class Alias(Task):
    """A special kind of Task to provide a short name for one or more
    tasks.

        class stage_one(Alias):
            deps = ["this_task", "and_that_task"]

    This allows for some task dependency trees that don't artificially
    create situations where tasks are re-built unnecessarily.
    Consider this set of tasks:

        ...
        class stage_docs(Task):
            deps = ["docs_subtask_a", "docs_subtask_b"]
        class stage_app(Task):
            deps = ["app_subtask_a", "app_subtask_b"]
        class installer(Task):
            deps = ["stage_docs", "stage_app"]
            results = ["app-1.0.0.dmg"]
            def make(self):
                # build the installer package...

    The problem here is that 'stage_docs' and 'stage_app' are "virtual
    tasks". (Virtual tasks are tasks with no 'results'. Like phony targets
    in GNU Makefiles, virtual tasks are always "out-of-date".)
    Because of that running:

        mk installer

    will *always* cause the installer build steps to be executed
    ("installer" depends on virtual tasks, hence it will always be
    out-of-date).

    Making the virtual tasks aliases solves this:

        class stage_docs(Alias):
            deps = ["docs_subtask_a", "docs_subtask_b"]
        class stage_app(Alias):
            deps = ["app_subtask_a", "app_subtask_b"]

    Now running `mk installer` will only rebuild if one of the subtasks
    is out-of-date.
    """
    def __repr__(self):
        return "<Alias '%s'>" % self.nsname
    def __str__(self):
        return "alias `%s'" % self.nsname
    def id(self):
        # Use for tracking by the TaskMaster; the "alias" tag keeps an
        # alias distinct from a same-named task.
        return ("alias", self.nsname)
class TaskGroup(Task):
    """Base class for a Makefile.py task group. E.g.:

        from mklib.tasks import TaskGroup
        from mklib import sh
        class move_source_files(TaskGroup):
            def pairs(self):
                for name in os.listdir("src"):
                    src = join("src", name)
                    dst = join("build", name)
                    yield src, dst
            def make_pair(self, src, dst)
                sh.cp(src, dst)

    See mk's main documentation for more details.
    """
    default = False # set to true to set this as the default task
    def __repr__(self):
        return "<TaskGroup '%s'>" % self.nsname
    def __str__(self):
        return "task group `%s'" % self.nsname
    #TODO: is this necessary? Not really anymore.
    def doc(self):
        """Return documentation for this task group, if any."""
        return self.__doc__
    #TODO: START HERE:
    # - add special handling in TaskMaster for TaskGroups to call
    #   make_pair as necessary
    def pairs(self):
        # Subclasses yield (dep, result) path pairs.
        raise NotImplementedError("sub-classes must implement pairs()")
    def make_pair(self, dep, result):
        # Subclasses build `result` from `dep` for a single pair.
        raise NotImplementedError("sub-classes must implement make_pair()")
class FileType(type):
    """Metaclass for File's to catch definitions in Makefiles and handle
    accordingly.

    Counterpart of TaskType for `File` subclasses defined in a Makefile:
    normalizes `deps` and registers the class via makefile.define_file().
    """
    def __init__(cls, name, bases, dct):
        super(FileType, cls).__init__(name, bases, dct)
        # Only process File subclasses defined outside this module.
        if dct["__module__"] != __name__:
            # Normalizing descriptor for <file>.deps.
            # String *results* are assumed to be file result paths
            # (hence are transformed into File instances), because
            # a base-Task instance -- i.e. a virtual task -- doesn't make
            # sense: you can't have a "virtual" result.
            cls.deps = FileListAccessor("deps",
                getattr(cls, "deps", None))
            # Register this on the Makefile.
            # NOTE(review): unlike TaskType, there is no KeyError guard
            # here -- a File subclass defined outside a makefile would
            # raise on the `_mk_makefile_` lookup.  Confirm intentional.
            frame = sys._getframe(1)
            makefile = frame.f_globals["_mk_makefile_"]
            makefile.define_file(cls, name, bases, dct)
            log_makefile_defn("File", dct["path"], frame)
class File(object):
    """A file node in the task dependency graph; also the base class for
    `File` definitions in Makefiles (via the FileType metaclass).
    """
    __metaclass__ = FileType
    def __init__(self, path, makefile=None, cfg=None):
        # Use absolute paths to guard against process cwd changes.
        # A makefile-relative path is resolved against the makefile's dir.
        path = makefile and normpath(join(makefile.dir, path)) or path
        self.path = abspath(path)
        self.makefile = makefile
        self.dir = makefile and makefile.dir or None
        self.cfg = cfg
    def __repr__(self):
        return "<File '%s'>" % self.path
    def __str__(self):
        return "file `%s'" % self.nicepath
    def id(self):
        # Use for tracking by the TaskMaster
        return ("file", self.path)
    @property
    def relpath(self):
        # Path relative to the current working directory.
        return relpath(self.path)
    @property
    def nicepath(self):
        """The shorter of the relative path and the (~-abbreviated) absolute
        path -- for friendly display only.
        """
        r = self.relpath
        a = self.path
        if not sys.platform == "win32":
            home = os.environ["HOME"]
            if a.startswith(home):
                #XXX:TODO: bug here for, e.g., "/home/jan" vs "/home/jane"
                a = "~" + a[len(home):]
        if len(r) < len(a):
            return r
        else:
            return a
    @property
    def deps(self):
        """By default a file has no deps."""
        # Dev Note: Using a property here to ensure separate instances
        # don't share a single list instance.
        return []
    def exists(self):
        return exists(self.path)
    def mtime(self):
        # Raises OSError if the file does not exist.
        return os.stat(self.path).st_mtime
#---- descriptors for some Task and File attributes
class TaskOrFileListAccessor(object):
    """Descriptor for `<task>.deps`.

    Wraps the class-level definition of the attribute (a list, a method
    returning a list, or a generator) so that reading the attribute
    always yields a list of Task or File instances.
    """
    def __init__(self, attrname, defn):
        self._attrname = attrname   # attribute name, for error messages
        self._defn = defn           # raw definition from the class body
        self._cache = None          # resolved list, built on first access
    def _get_items(self, obj, objtype):
        defn = self._defn
        if not defn:
            raw = []
        elif isinstance(defn, (types.FunctionType, types.MethodType)):
            raw = defn(obj)  # a generator or a plain result list
        else:
            raw = defn
        # A string entry that names a defined Task resolves to that Task
        # instance; otherwise it resolves to a File.
        resolved = []
        for entry in raw:
            if entry is None:
                continue
            if isinstance(entry, (File, Task)):
                resolved.append(entry)
            else:
                resolved.extend(obj.makefile.tasks_or_files_from_name(entry))
        return resolved
    def __get__(self, obj, objtype):
        if isinstance(self._defn, basestring):
            raise IllegalMakefileError(
                # '%ss': cheating, I know __str__() endswith an apostrophe
                "%ss `%s' attribute is a string, it must "
                "be a list (or a method that returns a list)"
                % (obj, self._attrname))
        if self._cache is None:
            self._cache = self._get_items(obj, objtype)
        return self._cache
class TaskGroupDepsListAccessor(TaskOrFileListAccessor):
    """Descriptor for `<taskgroup>.deps`."""
    def _get_items(self, obj, objtype):
        # Entries explicitly listed on a 'deps' attribute ...
        items = TaskOrFileListAccessor._get_items(self, obj, objtype)
        # ... plus the dependency half of every dep/result pair.
        items.extend(dep for dep, _result in obj.pairs)
        return items
class TaskGroupResultsListAccessor(object):
    """Descriptor for `<taskgroup>.results`.

    The results are the result half of each dep/result pair; resolved
    once and cached on the descriptor.
    """
    _cache = None
    def __get__(self, obj, objtype):
        if self._cache is None:
            pairs = obj.pairs
            self._cache = [result for (_dep, result) in pairs]
        return self._cache
class FileListAccessor(object):
    """Descriptor for `<task>.results` or `<file>.deps`.

    This wraps the definition of the attribute on the class (it can
    be a list, a method that returns a list, or a generator) so that
    accessing this attribute always results in a list of File instances.

    Strings in `<task>.results` and `<file>.deps` are assumed to be file
    result paths (hence are transformed into File instances), because a
    base-Task instance -- i.e. a virtual task -- doesn't make sense: you
    can't have a "virtual" result.
    """
    def __init__(self, attrname, defn):
        self._attrname = attrname   # attribute name, for error messages
        self._defn = defn           # raw definition: list, function, or None
        self._cache = None          # resolved File list, built on first access
    def __get__(self, obj, objtype):
        # This `return None` early out is an attempt to avoid this
        # problem when defining a task from another task where the base
        # task-class defines a "def results()" generator. I don't
        # *really* understand what is going on here.
        #
        #-----
        # Traceback
        # ...
        #   File "support/metricstasks.py", line 299, in <module>
        #     class aslogs_downloads(_FooTask):
        #   File "/home/trentm/as/mk/mklib/tasks.py", line 62, in __init__
        #     getattr(cls, "results", None))
        #   File "/home/trentm/as/mk/mklib/tasks.py", line 406, in __get__
        #     items = self._defn(obj) # is either a generator or result list
        # TypeError: Error when calling the metaclass bases
        #     unbound method results() must be called with _FooTask instance as first argument (got NoneType instance instead)
        #-----
        if obj is None:
            return None
        if isinstance(self._defn, basestring):
            raise IllegalMakefileError(
                # '%ss': cheating, I know __str__() endswith an apostrophe
                "%ss `%s' attribute is a string, it must "
                "be a list (or a method that returns a list)"
                % (obj, self._attrname))
        if self._cache is None:
            if not self._defn:
                items = []
            elif isinstance(self._defn, (types.FunctionType, types.MethodType)):
                items = self._defn(obj) # is either a generator or result list
            else:
                items = self._defn
            # String *deps* can be virtual. If the string matches a defined
            # Task then it becomes that Task instance, else it becomes
            # a file task.
            # NOTE: the cache lives on this descriptor instance (one per
            # class attribute), so every instance of the owning class
            # shares the same resolved list.
            self._cache = []
            for item in items:
                assert not isinstance(item, Task)
                if isinstance(item, File):
                    self._cache.append(item)
                else:
                    self._cache += obj.makefile.files_from_path(item)
        return self._cache
class PairsListAccessor(object):
    """Descriptor for `<taskgroup>.pairs`.

    A "pair" for a task group is a 2-tuple representing one
    dependency/result part of the group:
        (<dependency-path>, <result-path>)
    Note: Typically each part of the tuple will be a single path, but
    (TODO) multiple deps and/or results should be allowed (e.g. a .java
    file producing multiple .class files).

    This wraps the definition of the attribute on the class (it can
    be a list, a method that returns a list, or a generator) so that
    accessing this attribute always results in a list of 2-tuples:
        (<File-instance-for-dependency-path>,
         <File-instance-for-result-path>)
    """
    def __init__(self, attrname, defn):
        self._attrname = attrname   # attribute name, for error messages
        self._defn = defn           # raw definition from the class body
        self._cache = None          # resolved pair list, built on first access
    def __get__(self, obj, objtype):
        if isinstance(self._defn, basestring):
            raise IllegalMakefileError(
                # '%ss': cheating, I know __str__() endswith an apostrophe
                "%ss `%s' attribute is a string, it must "
                "be a list (or a method that returns a list)"
                % (obj, self._attrname))
        if self._cache is None:
            if not self._defn:
                items = []
            elif isinstance(self._defn, (types.FunctionType, types.MethodType)):
                items = self._defn(obj)  # is either a generator or result list
            else:
                items = self._defn
            self._cache = []
            for dep, res in items:
                #TODO: Current limitation on single or multiple files
                #      here. Deal with that.
                #TODO: should *tasks* be allowed for deps here?
                if isinstance(dep, File):
                    dep_file = dep
                else:
                    # Glob checks only make sense for path strings; `in`
                    # on a File instance raises TypeError (not iterable),
                    # so guard with the isinstance test first.
                    assert "*" not in dep and "?" not in dep
                    dep_file = obj.makefile.files_from_path(dep)[0]
                if isinstance(res, File):
                    res_file = res
                else:
                    assert "*" not in res and "?" not in res
                    res_file = obj.makefile.files_from_path(res)[0]
                self._cache.append((dep_file, res_file))
        return self._cache
| 36.279116 | 126 | 0.572757 |
import sys
import os
from os.path import exists, dirname, abspath, normpath, join
import logging
import types
from mklib.common import *
class TaskType(type):
def __init__(cls, name, bases, dct):
super(TaskType, cls).__init__(name, bases, dct)
if dct["__module__"] != __name__:
frame = sys._getframe(1)
try:
makefile = frame.f_globals["_mk_makefile_"]
except KeyError:
return
if issubclass(cls, TaskGroup):
if dct.get("results"):
raise IllegalMakefileError(
"TaskGroup %s (in %s) has a `results' attribute. "
"This is not allowed on TaskGroup classes. "
"Results are defined by the output of `pairs'."
% (cls.__name__, makefile.path))
if dct.get("make"):
raise IllegalMakefileError(
"TaskGroup %s (in %s) has a `make' attribute. "
"This is not allowed on TaskGroup classes. "
% (cls.__name__, makefile.path))
cls.pairs = PairsListAccessor("pairs",
getattr(cls, "pairs", None))
cls.deps = TaskGroupDepsListAccessor("deps",
getattr(cls, "deps", None))
cls.results = TaskGroupResultsListAccessor()
elif issubclass(cls, Alias):
if dct.get("results"):
raise IllegalMakefileError(
"Alias %s (in %s) has a `results' attribute. "
"This is not allowed on Alias classes. "
% (cls.__name__, makefile.path))
cls.deps = TaskOrFileListAccessor("deps",
getattr(cls, "deps", None))
else:
cls.deps = TaskOrFileListAccessor("deps",
getattr(cls, "deps", None))
cls.results = FileListAccessor("results",
getattr(cls, "results", None))
cls.default = dct.get("default", False)
makefile.define_task(cls, name, bases, dct)
if issubclass(cls, TaskGroup):
log_makefile_defn("TaskGroup", name, frame)
else:
log_makefile_defn("Task", name, frame)
class Task(object):
__metaclass__ = TaskType
default = False
def __init__(self, makefile, cfg):
self.name = self.__class__.__name__
self.makefile = makefile
self.dir = makefile.dir
if makefile.ns:
self.nsname = "%s:%s" % (':'.join(makefile.ns), self.name)
else:
self.nsname = self.name
self.cfg = cfg
self.log = logging.getLogger("mk.task." + self.name)
def __repr__(self):
if not self.results:
return "<Task '%s' (virtual)>" % self.nsname
else:
return "<Task '%s'>" % self.nsname
def __str__(self):
return "task `%s'" % self.nsname
def id(self):
# Use for tracking by the TaskMaster
return ("task", self.nsname)
@property
def name_tuple(self):
if self.makefile.ns:
rv = (tuple(self.makefile.ns), self.name)
else:
rv = (None, self.name)
return rv
#TODO: is this necessary? Not really anymore.
def doc(self):
return self.__doc__
# The presence of a make()' implementation on a Task class
#
# This is *not* meant to reflect the state of `self.results' for
# this task, just the task itself. This means that for the base
# class "Task" (a virtual task) this is always False.
# """
class Alias(Task):
def __repr__(self):
return "<Alias '%s'>" % self.nsname
def __str__(self):
return "alias `%s'" % self.nsname
def id(self):
return ("alias", self.nsname)
class TaskGroup(Task):
default = False
def __repr__(self):
return "<TaskGroup '%s'>" % self.nsname
def __str__(self):
return "task group `%s'" % self.nsname
#TODO: is this necessary? Not really anymore.
def doc(self):
return self.__doc__
#TODO: START HERE:
# - add special handling in TaskMaster for TaskGroups to call
# make_pair as necessary
def pairs(self):
raise NotImplementedError("sub-classes must implement pairs()")
def make_pair(self, dep, result):
raise NotImplementedError("sub-classes must implement make_pair()")
class FileType(type):
def __init__(cls, name, bases, dct):
super(FileType, cls).__init__(name, bases, dct)
if dct["__module__"] != __name__:
# Normalizing descriptor for <file>.deps.
# String *results* are assumed to be file result paths
# (hence are transformed into File instances), because
# a base-Task instance -- i.e. a virtual task -- doesn't make
cls.deps = FileListAccessor("deps",
getattr(cls, "deps", None))
# Register this on the Makefile.
frame = sys._getframe(1)
makefile = frame.f_globals["_mk_makefile_"]
makefile.define_file(cls, name, bases, dct)
log_makefile_defn("File", dct["path"], frame)
class File(object):
__metaclass__ = FileType
def __init__(self, path, makefile=None, cfg=None):
# Use absolute paths to guard against process cwd changes.
path = makefile and normpath(join(makefile.dir, path)) or path
self.path = abspath(path)
self.makefile = makefile
self.dir = makefile and makefile.dir or None
self.cfg = cfg
def __repr__(self):
return "<File '%s'>" % self.path
def __str__(self):
return "file `%s'" % self.nicepath
def id(self):
return ("file", self.path)
@property
def relpath(self):
return relpath(self.path)
@property
def nicepath(self):
r = self.relpath
a = self.path
if not sys.platform == "win32":
home = os.environ["HOME"]
if a.startswith(home):
a = "~" + a[len(home):]
if len(r) < len(a):
return r
else:
return a
@property
def deps(self):
return []
def exists(self):
return exists(self.path)
def mtime(self):
return os.stat(self.path).st_mtime
#---- descriptors for some Task and File attributes
class TaskOrFileListAccessor(object):
def __init__(self, attrname, defn):
self._attrname = attrname
self._defn = defn
self._cache = None
def _get_items(self, obj, objtype):
if not self._defn:
items = []
elif isinstance(self._defn, (types.FunctionType, types.MethodType)):
items = self._defn(obj) # is either a generator or result list
else:
items = self._defn
# If the string matches a defined Task then it becomes that
# Task instance, else it becomes a File.
rv = []
for item in items:
if item is None:
continue
if isinstance(item, (File, Task)):
rv.append(item)
else:
rv += obj.makefile.tasks_or_files_from_name(item)
return rv
def __get__(self, obj, objtype):
if isinstance(self._defn, basestring):
raise IllegalMakefileError(
# '%ss': cheating, I know __str__() endswith an apostrophe
"%ss `%s' attribute is a string, it must "
"be a list (or a method that returns a list)"
% (obj, self._attrname))
if self._cache is None:
self._cache = self._get_items(obj, objtype)
return self._cache
class TaskGroupDepsListAccessor(TaskOrFileListAccessor):
def _get_items(self, obj, objtype):
# a 'deps' attribute ...
rv = TaskOrFileListAccessor._get_items(self, obj, objtype)
# ... plus the deps from the individual dep/result "pairs".
rv += [dep for dep, res in obj.pairs]
return rv
class TaskGroupResultsListAccessor(object):
_cache = None
def __get__(self, obj, objtype):
if self._cache is None:
self._cache = [res for dep, res in obj.pairs]
return self._cache
class FileListAccessor(object):
def __init__(self, attrname, defn):
self._attrname = attrname
self._defn = defn
self._cache = None
def __get__(self, obj, objtype):
# This `return None` early out is an attempt to avoid this
# problem when defining a task from another task where the base
# task-class defines a "def results()" generator. I don't
obj is None:
return None
if isinstance(self._defn, basestring):
raise IllegalMakefileError(
"%ss `%s' attribute is a string, it must "
"be a list (or a method that returns a list)"
% (obj, self._attrname))
if self._cache is None:
if not self._defn:
items = []
elif isinstance(self._defn, (types.FunctionType, types.MethodType)):
items = self._defn(obj) # is either a generator or result list
else:
items = self._defn
# String *deps* can be virtual. If the string matches a defined
# Task then it becomes that Task instance, else it becomes
# a file task.
self._cache = []
for item in items:
assert not isinstance(item, Task)
if isinstance(item, File):
self._cache.append(item)
else:
self._cache += obj.makefile.files_from_path(item)
return self._cache
class PairsListAccessor(object):
def __init__(self, attrname, defn):
self._attrname = attrname
self._defn = defn
self._cache = None
def __get__(self, obj, objtype):
if isinstance(self._defn, basestring):
raise IllegalMakefileError(
# '%ss': cheating, I know __str__() endswith an apostrophe
"%ss `%s' attribute is a string, it must "
"be a list (or a method that returns a list)"
% (obj, self._attrname))
if self._cache is None:
if not self._defn:
items = []
elif isinstance(self._defn, (types.FunctionType, types.MethodType)):
items = self._defn(obj)
else:
items = self._defn
self._cache = []
for dep, res in items:
assert "*" not in dep and "?" not in dep
dep_file = (isinstance(dep, File)
and dep
or obj.makefile.files_from_path(dep)[0])
assert "*" not in res and "?" not in res
res_file = (isinstance(res, File)
and res
or obj.makefile.files_from_path(res)[0])
self._cache.append( (dep_file, res_file) )
return self._cache
| true | true |
1c3c58d5aa63dd578762930f9b5bf7ff4332eeb8 | 4,357 | py | Python | consoleme/handlers/v2/generate_changes.py | shyovn/consoleme | 471592b718b22f83244609ab47d5bf3f9a715a4d | [
"Apache-2.0"
] | 2,835 | 2020-12-09T19:07:24.000Z | 2022-03-31T06:38:44.000Z | consoleme/handlers/v2/generate_changes.py | shyovn/consoleme | 471592b718b22f83244609ab47d5bf3f9a715a4d | [
"Apache-2.0"
] | 179 | 2020-12-10T01:51:25.000Z | 2022-03-31T02:06:06.000Z | consoleme/handlers/v2/generate_changes.py | shyovn/consoleme | 471592b718b22f83244609ab47d5bf3f9a715a4d | [
"Apache-2.0"
] | 219 | 2020-12-09T21:30:56.000Z | 2022-03-31T05:57:36.000Z | import sys
import sentry_sdk
from pydantic import ValidationError
from consoleme.config import config
from consoleme.exceptions.exceptions import InvalidRequestParameter
from consoleme.handlers.base import BaseAPIV2Handler
from consoleme.lib.change_request import generate_change_model_array
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.models import ChangeGeneratorModelArray
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
class GenerateChangesHandler(BaseAPIV2Handler):
    """Handler for /api/v2/generate_changes

    Generates a ChangeModelArray from ChangeGeneratorModelArray
    """

    allowed_methods = ["POST"]
    async def post(self):
        """
        POST /api/v2/generate_changes

        Generates a ChangeModelArray JSON from ChangeGeneratorModelArray JSON.

        Request example:
        {"changes": [
                {
                    "principal": {
                        "principal_arn": "arn:aws:iam::123456789012:role/aRole",
                        "principal_type": "AwsResource"
                    },
                    "generator_type": "s3",
                    "resource_arn": ["arn:aws:s3:::123456789012-bucket"],
                    "bucket_prefix": "/*",
                    "effect": "Allow",
                    "action_groups": [
                        "get",
                        "list"
                    ]
                }
            ]}

        Response example:
        { "changes" : [
                {
                    "principal": {
                        "principal_arn": "arn:aws:iam::123456789012:role/aRole",
                        "principal_type": "AwsResource"
                    },
                    "change_type": "inline_policy",
                    "resource_arn": [
                        "arn:aws:s3:::123456789012-bucket"
                    ],
                    "resource": null,
                    "condition": null,
                    "policy_name": "cm_user_1592499820_gmli",
                    "new": true,
                    "policy": {
                        "version": "2012-10-17",
                        "statements": null,
                        "policy_document": "{\"Version\":\"2012-10-17\",\"Statement\":[[{\"Action\"...",
                        "policy_sha256": "cb300def8dd1deaf4db2bfeef4bc6fc740be18e8ccae74c399affe781f82ba6e"
                    },
                    "old_policy": null
                }
            ]
        }
        """
        # Structured context attached to every log/metric emitted below.
        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "user-agent": self.request.headers.get("User-Agent"),
            "ip": self.ip,
            "request_id": self.request_uuid,
        }
        try:
            # Validate the model
            changes = ChangeGeneratorModelArray.parse_raw(self.request.body)
            # Override user attribute for each change so callers cannot
            # impersonate another user via the request body.
            for change in changes.changes:
                change.user = self.user
            # Loop through the raw json object to retrieve attributes that would be parsed out in the
            # ChangeGeneratorModelArray, such as bucket_prefix for S3ChangeGeneratorModel
            change_model_array = await generate_change_model_array(changes)
        except (InvalidRequestParameter, ValidationError) as e:
            # Client error: malformed/invalid request body -> 400.
            log_data["message"] = "Validation Exception"
            log.error(log_data, exc_info=True)
            stats.count(
                f"{log_data['function']}.validation_exception", tags={"user": self.user}
            )
            self.write_error(400, message="Error validating input: " + str(e))
            return
        except Exception as e:
            # Catch-all: record to Sentry and return a 500.
            log_data["message"] = "Unknown Exception occurred while generating changes"
            log.error(log_data, exc_info=True)
            stats.count(f"{log_data['function']}.exception", tags={"user": self.user})
            # NOTE(review): relies on sentry_sdk.capture_exception accepting
            # scope kwargs (tags=...) -- confirm against the installed
            # sentry-sdk version.
            sentry_sdk.capture_exception(tags={"user": self.user})
            self.write_error(500, message="Error generating changes: " + str(e))
            return
        log_data["message"] = "Successfully generated changes requested"
        log.info(log_data)
        stats.count(f"{log_data['function']}.success", tags={"user": self.user})
        self.write(change_model_array.json())
| 37.239316 | 107 | 0.556346 | import sys
import sentry_sdk
from pydantic import ValidationError
from consoleme.config import config
from consoleme.exceptions.exceptions import InvalidRequestParameter
from consoleme.handlers.base import BaseAPIV2Handler
from consoleme.lib.change_request import generate_change_model_array
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.models import ChangeGeneratorModelArray
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
class GenerateChangesHandler(BaseAPIV2Handler):
allowed_methods = ["POST"]
async def post(self):
log_data = {
"user": self.user,
"function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
"user-agent": self.request.headers.get("User-Agent"),
"ip": self.ip,
"request_id": self.request_uuid,
}
try:
changes = ChangeGeneratorModelArray.parse_raw(self.request.body)
for change in changes.changes:
change.user = self.user
change_model_array = await generate_change_model_array(changes)
except (InvalidRequestParameter, ValidationError) as e:
log_data["message"] = "Validation Exception"
log.error(log_data, exc_info=True)
stats.count(
f"{log_data['function']}.validation_exception", tags={"user": self.user}
)
self.write_error(400, message="Error validating input: " + str(e))
return
except Exception as e:
log_data["message"] = "Unknown Exception occurred while generating changes"
log.error(log_data, exc_info=True)
stats.count(f"{log_data['function']}.exception", tags={"user": self.user})
sentry_sdk.capture_exception(tags={"user": self.user})
self.write_error(500, message="Error generating changes: " + str(e))
return
log_data["message"] = "Successfully generated changes requested"
log.info(log_data)
stats.count(f"{log_data['function']}.success", tags={"user": self.user})
self.write(change_model_array.json())
| true | true |
1c3c59272f0b216c9e1a63e8019a1742ca1d1695 | 418 | py | Python | jobs/models.py | bharatchanddandamudi/Bharatchand_Portfolio_V1.1-deploy- | 21205ec9d3263463b43422b4679b9736142f7308 | [
"MIT"
] | null | null | null | jobs/models.py | bharatchanddandamudi/Bharatchand_Portfolio_V1.1-deploy- | 21205ec9d3263463b43422b4679b9736142f7308 | [
"MIT"
] | null | null | null | jobs/models.py | bharatchanddandamudi/Bharatchand_Portfolio_V1.1-deploy- | 21205ec9d3263463b43422b4679b9736142f7308 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
# Create your models here.
class Job(models.Model):
    """A portfolio entry: an uploaded thumbnail plus a short summary."""
    image = models.ImageField(upload_to='images/')
    summary = models.CharField(max_length=5000)
    # A video attachment was sketched here previously:
    # video = models.FileField(upload_to='images/', null=True)

    def __str__(self):
        """Label shown in the admin and wherever the object is printed."""
        return self.summary

    def get_absolute_url(self):
        """Detail-page URL for this job, via the 'links' named route."""
        return reverse("links", kwargs={"pk": self.pk})
from django.urls import reverse
class Job(models.Model):
image = models.ImageField(upload_to='images/')
summary = models.CharField(max_length=5000)
def __str__(self):
return self.summary
def get_absolute_url(self):
return reverse('links', kwargs={"pk": self.pk}) | true | true |
1c3c595ef0b86a94121f9cb02a0e60c0c392bebe | 3,673 | py | Python | cortex/mapper/samplers.py | mvdoc/pycortex | bc8a93cac9518e3c1cd89650c703f9f3814e805b | [
"BSD-2-Clause"
] | 423 | 2015-01-06T02:46:46.000Z | 2022-03-23T17:20:38.000Z | cortex/mapper/samplers.py | mvdoc/pycortex | bc8a93cac9518e3c1cd89650c703f9f3814e805b | [
"BSD-2-Clause"
] | 243 | 2015-01-03T02:10:03.000Z | 2022-03-31T19:29:48.000Z | cortex/mapper/samplers.py | mvdoc/pycortex | bc8a93cac9518e3c1cd89650c703f9f3814e805b | [
"BSD-2-Clause"
] | 136 | 2015-03-23T20:35:59.000Z | 2022-03-09T13:39:10.000Z | import numpy as np
def collapse(j, data):
    """Sum the `data` entries that share the same index in `j`.

    Returns (unique_indices, summed_values): for each distinct value u
    of `j` (in ascending order), the sum of data[j == u].
    """
    keys = np.unique(j)
    totals = np.array([np.sum(data[j == key]) for key in keys])
    return keys, totals
def nearest(coords, shape, **kwargs):
    """Nearest-neighbor sampler.

    `coords` rows are (x, y, z) positions against a volume of `shape`
    (z, y, x).  Rows that are all-NaN or fall outside the volume (with
    half-voxel slack) are dropped.  Returns (i, j, data): coordinate
    index, raveled voxel index of the rounded position, and unit weight.
    """
    valid = ~(np.isnan(coords).all(1))
    # Keep coordinates within half a voxel of the volume on every axis.
    valid &= (coords[:, 0] > -.5) & (coords[:, 0] < shape[2] + .5)
    valid &= (coords[:, 1] > -.5) & (coords[:, 1] < shape[1] + .5)
    valid &= (coords[:, 2] > -.5) & (coords[:, 2] < shape[0] + .5)
    rounded = coords[valid].round().astype(int)
    # rounded rows are (x, y, z); ravel expects (z, y, x).
    j = np.ravel_multi_index(rounded.T[::-1], shape, mode='clip')
    i = np.nonzero(valid)[0]
    return i, j, np.ones(len(i))
def trilinear(coords, shape, **kwargs):
    """Trilinear-interpolation sampler.

    For each valid (non-all-NaN) coordinate, distributes weight over the
    eight surrounding voxels.  Returns (i, j, data): coordinate index,
    raveled voxel index, and interpolation weight.

    Interpolation equation from
    http://paulbourke.net/miscellaneous/interpolation/
    """
    valid = ~(np.isnan(coords).all(1))
    (x, y, z), floor = np.modf(coords[valid].T)
    floor = floor.astype(int)
    ceil = floor + 1
    # Negative fractional parts would give negative weights; clamp to 0.
    x[x < 0] = 0
    y[y < 0] = 0
    z[z < 0] = 0
    # The eight cube corners as (use-ceil-x, use-ceil-y, use-ceil-z)
    # flags, enumerated in the same order as the reference equation.
    corners = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1),
               (1, 0, 1), (0, 1, 1), (1, 1, 0), (1, 1, 1)]
    index_blocks = []
    weight_blocks = []
    for cx, cy, cz in corners:
        index_blocks.append(np.array([
            ceil[2] if cz else floor[2],
            ceil[1] if cy else floor[1],
            ceil[0] if cx else floor[0],
        ]))
        weight_blocks.append(
            (x if cx else 1 - x) * (y if cy else 1 - y) * (z if cz else 1 - z))
    i = np.tile(np.nonzero(valid)[0], 8)
    j = np.hstack(index_blocks)
    data = np.vstack(weight_blocks).ravel()
    return i, np.ravel_multi_index(j, shape, mode='clip'), data
def distance_func(func, coords, shape, renorm=True, mp=True):
    """Build sparse sampling weights for a separable distance kernel.

    `func` maps an array of per-axis distances to kernel weights.  For
    each coordinate (row of `coords`, in (x, y, z) order against a
    volume of `shape` (z, y, x)), the product of the per-axis 1-D
    kernels gives the weights over that voxel neighborhood.

    Returns a (3, nnz) array stacking (i, j, data): coordinate index,
    raveled voxel index, and weight.  If `renorm`, each coordinate's
    weights are normalized to sum to 1.  With `mp=True` the
    per-coordinate work is farmed out via the package's multiprocessing
    helper.
    """
    nZ, nY, nX = shape
    # Signed distances from every coordinate to every grid line, per axis.
    dx = coords[:, 0] - np.atleast_2d(np.arange(nX)).T
    dy = coords[:, 1] - np.atleast_2d(np.arange(nY)).T
    dz = coords[:, 2] - np.atleast_2d(np.arange(nZ)).T

    Lx, Ly, Lz = func(dx), func(dy), func(dz)
    ix, jx = np.nonzero(Lx)
    iy, jy = np.nonzero(Ly)
    iz, jz = np.nonzero(Lz)

    ba = np.broadcast_arrays
    # Renamed from `func` to avoid shadowing the kernel argument.
    def sample_one(v):
        """Return (i, j, data) for coordinate index `v`."""
        mx, my, mz = ix[jx == v], iy[jy == v], iz[jz == v]
        idx, idy, idz = [i.ravel() for i in ba(*np.ix_(mx, my, mz))]
        vx, vy, vz = [i.ravel() for i in ba(*np.ix_(Lx[mx, v], Ly[my, v], Lz[mz, v]))]
        i = v * np.ones(len(idx))
        j = np.ravel_multi_index((idz, idy, idx), shape, mode='clip')
        data = vx * vy * vz
        if renorm:
            data /= data.sum()
        return i, j, data

    if mp:
        from .. import mp
        ijdata = mp.map(sample_one, range(len(coords)))
    else:
        ijdata = [sample_one(v) for v in range(len(coords))]
    return np.hstack(ijdata)

def gaussian(coords, shape, sigma=1, window=3, **kwargs):
    """Gaussian sampler -- not yet implemented."""
    raise NotImplementedError("gaussian sampler is not implemented")

def lanczos(coords, shape, window=3, **kwargs):
    """Lanczos-windowed sinc sampler (see `distance_func` for the
    return format and keyword arguments)."""
    def kernel(d):
        out = np.zeros_like(d)
        sel = np.abs(d) < window
        seld = d[sel]
        # L(0) = 1 by definition; evaluating the sinc formula at 0 would
        # produce 0 * inf = NaN whenever a coordinate lands exactly on a
        # grid line.
        vals = np.ones_like(seld)
        nz = seld != 0
        dnz = seld[nz]
        vals[nz] = (np.sin(np.pi * dnz) * np.sin(np.pi * dnz / window)
                    * (window / (np.pi ** 2 * dnz ** 2)))
        out[sel] = vals
        return out
    return distance_func(kernel, coords, shape, **kwargs)
| 36.73 | 105 | 0.564661 | import numpy as np
def collapse(j, data):
uniques = np.unique(j)
return uniques, np.array([data[j == u].sum() for u in uniques])
def nearest(coords, shape, **kwargs):
valid = ~(np.isnan(coords).all(1))
valid = np.logical_and(valid, np.logical_and(coords[:,0] > -.5, coords[:,0] < shape[2]+.5))
valid = np.logical_and(valid, np.logical_and(coords[:,1] > -.5, coords[:,1] < shape[1]+.5))
valid = np.logical_and(valid, np.logical_and(coords[:,2] > -.5, coords[:,2] < shape[0]+.5))
rcoords = coords[valid].round().astype(int)
j = np.ravel_multi_index(rcoords.T[::-1], shape, mode='clip')
alid)[0], j, np.ones((valid.sum(),))
def trilinear(coords, shape, **kwargs):
valid = ~(np.isnan(coords).all(1))
(x, y, z), floor = np.modf(coords[valid].T)
floor = floor.astype(int)
ceil = floor + 1
x[x < 0] = 0
y[y < 0] = 0
z[z < 0] = 0
i000 = np.array([floor[2], floor[1], floor[0]])
i100 = np.array([floor[2], floor[1], ceil[0]])
i010 = np.array([floor[2], ceil[1], floor[0]])
i001 = np.array([ ceil[2], floor[1], floor[0]])
i101 = np.array([ ceil[2], floor[1], ceil[0]])
i011 = np.array([ ceil[2], ceil[1], floor[0]])
i110 = np.array([floor[2], ceil[1], ceil[0]])
i111 = np.array([ ceil[2], ceil[1], ceil[0]])
v000 = (1-x)*(1-y)*(1-z)
v100 = x*(1-y)*(1-z)
v010 = (1-x)*y*(1-z)
v110 = x*y*(1-z)
v001 = (1-x)*(1-y)*z
v101 = x*(1-y)*z
v011 = (1-x)*y*z
v111 = x*y*z
i = np.tile(np.nonzero(valid)[0], [1, 8]).ravel()
j = np.hstack([i000, i100, i010, i001, i101, i011, i110, i111])
data = np.vstack([v000, v100, v010, v001, v101, v011, v110, v111]).ravel()
return i, np.ravel_multi_index(j, shape, mode='clip'), data
def distance_func(func, coords, shape, renorm=True, mp=True):
nZ, nY, nX = shape
dx = coords[:,0] - np.atleast_2d(np.arange(nX)).T
dy = coords[:,1] - np.atleast_2d(np.arange(nY)).T
dz = coords[:,2] - np.atleast_2d(np.arange(nZ)).T
Lx, Ly, Lz = func(dx), func(dy), func(dz)
ix, jx = np.nonzero(Lx)
iy, jy = np.nonzero(Ly)
iz, jz = np.nonzero(Lz)
ba = np.broadcast_arrays
def func(v):
mx, my, mz = ix[jx == v], iy[jy == v], iz[jz == v]
idx, idy, idz = [i.ravel() for i in ba(*np.ix_(mx, my, mz))]
vx, vy, vz = [i.ravel() for i in ba(*np.ix_(Lx[mx, v], Ly[my, v], Lz[mz, v]))]
i = v * np.ones((len(idx,)))
j = np.ravel_multi_index((idz, idy, idx), shape, mode='clip')
data = vx*vy*vz
if renorm:
data /= data.sum()
return i, j, data
if mp:
from .. import mp
ijdata = mp.map(func, range(len(coords)))
else:
ijdata = [func(x) for x in range(len(coords))]
return np.hstack(ijdata)
def gaussian(coords, shape, sigma=1, window=3, **kwargs):
raise NotImplementedError
def gaussian(x):
pass
return distance_func(gaussian, coords, shape, **kwargs)
def lanczos(coords, shape, window=3, **kwargs):
def lanczos(x):
out = np.zeros_like(x)
sel = np.abs(x)<window
selx = x[sel]
out[sel] = np.sin(np.pi * selx) * np.sin(np.pi * selx / window) * (window / (np.pi**2 * selx**2))
return out
return distance_func(lanczos, coords, shape, **kwargs)
| true | true |
1c3c59f62db6ebd3d9c62250d075bec1b3a1d338 | 1,106 | py | Python | test/travis_test_wall_stop.py | NaoUsagi/pimouse_run_corridor | ac217dea819dace826f9e39083f2c85084ab02bf | [
"BSD-3-Clause"
] | null | null | null | test/travis_test_wall_stop.py | NaoUsagi/pimouse_run_corridor | ac217dea819dace826f9e39083f2c85084ab02bf | [
"BSD-3-Clause"
] | null | null | null | test/travis_test_wall_stop.py | NaoUsagi/pimouse_run_corridor | ac217dea819dace826f9e39083f2c85084ab02bf | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import unittest, rostest
import rosnode, rospy
import time
class WallStopTest(unittest.TestCase):
    """Integration test: exercise the wall-stop behavior through the
    Raspberry Pi Mouse device-file interface (/dev/rtlightsensor0 and
    /dev/rtmotor_raw_*).
    """
    def set_and_get(self,lf,ls,rs,rf):
        """Write the four light-sensor readings (left-front, left-side,
        right-side, right-front) and return the resulting (left, right)
        raw motor values after giving the node 0.3s to react.
        """
        # The sensor device file takes the readings in reverse order.
        with open("/dev/rtlightsensor0","w") as f:
            f.write("%d %d %d %d\n" % (rf,rs,ls,lf))
        time.sleep(0.3)
        with open("/dev/rtmotor_raw_l0","r") as lf,\
             open("/dev/rtmotor_raw_r0","r") as rf:
            left = int(lf.readline().rstrip())
            right = int(rf.readline().rstrip())
        return left, right
    def test_io(self):
        """Motors should stop when the sensor total reaches 500 and run
        again when it drops below (thresholds implied by the cases below).
        """
        left, right = self.set_and_get(400,100,100,0) #total: 600
        self.assertTrue(left == 0 and right == 0,"can't stop")
        left, right = self.set_and_get(400,0,0,99) #total: 499
        self.assertTrue(left != 0 and right != 0,"can't move again")
        left, right = self.set_and_get(150,0,200,150) #total: 500
        self.assertTrue(left == 0 and right == 0,"can't stop")
if __name__ == '__main__':
    # Give the node under test time to come up before running the suite.
    time.sleep(3)
    rospy.init_node('travis_test_wall_stop')
    rostest.rosrun('pimouse_run_corridor','travis_test_wall_stop',WallStopTest)
import unittest, rostest
import rosnode, rospy
import time
class WallStopTest(unittest.TestCase):
def set_and_get(self,lf,ls,rs,rf):
with open("/dev/rtlightsensor0","w") as f:
f.write("%d %d %d %d\n" % (rf,rs,ls,lf))
time.sleep(0.3)
with open("/dev/rtmotor_raw_l0","r") as lf,\
open("/dev/rtmotor_raw_r0","r") as rf:
left = int(lf.readline().rstrip())
right = int(rf.readline().rstrip())
return left, right
def test_io(self):
left, right = self.set_and_get(400,100,100,0)
self.assertTrue(left == 0 and right == 0,"can't stop")
left, right = self.set_and_get(400,0,0,99) #total: 499
self.assertTrue(left != 0 and right != 0,"can't move again")
left, right = self.set_and_get(150,0,200,150)
self.assertTrue(left == 0 and right == 0,"can't stop")
if __name__ == '__main__':
time.sleep(3)
rospy.init_node('travis_test_wall_stop')
rostest.rosrun('pimouse_run_corridor','travis_test_wall_stop',WallStopTest)
| true | true |
1c3c5a046b6fcd87fb0f3de3efd5563e6f04c068 | 2,444 | py | Python | main_investment.py | ginkgodango/lgs1 | d683c4b9aa620a14862d7ff961c6b3a0e6720507 | [
"MIT"
] | null | null | null | main_investment.py | ginkgodango/lgs1 | d683c4b9aa620a14862d7ff961c6b3a0e6720507 | [
"MIT"
] | null | null | null | main_investment.py | ginkgodango/lgs1 | d683c4b9aa620a14862d7ff961c6b3a0e6720507 | [
"MIT"
] | null | null | null | import datetime as dt
import pandas as pd
import investment.extraction
directory = 'D:/automation/final/investment/2019/04/'
update_filename = 'LGSS Preliminary Performance April 2019_Addkeys.xlsx'
returns_filename = 'returns_NOF_201903.csv'
market_values_filename = 'market_values_NOF_201903.csv'
table_filename = 'table_NOF_201903.csv'
report_date = dt.datetime(2019, 4, 30)
MTD = 10
days_in_month = 30
SSgACUSTOM_Index = 2.32
EOAS_Index = 3.36
MXEF_Index = 3.05
"""
Loads the new JPM monthly report
"""
df_update = investment.extraction.load_update(directory + update_filename)
df_update = investment.extraction.add_report_date(df_update, report_date)
df_update = investment.extraction.clean(df_update)
# df_update.to_csv(directory + 'df_update_' + str(report_date.date()) + '.csv', index=True)
"""
Adds new month returns to existing return time-series.
"""
df_returns = investment.extraction.load_returns(directory + returns_filename)
missing_returns_list = investment.extraction.update_check_missing_returns(df_returns, df_update)
new_returns_list = investment.extraction.update_check_new_returns(df_returns, df_update)
update_dict = investment.extraction.create_update_dict(df_update, days_in_month, SSgACUSTOM_Index, EOAS_Index, MXEF_Index)
df_updater = investment.extraction.update_dict_to_df(update_dict, report_date)
df_returns = investment.extraction.apply_update_to_df_returns(df_returns, df_updater)
# df_returns.to_csv(directory + 'returns_NOF_201904.csv')
"""
Adds new month market values to existing market values time-series.
"""
df_market_values = investment.extraction.load_market_values(directory + market_values_filename)
missing_market_values_list = investment.extraction.update_check_missing_market_values(df_market_values, df_update)
new_market_values_list = investment.extraction.update_check_new_market_values(df_market_values, df_update)
update_market_values_dict = investment.extraction.create_update_market_value_dict(df_update)
df_updater_market_values = investment.extraction.update_market_values_dict_to_df(update_market_values_dict, report_date)
df_market_values = investment.extraction.apply_update_to_df_market_values(df_market_values, df_updater_market_values)
# df_market_values.to_csv(directory + 'market_values_NOF_201904.csv')
"""
Adds FX sector return and market value
"""
df_returns['FX'] = df_returns['IECurrencyOverlay_IE']
df_market_values['FX'] = df_market_values['IECurrencyOverlay_IE']
| 44.436364 | 122 | 0.837152 | import datetime as dt
import pandas as pd
import investment.extraction
# Run configuration. NOTE(review): machine-specific Windows path - confirm
# before reuse.
directory = 'D:/automation/final/investment/2019/04/'
update_filename = 'LGSS Preliminary Performance April 2019_Addkeys.xlsx'
# Prior-month (201903) inputs the new month is appended to.
returns_filename = 'returns_NOF_201903.csv'
market_values_filename = 'market_values_NOF_201903.csv'
table_filename = 'table_NOF_201903.csv'
report_date = dt.datetime(2019, 4, 30)
MTD = 10  # NOTE(review): unused in this section.
days_in_month = 30
# Hard-coded benchmark index returns for the month (units presumably percent
# - TODO confirm against create_update_dict).
SSgACUSTOM_Index = 2.32
EOAS_Index = 3.36
MXEF_Index = 3.05
# Load and normalise the new JPM monthly report.
df_update = investment.extraction.load_update(directory + update_filename)
df_update = investment.extraction.add_report_date(df_update, report_date)
df_update = investment.extraction.clean(df_update)
# Append the new month to the existing returns time-series, after checking
# which series are missing from / new in the update.
df_returns = investment.extraction.load_returns(directory + returns_filename)
missing_returns_list = investment.extraction.update_check_missing_returns(df_returns, df_update)
new_returns_list = investment.extraction.update_check_new_returns(df_returns, df_update)
update_dict = investment.extraction.create_update_dict(df_update, days_in_month, SSgACUSTOM_Index, EOAS_Index, MXEF_Index)
df_updater = investment.extraction.update_dict_to_df(update_dict, report_date)
df_returns = investment.extraction.apply_update_to_df_returns(df_returns, df_updater)
# Same flow for the market-values time-series.
df_market_values = investment.extraction.load_market_values(directory + market_values_filename)
missing_market_values_list = investment.extraction.update_check_missing_market_values(df_market_values, df_update)
new_market_values_list = investment.extraction.update_check_new_market_values(df_market_values, df_update)
update_market_values_dict = investment.extraction.create_update_market_value_dict(df_update)
df_updater_market_values = investment.extraction.update_market_values_dict_to_df(update_market_values_dict, report_date)
df_market_values = investment.extraction.apply_update_to_df_market_values(df_market_values, df_updater_market_values)
# Expose the currency-overlay series under the 'FX' label as well.
df_returns['FX'] = df_returns['IECurrencyOverlay_IE']
df_market_values['FX'] = df_market_values['IECurrencyOverlay_IE']
| true | true |
1c3c5a1d897bb010f0311515c616d2600ee219ca | 9,661 | py | Python | gomatic/go_cd_configurator.py | agsmorodin/gomatic | e6ae871ffc2d027823f6b7a5755e0ac65c724538 | [
"MIT"
] | null | null | null | gomatic/go_cd_configurator.py | agsmorodin/gomatic | e6ae871ffc2d027823f6b7a5755e0ac65c724538 | [
"MIT"
] | null | null | null | gomatic/go_cd_configurator.py | agsmorodin/gomatic | e6ae871ffc2d027823f6b7a5755e0ac65c724538 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import json
import xml.etree.ElementTree as ET
import argparse
import sys
import subprocess
import requests
from decimal import Decimal
from gomatic.gocd.pipelines import Pipeline, PipelineGroup
from gomatic.gocd.agents import Agent
from gomatic.xml_operations import Ensurance, PossiblyMissingElement, move_all_to_end, prettify
class HostRestClient:
    """Thin REST wrapper around a GoCD server host.

    Normalises the host to carry an ``http://`` scheme and exposes the
    plain GET/POST helpers that GoCdConfigurator relies on.
    """
    def __init__(self, host):
        if host.startswith('http://'):
            self.__host = host
        else:
            self.__host = 'http://%s' % host
    def __repr__(self):
        return 'HostRestClient("%s")' % self.__host
    def __path(self, path):
        # Server-absolute paths are appended straight onto the host root.
        return self.__host + path
    def get(self, path):
        """GET the given server-absolute path; return the raw response."""
        return requests.get(self.__path(path))
    def post(self, path, data):
        """POST ``data`` to the given path; raise RuntimeError on non-200."""
        url = self.__path(path)
        result = requests.post(url, data)
        if result.status_code == 200:
            return
        try:
            # Go escapes single quotes in its JSON error payload; undo that
            # before parsing so the server's own message can be surfaced.
            result_json = json.loads(result.text.replace("\\'", "'"))
            message = result_json.get('result', result.text)
            raise RuntimeError("Could not post config to Go server (%s):\n%s" % (url, message))
        except ValueError:
            # Body was not JSON at all - report the raw response instead.
            raise RuntimeError("Could not post config to Go server (%s) (and result was not json):\n%s" % (url, result))
class GoCdConfigurator(object):
    """Facade over a GoCD server's XML configuration.

    Fetches the config XML (plus its md5 concurrency token) on construction,
    lets callers mutate an in-memory ElementTree through the ensure_* /
    remove_* helpers, and posts the modified XML back in
    save_updated_config().
    """
    def __init__(self, host_rest_client):
        # host_rest_client: anything exposing get(path)/post(path, data),
        # normally a HostRestClient.
        self.__host_rest_client = host_rest_client
        self.__set_initial_config_xml()
    def __set_initial_config_xml(self):
        # Snapshot the server's current config together with the md5 that
        # GoCD uses to detect concurrent edits when we post changes back.
        self.__initial_config, self._initial_md5 = self.__current_config_response()
        self.__xml_root = ET.fromstring(self.__initial_config)
    def __repr__(self):
        return "GoCdConfigurator(%s)" % self.__host_rest_client
    def as_python(self, pipeline, with_save=True):
        """Reverse-engineer a runnable python script that recreates *pipeline*."""
        result = "#!/usr/bin/env python\nfrom gomatic import *\n\nconfigurator = " + str(self) + "\n"
        result += "pipeline = configurator"
        result += pipeline.as_python_commands_applied_to_server()
        save_part = ""
        if with_save:
            save_part = "\n\nconfigurator.save_updated_config(save_config_locally=True, dry_run=True)"
        return result + save_part
    @property
    def current_config(self):
        # Fresh GET of the server's config text (not the cached snapshot).
        return self.__current_config_response()[0]
    def __current_config_response(self):
        # Returns (config_xml_text, md5) straight from the server.
        response = self.__host_rest_client.get("/go/admin/restful/configuration/file/GET/xml")
        return response.text, response.headers['x-cruise-config-md5']
    def reorder_elements_to_please_go(self):
        # GoCD validates element order inside cruise-config.xml, so push the
        # container elements into the order the server expects before
        # serialising.
        move_all_to_end(self.__xml_root, 'pipelines')
        move_all_to_end(self.__xml_root, 'templates')
        move_all_to_end(self.__xml_root, 'environments')
        move_all_to_end(self.__xml_root, 'agents')
        for pipeline in self.pipelines:
            pipeline.reorder_elements_to_please_go()
        for template in self.templates:
            template.reorder_elements_to_please_go()
    @property
    def config(self):
        """Current in-memory config serialised as utf-8 XML bytes."""
        self.reorder_elements_to_please_go()
        return ET.tostring(self.__xml_root, 'utf-8')
    @property
    def artifacts_dir(self):
        return self.__possibly_missing_server_element().attribute('artifactsdir')
    @artifacts_dir.setter
    def artifacts_dir(self, artifacts_dir):
        self.__server_element_ensurance().set('artifactsdir', artifacts_dir)
    @property
    def site_url(self):
        return self.__possibly_missing_server_element().attribute('siteUrl')
    @site_url.setter
    def site_url(self, site_url):
        self.__server_element_ensurance().set('siteUrl', site_url)
    @property
    def agent_auto_register_key(self):
        return self.__possibly_missing_server_element().attribute('agentAutoRegisterKey')
    @agent_auto_register_key.setter
    def agent_auto_register_key(self, agent_auto_register_key):
        self.__server_element_ensurance().set('agentAutoRegisterKey', agent_auto_register_key)
    @property
    def purge_start(self):
        # Decimal (or None) - artifact purge lower threshold, in GB per the
        # GoCD server config - TODO confirm units.
        return self.__server_decimal_attribute('purgeStart')
    @purge_start.setter
    def purge_start(self, purge_start_decimal):
        assert isinstance(purge_start_decimal, Decimal)
        self.__server_element_ensurance().set('purgeStart', str(purge_start_decimal))
    @property
    def purge_upto(self):
        return self.__server_decimal_attribute('purgeUpto')
    @purge_upto.setter
    def purge_upto(self, purge_upto_decimal):
        assert isinstance(purge_upto_decimal, Decimal)
        self.__server_element_ensurance().set('purgeUpto', str(purge_upto_decimal))
    def __server_decimal_attribute(self, attribute_name):
        # Missing/empty attribute maps to None rather than Decimal('0').
        attribute = self.__possibly_missing_server_element().attribute(attribute_name)
        return Decimal(attribute) if attribute else None
    def __possibly_missing_server_element(self):
        # Read-only access: does NOT create <server> if absent.
        return PossiblyMissingElement(self.__xml_root).possibly_missing_child('server')
    def __server_element_ensurance(self):
        # Write access: creates <server> on demand.
        return Ensurance(self.__xml_root).ensure_child('server')
    @property
    def pipeline_groups(self):
        return [PipelineGroup(e, self) for e in self.__xml_root.findall('pipelines')]
    def ensure_pipeline_group(self, group_name):
        """Return the named pipeline group, creating it if necessary."""
        pipeline_group_element = Ensurance(self.__xml_root).ensure_child_with_attribute("pipelines", "group", group_name)
        return PipelineGroup(pipeline_group_element.element, self)
    def ensure_removal_of_pipeline_group(self, group_name):
        """Remove the named group (no-op if absent); returns self for chaining."""
        matching = [g for g in self.pipeline_groups if g.name == group_name]
        for group in matching:
            self.__xml_root.remove(group.element)
        return self
    def remove_all_pipeline_groups(self):
        for e in self.__xml_root.findall('pipelines'):
            self.__xml_root.remove(e)
        return self
    @property
    def agents(self):
        return [Agent(e) for e in PossiblyMissingElement(self.__xml_root).possibly_missing_child('agents').findall('agent')]
    def ensure_removal_of_agent(self, hostname):
        """Remove every agent registered under *hostname*; returns self."""
        matching = [agent for agent in self.agents if agent.hostname == hostname]
        for agent in matching:
            Ensurance(self.__xml_root).ensure_child('agents').element.remove(agent._element)
        return self
    @property
    def pipelines(self):
        # All pipelines across all groups, flattened.
        result = []
        groups = self.pipeline_groups
        for group in groups:
            result.extend(group.pipelines)
        return result
    @property
    def templates(self):
        return [Pipeline(e, 'templates') for e in PossiblyMissingElement(self.__xml_root).possibly_missing_child('templates').findall('pipeline')]
    def ensure_template(self, template_name):
        """Return the named template, creating it if necessary."""
        pipeline_element = Ensurance(self.__xml_root).ensure_child('templates').ensure_child_with_attribute('pipeline', 'name', template_name).element
        return Pipeline(pipeline_element, 'templates')
    def ensure_replacement_of_template(self, template_name):
        """Return the named template with any existing content wiped."""
        template = self.ensure_template(template_name)
        template.make_empty()
        return template
    def ensure_removal_of_template(self, template_name):
        """Remove the named template; drops <templates> entirely if now empty."""
        matching = [template for template in self.templates if template.name == template_name]
        root = Ensurance(self.__xml_root)
        templates_element = root.ensure_child('templates').element
        for template in matching:
            templates_element.remove(template.element)
        if len(self.templates) == 0:
            root.element.remove(templates_element)
        return self
    @property
    def git_urls(self):
        # Only pipelines with exactly one git material contribute a url.
        return [pipeline.git_url for pipeline in self.pipelines if pipeline.has_single_git_material]
    @property
    def has_changes(self):
        # Compared on prettified text so formatting differences don't count.
        return prettify(self.__initial_config) != prettify(self.config)
    def save_updated_config(self, save_config_locally=False, dry_run=False):
        """Post the (changed) config back to the server.

        save_config_locally writes before/after snapshots to the cwd;
        dry_run skips the POST and, when available, shows a kdiff3 diff.
        On success the local snapshot/md5 are refreshed from the server.
        """
        config_before = prettify(self.__initial_config)
        config_after = prettify(self.config)
        if save_config_locally:
            # NOTE(review): .encode() on a text-mode file handle is
            # Python-2 style; on Python 3 this raises TypeError - confirm
            # the targeted interpreter version.
            open('config-before.xml', 'w').write(config_before.encode('utf-8'))
            open('config-after.xml', 'w').write(config_after.encode('utf-8'))
        def has_kdiff3():
            # Bare except: any failure to invoke kdiff3 (e.g. binary not
            # installed) is treated as "not available".
            try:
                return subprocess.call(["kdiff3", "-version"]) == 0
            except:
                return False
        if dry_run and config_before != config_after and has_kdiff3():
            subprocess.call(["kdiff3", "config-before.xml", "config-after.xml"])
        # md5 of the originally-fetched config lets GoCD reject the post if
        # someone else changed the config in the meantime.
        data = {
            'xmlFile': self.config,
            'md5': self._initial_md5
        }
        if not dry_run and config_before != config_after:
            self.__host_rest_client.post('/go/admin/restful/configuration/file/POST/xml', data)
            self.__set_initial_config_xml()
if __name__ == '__main__':
    # CLI entry point: reverse-engineer gomatic code for one existing pipeline.
    parser = argparse.ArgumentParser(description='Gomatic is an API for configuring GoCD. '
                                                 'Run python -m gomatic.go_cd_configurator to reverse engineer code to configure an existing pipeline.')
    # NOTE(review): neither flag is marked required; running with only -p
    # passes server=None through to HostRestClient - confirm intended.
    parser.add_argument('-s', '--server', help='the go server (e.g. "localhost:8153" or "my.gocd.com")')
    parser.add_argument('-p', '--pipeline', help='the name of the pipeline to reverse-engineer the config for')
    args = parser.parse_args()
    if len(sys.argv) == 1:
        # Invoked with no arguments at all: show usage and exit non-zero.
        parser.print_help()
        sys.exit(1)
    go_server = GoCdConfigurator(HostRestClient(args.server))
    matching_pipelines = [p for p in go_server.pipelines if p.name == args.pipeline]
    if len(matching_pipelines) != 1:
        raise RuntimeError("Should have found one matching pipeline but found %s" % matching_pipelines)
    pipeline = matching_pipelines[0]
    print(go_server.as_python(pipeline))
| 38.799197 | 152 | 0.686368 |
import json
import xml.etree.ElementTree as ET
import argparse
import sys
import subprocess
import requests
from decimal import Decimal
from gomatic.gocd.pipelines import Pipeline, PipelineGroup
from gomatic.gocd.agents import Agent
from gomatic.xml_operations import Ensurance, PossiblyMissingElement, move_all_to_end, prettify
class HostRestClient:
def __init__(self, host):
self.__host = host if host.startswith('http://') else 'http://%s' % host
def __repr__(self):
return 'HostRestClient("%s")' % self.__host
def __path(self, path):
return self.__host + path
def get(self, path):
return requests.get(self.__path(path))
def post(self, path, data):
url = self.__path(path)
result = requests.post(url, data)
if result.status_code != 200:
try:
result_json = json.loads(result.text.replace("\\'", "'"))
message = result_json.get('result', result.text)
raise RuntimeError("Could not post config to Go server (%s):\n%s" % (url, message))
except ValueError:
raise RuntimeError("Could not post config to Go server (%s) (and result was not json):\n%s" % (url, result))
class GoCdConfigurator(object):
def __init__(self, host_rest_client):
self.__host_rest_client = host_rest_client
self.__set_initial_config_xml()
def __set_initial_config_xml(self):
self.__initial_config, self._initial_md5 = self.__current_config_response()
self.__xml_root = ET.fromstring(self.__initial_config)
def __repr__(self):
return "GoCdConfigurator(%s)" % self.__host_rest_client
def as_python(self, pipeline, with_save=True):
result = "#!/usr/bin/env python\nfrom gomatic import *\n\nconfigurator = " + str(self) + "\n"
result += "pipeline = configurator"
result += pipeline.as_python_commands_applied_to_server()
save_part = ""
if with_save:
save_part = "\n\nconfigurator.save_updated_config(save_config_locally=True, dry_run=True)"
return result + save_part
@property
def current_config(self):
return self.__current_config_response()[0]
def __current_config_response(self):
response = self.__host_rest_client.get("/go/admin/restful/configuration/file/GET/xml")
return response.text, response.headers['x-cruise-config-md5']
def reorder_elements_to_please_go(self):
move_all_to_end(self.__xml_root, 'pipelines')
move_all_to_end(self.__xml_root, 'templates')
move_all_to_end(self.__xml_root, 'environments')
move_all_to_end(self.__xml_root, 'agents')
for pipeline in self.pipelines:
pipeline.reorder_elements_to_please_go()
for template in self.templates:
template.reorder_elements_to_please_go()
@property
def config(self):
self.reorder_elements_to_please_go()
return ET.tostring(self.__xml_root, 'utf-8')
@property
def artifacts_dir(self):
return self.__possibly_missing_server_element().attribute('artifactsdir')
@artifacts_dir.setter
def artifacts_dir(self, artifacts_dir):
self.__server_element_ensurance().set('artifactsdir', artifacts_dir)
@property
def site_url(self):
return self.__possibly_missing_server_element().attribute('siteUrl')
@site_url.setter
def site_url(self, site_url):
self.__server_element_ensurance().set('siteUrl', site_url)
@property
def agent_auto_register_key(self):
return self.__possibly_missing_server_element().attribute('agentAutoRegisterKey')
@agent_auto_register_key.setter
def agent_auto_register_key(self, agent_auto_register_key):
self.__server_element_ensurance().set('agentAutoRegisterKey', agent_auto_register_key)
@property
def purge_start(self):
return self.__server_decimal_attribute('purgeStart')
@purge_start.setter
def purge_start(self, purge_start_decimal):
assert isinstance(purge_start_decimal, Decimal)
self.__server_element_ensurance().set('purgeStart', str(purge_start_decimal))
@property
def purge_upto(self):
return self.__server_decimal_attribute('purgeUpto')
@purge_upto.setter
def purge_upto(self, purge_upto_decimal):
assert isinstance(purge_upto_decimal, Decimal)
self.__server_element_ensurance().set('purgeUpto', str(purge_upto_decimal))
def __server_decimal_attribute(self, attribute_name):
attribute = self.__possibly_missing_server_element().attribute(attribute_name)
return Decimal(attribute) if attribute else None
def __possibly_missing_server_element(self):
return PossiblyMissingElement(self.__xml_root).possibly_missing_child('server')
def __server_element_ensurance(self):
return Ensurance(self.__xml_root).ensure_child('server')
@property
def pipeline_groups(self):
return [PipelineGroup(e, self) for e in self.__xml_root.findall('pipelines')]
def ensure_pipeline_group(self, group_name):
pipeline_group_element = Ensurance(self.__xml_root).ensure_child_with_attribute("pipelines", "group", group_name)
return PipelineGroup(pipeline_group_element.element, self)
def ensure_removal_of_pipeline_group(self, group_name):
matching = [g for g in self.pipeline_groups if g.name == group_name]
for group in matching:
self.__xml_root.remove(group.element)
return self
def remove_all_pipeline_groups(self):
for e in self.__xml_root.findall('pipelines'):
self.__xml_root.remove(e)
return self
@property
def agents(self):
return [Agent(e) for e in PossiblyMissingElement(self.__xml_root).possibly_missing_child('agents').findall('agent')]
def ensure_removal_of_agent(self, hostname):
matching = [agent for agent in self.agents if agent.hostname == hostname]
for agent in matching:
Ensurance(self.__xml_root).ensure_child('agents').element.remove(agent._element)
return self
@property
def pipelines(self):
result = []
groups = self.pipeline_groups
for group in groups:
result.extend(group.pipelines)
return result
@property
def templates(self):
return [Pipeline(e, 'templates') for e in PossiblyMissingElement(self.__xml_root).possibly_missing_child('templates').findall('pipeline')]
def ensure_template(self, template_name):
pipeline_element = Ensurance(self.__xml_root).ensure_child('templates').ensure_child_with_attribute('pipeline', 'name', template_name).element
return Pipeline(pipeline_element, 'templates')
def ensure_replacement_of_template(self, template_name):
template = self.ensure_template(template_name)
template.make_empty()
return template
def ensure_removal_of_template(self, template_name):
matching = [template for template in self.templates if template.name == template_name]
root = Ensurance(self.__xml_root)
templates_element = root.ensure_child('templates').element
for template in matching:
templates_element.remove(template.element)
if len(self.templates) == 0:
root.element.remove(templates_element)
return self
@property
def git_urls(self):
return [pipeline.git_url for pipeline in self.pipelines if pipeline.has_single_git_material]
@property
def has_changes(self):
return prettify(self.__initial_config) != prettify(self.config)
def save_updated_config(self, save_config_locally=False, dry_run=False):
config_before = prettify(self.__initial_config)
config_after = prettify(self.config)
if save_config_locally:
open('config-before.xml', 'w').write(config_before.encode('utf-8'))
open('config-after.xml', 'w').write(config_after.encode('utf-8'))
def has_kdiff3():
try:
return subprocess.call(["kdiff3", "-version"]) == 0
except:
return False
if dry_run and config_before != config_after and has_kdiff3():
subprocess.call(["kdiff3", "config-before.xml", "config-after.xml"])
data = {
'xmlFile': self.config,
'md5': self._initial_md5
}
if not dry_run and config_before != config_after:
self.__host_rest_client.post('/go/admin/restful/configuration/file/POST/xml', data)
self.__set_initial_config_xml()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Gomatic is an API for configuring GoCD. '
'Run python -m gomatic.go_cd_configurator to reverse engineer code to configure an existing pipeline.')
parser.add_argument('-s', '--server', help='the go server (e.g. "localhost:8153" or "my.gocd.com")')
parser.add_argument('-p', '--pipeline', help='the name of the pipeline to reverse-engineer the config for')
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
go_server = GoCdConfigurator(HostRestClient(args.server))
matching_pipelines = [p for p in go_server.pipelines if p.name == args.pipeline]
if len(matching_pipelines) != 1:
raise RuntimeError("Should have found one matching pipeline but found %s" % matching_pipelines)
pipeline = matching_pipelines[0]
print(go_server.as_python(pipeline))
| true | true |
1c3c5afa6632336d33ed6b321487b86832d026ae | 4,065 | py | Python | pypyr/pypeloaders/fileloader.py | Reskov/pypyr | 67bc1795493c19e648e12f776a644f92e3bd2fc8 | [
"Apache-2.0"
] | 261 | 2020-08-18T19:31:29.000Z | 2022-03-31T14:54:06.000Z | pypyr/pypeloaders/fileloader.py | Reskov/pypyr | 67bc1795493c19e648e12f776a644f92e3bd2fc8 | [
"Apache-2.0"
] | 73 | 2020-08-14T20:21:14.000Z | 2022-03-14T14:00:16.000Z | pypyr/pypeloaders/fileloader.py | Reskov/pypyr | 67bc1795493c19e648e12f776a644f92e3bd2fc8 | [
"Apache-2.0"
] | 15 | 2020-09-30T12:15:50.000Z | 2022-03-30T07:25:40.000Z | """Load pipelines from local disk."""
import logging
from pathlib import Path
from pypyr.errors import PipelineNotFoundError
import pypyr.moduleloader
import pypyr.yaml
# use pypyr logger to ensure loglevel is set correctly
logger = logging.getLogger(__name__)
def get_pipeline_path(pipeline_name, working_directory):
    """Look for the pipeline in the various places it could be.

    Search order: the working directory itself, then its ``pipelines``
    sub-directory, then the ``pipelines`` directory of the pypyr
    installation.

    Args:
        pipeline_name (str): Name of pipeline to find
        working_directory (Path): Path in which to look for pipeline_name.yaml

    Returns:
        Absolute path to the pipeline_name.yaml file

    Raises:
        PipelineNotFoundError: if pipeline_name.yaml not found in any of the
            searched locations.
    """
    logger.debug("starting")
    logger.debug("current directory is %s", working_directory)
    cwd = working_directory

    # 1st candidate: {cwd}/{pipeline_name}.yaml
    candidate = cwd.joinpath(f'{pipeline_name}.yaml')
    if candidate.is_file():
        logger.debug("Found %s", candidate)
        logger.debug("done")
        return candidate

    logger.debug("%s not found in working directory. "
                 "Looking in '{working dir}/pipelines' instead.",
                 pipeline_name)
    # 2nd candidate: {cwd}/pipelines/{pipeline_name}.yaml
    candidate = cwd.joinpath('pipelines', f'{pipeline_name}.yaml').resolve()
    if candidate.is_file():
        logger.debug("Found %s", candidate)
        logger.debug("done")
        return candidate

    logger.debug("%s not found in working directory/pipelines folder. "
                 "Looking in pypyr install directory instead.",
                 pipeline_name)
    # 3rd candidate: the pipelines dir shipped with the pypyr install itself.
    pypyr_dir = Path(__file__).resolve().parents[1]
    logger.debug("pypyr installation directory is: %s", pypyr_dir)
    candidate = pypyr_dir.joinpath('pipelines', f'{pipeline_name}.yaml')
    if candidate.is_file():
        logger.debug("Found %s", candidate)
        logger.debug("done")
        return candidate

    raise PipelineNotFoundError(
        f"{pipeline_name}.yaml not found in any of the "
        "following:\n"
        f"{working_directory}\n"
        f"{working_directory}/pipelines\n"
        f"{pypyr_dir}/pipelines")
def get_pipeline_definition(pipeline_name, working_dir):
    """Open and parse the pipeline definition yaml.

    Resolves {pipeline_name}.yaml via get_pipeline_path and parses the
    yaml into a dictionary representing the pipeline.

    Args:
        pipeline_name (str): Name of pipeline. This will be the file-name of
                             the pipeline - i.e {pipeline_name}.yaml
        working_dir (path): Start looking in ./working_dir/pipeline_name.yaml

    Returns:
        dict describing the pipeline, parsed from the pipeline yaml.

    Raises:
        FileNotFoundError: pipeline_name.yaml not found in the various
                           pipeline dirs.
    """
    logger.debug("starting")

    path = get_pipeline_path(pipeline_name=pipeline_name,
                             working_directory=working_dir)
    logger.debug("Trying to open pipeline at path %s", path)

    try:
        with open(path) as pipeline_file:
            definition = pypyr.yaml.get_pipeline_yaml(pipeline_file)
            logger.debug("found %d stages in pipeline.", len(definition))
    except FileNotFoundError:
        # Surface which path was attempted, then let the caller handle it.
        logger.error(
            "The pipeline doesn't exist. Looking for a file here: "
            "%s", path)
        raise
    logger.debug("pipeline definition loaded")
    logger.debug("done")
    return definition
| 34.449153 | 79 | 0.620664 | import logging
from pathlib import Path
from pypyr.errors import PipelineNotFoundError
import pypyr.moduleloader
import pypyr.yaml
logger = logging.getLogger(__name__)
def get_pipeline_path(pipeline_name, working_directory):
    """Find pipeline_name.yaml, searching cwd, cwd/pipelines, then the
    pypyr install's pipelines dir; raise PipelineNotFoundError if absent.

    Args:
        pipeline_name (str): name of the pipeline to find.
        working_directory (Path): directory to start the search from.

    Returns:
        Path to the pipeline_name.yaml file.

    Raises:
        PipelineNotFoundError: yaml not found in any searched location.
    """
    logger.debug("starting")
    logger.debug("current directory is %s", working_directory)
    cwd = working_directory
    # 1st candidate: {cwd}/{pipeline_name}.yaml
    pipeline_path = cwd.joinpath(f'{pipeline_name}.yaml')
    if pipeline_path.is_file():
        logger.debug("Found %s", pipeline_path)
    else:
        logger.debug("%s not found in working directory. "
                     "Looking in '{working dir}/pipelines' instead.",
                     pipeline_name)
        # 2nd candidate: {cwd}/pipelines/{pipeline_name}.yaml
        pipeline_path = cwd.joinpath('pipelines',
                                     f'{pipeline_name}.yaml').resolve()
        if pipeline_path.is_file():
            logger.debug("Found %s", pipeline_path)
        else:
            logger.debug("%s not found in working directory/pipelines folder. "
                         "Looking in pypyr install directory instead.",
                         pipeline_name)
            # 3rd candidate: pipelines shipped with the pypyr install itself.
            pypyr_dir = Path(__file__).resolve().parents[1]
            logger.debug("pypyr installation directory is: %s", pypyr_dir)
            pipeline_path = pypyr_dir.joinpath('pipelines',
                                               f'{pipeline_name}.yaml')
            if pipeline_path.is_file():
                logger.debug("Found %s", pipeline_path)
            else:
                raise PipelineNotFoundError(
                    f"{pipeline_name}.yaml not found in any of the "
                    "following:\n"
                    f"{working_directory}\n"
                    f"{working_directory}/pipelines\n"
                    f"{pypyr_dir}/pipelines")
    logger.debug("done")
    return pipeline_path
def get_pipeline_definition(pipeline_name, working_dir):
    """Open and parse {pipeline_name}.yaml into a pipeline dict.

    Args:
        pipeline_name (str): file-name of the pipeline without extension.
        working_dir (Path): directory the path lookup starts from.

    Returns:
        dict describing the pipeline, parsed from the pipeline yaml.

    Raises:
        FileNotFoundError: the resolved yaml could not be opened.
    """
    logger.debug("starting")
    pipeline_path = get_pipeline_path(pipeline_name=pipeline_name,
                                      working_directory=working_dir)
    logger.debug("Trying to open pipeline at path %s", pipeline_path)
    try:
        with open(pipeline_path) as yaml_file:
            pipeline_definition = pypyr.yaml.get_pipeline_yaml(
                yaml_file)
            logger.debug(
                "found %d stages in pipeline.", len(pipeline_definition))
    except FileNotFoundError:
        # Log which path was attempted before re-raising for the caller.
        logger.error(
            "The pipeline doesn't exist. Looking for a file here: "
            "%s", pipeline_path)
        raise
    logger.debug("pipeline definition loaded")
    logger.debug("done")
    return pipeline_definition
| true | true |
1c3c5bbc9ba1d110481c61250a3be4239d7529bf | 406 | py | Python | config/wsgi.py | adrianeriksen/photographic | 5418a6a79850fa887242f273a35ef9ab585d9d1a | [
"MIT"
] | null | null | null | config/wsgi.py | adrianeriksen/photographic | 5418a6a79850fa887242f273a35ef9ab585d9d1a | [
"MIT"
] | 6 | 2021-04-25T08:10:51.000Z | 2021-05-25T17:58:32.000Z | config/wsgi.py | adrianeriksen/photographic | 5418a6a79850fa887242f273a35ef9ab585d9d1a | [
"MIT"
] | null | null | null | """
WSGI config for photographic project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to production settings; an externally-set DJANGO_SETTINGS_MODULE
# environment variable still takes precedence over this default.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# Module-level WSGI callable that servers (e.g. gunicorn/uwsgi) look up.
application = get_wsgi_application()
| 23.882353 | 78 | 0.79064 |
import os
from django.core.wsgi import get_wsgi_application
# Fall back to production settings unless DJANGO_SETTINGS_MODULE is already
# set in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# WSGI entry point exposed for the application server.
application = get_wsgi_application()
| true | true |
1c3c5d02b0e2c1b7cbfc4bc0c09678f02680ae2e | 10,820 | py | Python | label_studio_withoutsignin/core/old_ls_migration.py | DimaVinnitsa/label-studio | b33ef9edc5efef5f5a073e3a457832278afbf2cf | [
"Apache-2.0"
] | null | null | null | label_studio_withoutsignin/core/old_ls_migration.py | DimaVinnitsa/label-studio | b33ef9edc5efef5f5a073e3a457832278afbf2cf | [
"Apache-2.0"
] | null | null | null | label_studio_withoutsignin/core/old_ls_migration.py | DimaVinnitsa/label-studio | b33ef9edc5efef5f5a073e3a457832278afbf2cf | [
"Apache-2.0"
] | null | null | null | import pathlib
import contextlib
import datetime
import os
import io
import json
from tasks.models import Task, Annotation, Prediction
from projects.models import Project
from data_import.models import FileUpload
from core.utils.io import get_data_dir
from data_manager.models import View, FilterGroup, Filter
from django.core.files.base import File
from io_storages.gcs.models import GCSImportStorage, GCSExportStorage
from io_storages.azure_blob.models import AzureBlobImportStorage, AzureBlobExportStorage
from io_storages.s3.models import S3ImportStorage, S3ExportStorage
from io_storages.redis.models import RedisImportStorage, RedisExportStorage
from ml.models import MLBackend
from core.utils.params import get_env
@contextlib.contextmanager
def suppress_autotime(model, fields):
    """Temporarily switch off auto_now/auto_now_add on the named fields.

    Lets callers assign a historical value (e.g. created_at) and save()
    without Django overwriting it; the original flags are restored on exit,
    even if the body raises.

    Args:
        model: model (class or instance) whose _meta.local_fields to patch.
        fields: iterable of field names to patch.
    """
    targets = [f for f in model._meta.local_fields if f.name in fields]
    saved = {}
    for field in targets:
        # Snapshot both flags, then disable them for the duration.
        saved[field.name] = (field.auto_now, field.auto_now_add)
        field.auto_now = False
        field.auto_now_add = False
    try:
        yield
    finally:
        for field in targets:
            field.auto_now, field.auto_now_add = saved[field.name]
def _migrate_tasks(project_path, project):
    """Migrate tasks from the old tasks.json layout into database objects.

    Reads <project_path>/tasks.json and creates a Task row per entry, then
    imports that task's completions (as Annotation rows, preserving their
    original created_at timestamps) and its inline predictions (as
    Prediction rows).

    Args:
        project_path (pathlib.Path): root directory of the old-format project.
        project (Project): target project; its creator is recorded as the
            author of every migrated annotation.
    """
    tasks_path = project_path / "tasks.json"
    # Path.open replaces the old io.open(os.path.abspath(...)) combination;
    # the path resolves against the same cwd either way.
    with tasks_path.open(encoding="utf-8") as t:
        tasks_data = json.load(t)
    # Old-format timestamps are POSIX seconds; interpret them in the
    # machine's local timezone (computed once - it is loop-invariant).
    local_tz = datetime.datetime.now().astimezone().tzinfo
    for task_id, task_data in tasks_data.items():
        task = Task.objects.create(data=task_data.get("data", {}), project=project)
        # migrate annotations (stored one file per task under completions/)
        annotations_path = project_path / "completions" / "{}.json".format(task_id)
        if annotations_path.exists():
            with annotations_path.open(encoding="utf-8") as c:
                annotations_data = json.load(c)
            for annotation in annotations_data["completions"]:
                task_annotation = Annotation(
                    result=annotation["result"],
                    task=task,
                    lead_time=annotation["lead_time"],
                    was_cancelled=annotation.get("was_cancelled", False),
                    completed_by=project.created_by,
                )
                # Keep the historical timestamp instead of letting Django's
                # auto_now_add overwrite it on save.
                with suppress_autotime(task_annotation, ["created_at"]):
                    task_annotation.created_at = datetime.datetime.fromtimestamp(
                        annotation["created_at"],
                        tz=local_tz,
                    )
                    task_annotation.save()
        # migrate predictions embedded inline in the task record
        for prediction in task_data.get("predictions", []):
            task_prediction = Prediction(
                result=prediction["result"], task=task, score=prediction.get("score")
            )
            with suppress_autotime(task_prediction, ["created_at"]):
                task_prediction.created_at = datetime.datetime.fromtimestamp(
                    prediction["created_at"], tz=local_tz
                )
                task_prediction.save()
def _migrate_tabs(project_path, project):
    """Migrate data-manager tabs from the old tabs.json into View rows.

    Rebuilds each tab's filters as FilterGroup/Filter rows and renames legacy
    "completion" column references to "annotation".
    """
    tabs_path = project_path / "tabs.json"
    if tabs_path.exists():
        with io.open(os.path.abspath(tabs_path), encoding="utf-8") as t:
            tabs_data = json.load(t)
            for tab in tabs_data["tabs"]:
                view = View.objects.create(project=project)
                # the stored tab payload must reference the new View's id
                tab["id"] = view.id
                # these live in dedicated View columns, not inside View.data
                ordering = tab.pop("ordering", None)
                selected_items = tab.pop("selectedItems", None)
                # migrate filters
                filter_group = None
                filters = tab.pop("filters", None)
                if filters is not None:
                    filter_group = FilterGroup.objects.create(
                        conjunction=filters.get("conjunction", "and")
                    )
                    if "items" in filters:
                        for f in filters["items"]:
                            view_filter = Filter.objects.create(
                                **{
                                    "column": f.get("filter", ""),
                                    "operator": f.get("operator", ""),
                                    "type": f.get("type", ""),
                                    "value": f.get("value", {}),
                                }
                            )
                            filter_group.filters.add(view_filter)
                hidden_columns = {"explore": [], "labeling": []}
                hidden_columns_data = tab.pop("hiddenColumns", None)
                # apply naming change to tabs internal data
                # ("completion" was renamed to "annotation" in the new schema)
                if hidden_columns_data is not None:
                    for c in hidden_columns_data.get("explore", []):
                        hidden_columns["explore"].append(c.replace("completion", "annotation"))
                    for c in hidden_columns_data.get("labeling", []):
                        hidden_columns["labeling"].append(c.replace("completion", "annotation"))
                tab["hiddenColumns"] = hidden_columns
                view.data = tab
                view.ordering = ordering
                view.selected_items = selected_items
                view.filter_group = filter_group
                view.save()
def _create_storage(project, storage_conf, storage_classes):
    """Create one storage row from a single config.json entry.

    :param project: Project instance the storage belongs to
    :param storage_conf: raw dict from config.json (or falsy when absent)
    :param storage_classes: maps the config "type" string to the model class

    Missing or unknown types are silently skipped, matching the behaviour of
    the original if/elif chain.
    """
    if not storage_conf:
        return
    storage_type = storage_conf.get("type")
    storage_class = storage_classes.get(storage_type)
    if storage_class is None:
        return
    params = storage_conf.get("params", {})
    if storage_type == "redis":
        # redis storages have a completely different parameter set
        storage_class.objects.create(
            project=project,
            path=storage_conf.get("path"),
            host=params.get("host"),
            port=params.get("port"),
            password=params.get("password"),
            db=params.get("db", 1),
        )
        return
    kwargs = {
        "project": project,
        "prefix": params.get("prefix"),
        "regex_filter": params.get("regex"),
        "use_blob_urls": params.get("use_blob_urls"),
    }
    # Azure calls the bucket a "container"; GCS and S3 call it a "bucket".
    if storage_type == "azure-blob":
        kwargs["container"] = storage_conf.get("path")
    else:
        kwargs["bucket"] = storage_conf.get("path")
    if storage_type == "s3":
        kwargs["region_name"] = params.get("region")
    storage_class.objects.create(**kwargs)


def _migrate_storages(project, config):
    """Migrate source and target storages from config.json to database.

    The previous implementation repeated the same creation logic eight times
    (4 storage types x import/export); the per-type mapping now lives in two
    dicts and the shared logic in _create_storage.
    """
    # source storages migration
    _create_storage(project, config.get("source", None), {
        "gcs": GCSImportStorage,
        "azure-blob": AzureBlobImportStorage,
        "s3": S3ImportStorage,
        "redis": RedisImportStorage,
    })
    # target storages migration
    _create_storage(project, config.get("target", None), {
        "gcs": GCSExportStorage,
        "azure-blob": AzureBlobExportStorage,
        "s3": S3ExportStorage,
        "redis": RedisExportStorage,
    })
def _migrate_ml_backends(project, config):
    """Create an MLBackend row for every backend listed in config.json."""
    for backend in config.get("ml_backends", []):
        MLBackend.objects.create(
            title=backend.get("name"),
            url=backend.get("url"),
            project=project,
        )
def _migrate_uploaded_files(project, project_path):
    """Register the old project's user-uploaded files as FileUpload rows.

    Silently does nothing when the old project has no "upload" directory.
    The new upload root is created on demand under the configured base data
    directory.
    """
    source_upload_path = project_path / "upload"
    if not source_upload_path.exists():
        return
    upload_root = (
        pathlib.Path(get_env("LABEL_STUDIO_BASE_DATA_DIR", get_data_dir())) / "upload"
    )
    if not upload_root.exists():
        os.makedirs(str(upload_root), exist_ok=True)
    for entry in source_upload_path.iterdir():
        with open(str(entry), "rb") as fh:
            FileUpload.objects.create(
                user=project.created_by, project=project, file=File(fh, name=entry.name)
            )
def migrate_existing_project(project_path, project, config):
    """Migrate a project created by an older Label Studio into the database.

    Runs the individual migration steps in a fixed order: tasks (with their
    annotations and predictions), data-manager tabs, cloud/redis storages,
    ML backends and user-uploaded files.

    :param project_path: path object pointing at the old on-disk project dir
    :param project: already-created Project instance to attach data to
    :param config: parsed config.json dict of the old project
    """
    _migrate_tasks(project_path, project)
    _migrate_tabs(project_path, project)
    _migrate_storages(project, config)
    _migrate_ml_backends(project, config)
    _migrate_uploaded_files(project, project_path)
| 42.265625 | 96 | 0.559242 | import pathlib
import contextlib
import datetime
import os
import io
import json
from tasks.models import Task, Annotation, Prediction
from projects.models import Project
from data_import.models import FileUpload
from core.utils.io import get_data_dir
from data_manager.models import View, FilterGroup, Filter
from django.core.files.base import File
from io_storages.gcs.models import GCSImportStorage, GCSExportStorage
from io_storages.azure_blob.models import AzureBlobImportStorage, AzureBlobExportStorage
from io_storages.s3.models import S3ImportStorage, S3ExportStorage
from io_storages.redis.models import RedisImportStorage, RedisExportStorage
from ml.models import MLBackend
from core.utils.params import get_env
@contextlib.contextmanager
def suppress_autotime(model, fields):
_original_values = {}
for field in model._meta.local_fields:
if field.name in fields:
_original_values[field.name] = {
"auto_now": field.auto_now,
"auto_now_add": field.auto_now_add,
}
field.auto_now = False
field.auto_now_add = False
try:
yield
finally:
for field in model._meta.local_fields:
if field.name in fields:
field.auto_now = _original_values[field.name]["auto_now"]
field.auto_now_add = _original_values[field.name]["auto_now_add"]
def _migrate_tasks(project_path, project):
tasks_path = project_path / "tasks.json"
with io.open(os.path.abspath(tasks_path), encoding="utf-8") as t:
tasks_data = json.load(t)
for task_id, task_data in tasks_data.items():
task = Task.objects.create(data=task_data.get("data", {}), project=project)
annotations_path = project_path / "completions" / "{}.json".format(task_id)
if annotations_path.exists():
with io.open(os.path.abspath(annotations_path), encoding="utf-8") as c:
annotations_data = json.load(c)
for annotation in annotations_data["completions"]:
task_annotation = Annotation(
result=annotation["result"],
task=task,
lead_time=annotation["lead_time"],
was_cancelled=annotation.get("was_cancelled", False),
completed_by=project.created_by,
)
with suppress_autotime(task_annotation, ["created_at"]):
task_annotation.created_at = datetime.datetime.fromtimestamp(
annotation["created_at"],
tz=datetime.datetime.now().astimezone().tzinfo,
)
task_annotation.save()
predictions_data = task_data.get("predictions", [])
for prediction in predictions_data:
task_prediction = Prediction(
result=prediction["result"], task=task, score=prediction.get("score")
)
with suppress_autotime(task_prediction, ["created_at"]):
task_prediction.created_at = datetime.datetime.fromtimestamp(
prediction["created_at"], tz=datetime.datetime.now().astimezone().tzinfo
)
task_prediction.save()
def _migrate_tabs(project_path, project):
tabs_path = project_path / "tabs.json"
if tabs_path.exists():
with io.open(os.path.abspath(tabs_path), encoding="utf-8") as t:
tabs_data = json.load(t)
for tab in tabs_data["tabs"]:
view = View.objects.create(project=project)
tab["id"] = view.id
ordering = tab.pop("ordering", None)
selected_items = tab.pop("selectedItems", None)
filter_group = None
filters = tab.pop("filters", None)
if filters is not None:
filter_group = FilterGroup.objects.create(
conjunction=filters.get("conjunction", "and")
)
if "items" in filters:
for f in filters["items"]:
view_filter = Filter.objects.create(
**{
"column": f.get("filter", ""),
"operator": f.get("operator", ""),
"type": f.get("type", ""),
"value": f.get("value", {}),
}
)
filter_group.filters.add(view_filter)
hidden_columns = {"explore": [], "labeling": []}
hidden_columns_data = tab.pop("hiddenColumns", None)
if hidden_columns_data is not None:
for c in hidden_columns_data.get("explore", []):
hidden_columns["explore"].append(c.replace("completion", "annotation"))
for c in hidden_columns_data.get("labeling", []):
hidden_columns["labeling"].append(c.replace("completion", "annotation"))
tab["hiddenColumns"] = hidden_columns
view.data = tab
view.ordering = ordering
view.selected_items = selected_items
view.filter_group = filter_group
view.save()
def _migrate_storages(project, config):
source = config.get("source", None)
if source:
if source.get("type") == "gcs":
params = source.get("params", {})
GCSImportStorage.objects.create(
project=project,
bucket=source.get("path"),
prefix=params.get("prefix"),
regex_filter=params.get("regex"),
use_blob_urls=params.get("use_blob_urls"),
)
elif source.get("type") == "azure-blob":
params = source.get("params", {})
AzureBlobImportStorage.objects.create(
project=project,
container=source.get("path"),
prefix=params.get("prefix"),
regex_filter=params.get("regex"),
use_blob_urls=params.get("use_blob_urls"),
)
elif source.get("type") == "s3":
params = source.get("params", {})
S3ImportStorage.objects.create(
project=project,
bucket=source.get("path"),
prefix=params.get("prefix"),
regex_filter=params.get("regex"),
use_blob_urls=params.get("use_blob_urls"),
region_name=params.get("region"),
)
elif source.get("type") == "redis":
params = source.get("params", {})
RedisImportStorage.objects.create(
project=project,
path=source.get("path"),
host=params.get("host"),
port=params.get("port"),
password=params.get("password"),
db=params.get("db", 1),
)
target = config.get("target", None)
if target:
if target.get("type") == "gcs":
params = target.get("params", {})
GCSExportStorage.objects.create(
project=project,
bucket=target.get("path"),
prefix=params.get("prefix"),
regex_filter=params.get("regex"),
use_blob_urls=params.get("use_blob_urls"),
)
elif target.get("type") == "azure-blob":
params = target.get("params", {})
AzureBlobExportStorage.objects.create(
project=project,
container=target.get("path"),
prefix=params.get("prefix"),
regex_filter=params.get("regex"),
use_blob_urls=params.get("use_blob_urls"),
)
elif target.get("type") == "s3":
params = target.get("params", {})
S3ExportStorage.objects.create(
project=project,
bucket=target.get("path"),
prefix=params.get("prefix"),
regex_filter=params.get("regex"),
use_blob_urls=params.get("use_blob_urls"),
region_name=params.get("region"),
)
elif target.get("type") == "redis":
params = target.get("params", {})
RedisExportStorage.objects.create(
project=project,
path=target.get("path"),
host=params.get("host"),
port=params.get("port"),
password=params.get("password"),
db=params.get("db", 1),
)
def _migrate_ml_backends(project, config):
ml_backends = config.get("ml_backends", [])
for ml_backend in ml_backends:
MLBackend.objects.create(
project=project, url=ml_backend.get("url"), title=ml_backend.get("name")
)
def _migrate_uploaded_files(project, project_path):
source_upload_path = project_path / "upload"
if not source_upload_path.exists():
return
target_upload_path = (
pathlib.Path(get_env("LABEL_STUDIO_BASE_DATA_DIR", get_data_dir())) / "upload"
)
if not target_upload_path.exists():
os.makedirs(str(target_upload_path), exist_ok=True)
src_files = os.listdir(str(source_upload_path))
for file_name in src_files:
full_file_name = os.path.join(str(source_upload_path), file_name)
with open(full_file_name, "rb") as f:
FileUpload.objects.create(
user=project.created_by, project=project, file=File(f, name=file_name)
)
def migrate_existing_project(project_path, project, config):
_migrate_tasks(project_path, project)
_migrate_tabs(project_path, project)
_migrate_storages(project, config)
_migrate_ml_backends(project, config)
_migrate_uploaded_files(project, project_path)
| true | true |
1c3c5d2b302fee99010bdaefc87f51c40da85f06 | 3,552 | py | Python | bindings/python/ensmallen/datasets/string/buchneraaphidicolabcc.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/buchneraaphidicolabcc.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/buchneraaphidicolabcc.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Buchnera aphidicola BCc.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def BuchneraAphidicolaBcc(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Buchnera aphidicola BCc graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected (default false).
    preprocess: bool = True
        Whether to preprocess the graph for optimal load time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes as a
        numeric range.
    verbose: int = 2
        Whether to show loading bars during retrieval and building.
    cache: bool = True
        Whether to download and preprocess only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve. Available versions:
        homology.v11.0, homology.v11.5, physical.links.v11.0,
        physical.links.v11.5, links.v11.0, links.v11.5.
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of the Buchnera aphidicola BCc graph.

    References
    ---------------------
    Please cite: Szklarczyk et al., "STRING v11", Nucleic Acids Research
    47(D1), D607-D613, 2019, Oxford University Press.
    """
    retriever = AutomaticallyRetrievedGraph(
        graph_name="BuchneraAphidicolaBcc",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
| 32.888889 | 223 | 0.677083 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def BuchneraAphidicolaBcc(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="BuchneraAphidicolaBcc",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
1c3c5da7ca43b5a9e32c72610052150094a17417 | 20,518 | py | Python | roxbot/cogs/image.py | rdnetto/roxbot | 8b59126272704ffbb0986b3b410502a4ea9cb2ed | [
"MIT"
] | 23 | 2018-05-07T11:04:27.000Z | 2021-03-19T15:26:04.000Z | roxbot/cogs/image.py | rdnetto/roxbot | 8b59126272704ffbb0986b3b410502a4ea9cb2ed | [
"MIT"
] | 56 | 2018-05-27T20:35:42.000Z | 2021-08-11T13:29:44.000Z | roxbot/cogs/image.py | rdnetto/roxbot | 8b59126272704ffbb0986b3b410502a4ea9cb2ed | [
"MIT"
] | 8 | 2018-04-15T11:32:57.000Z | 2021-05-13T16:40:34.000Z | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017-2018 Roxanne Gibson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import os
import random
import discord
import numpy as np
from PIL import Image, ImageEnhance
from discord.ext import commands
import roxbot
class Flag:
    """A horizontal-stripe pride flag used by Roxbot's image filters.

    A flag is a tuple of RGB stripe colours plus a matching tuple of stripe
    height ratios (equal heights by default).  The alternate constructors
    build each supported flag; ``name`` doubles as the filter's file name.
    """

    def __init__(self, colours=None, ratio=None, name=""):
        self.name = name
        self.rows = len(colours)
        self.colours = colours
        # Equal stripes unless a custom ratio is supplied (e.g. the bi flag).
        self.ratio = ratio if ratio else tuple(1 / self.rows for _ in range(self.rows))

    @classmethod
    def lgbt(cls):
        # red, orange, yellow, green, blue, violet
        stripes = (
            (243, 28, 28),
            (255, 196, 0),
            (255, 247, 0),
            (0, 188, 108),
            (0, 149, 255),
            (181, 46, 193),
        )
        return cls(colours=stripes, name="lgbt")

    @classmethod
    def trans(cls):
        blue, pink, white = (91, 206, 250), (245, 169, 184), (255, 255, 255)
        return cls(colours=(blue, pink, white, pink, blue), name="trans")

    @classmethod
    def non_binary(cls):
        # yellow, white, purple, grey
        stripes = (
            (255, 244, 51),
            (255, 255, 255),
            (155, 89, 208),
            (45, 45, 45),
        )
        return cls(colours=stripes, name="nb")

    @classmethod
    def bi(cls):
        # The bi flag stripes are unequal: 40% pink, 20% lavender, 40% blue.
        stripes = ((215, 2, 112), (115, 79, 150), (0, 56, 168))
        return cls(colours=stripes, ratio=(0.4, 0.2, 0.4), name="bi")

    @classmethod
    def pan(cls):
        # pink, yellow, blue
        stripes = ((255, 33, 140), (255, 216, 0), (33, 177, 255))
        return cls(colours=stripes, name="pan")

    @classmethod
    def ace(cls):
        # black, grey, white, purple
        stripes = ((0, 0, 0), (163, 163, 163), (255, 255, 255), (128, 0, 128))
        return cls(colours=stripes, name="ace")

    @classmethod
    def gq(cls):
        # purple, white, green
        stripes = ((181, 126, 220), (255, 255, 255), (74, 129, 35))
        return cls(colours=stripes, name="gq")

    @classmethod
    def gf(cls):
        # pink, white, purple, black, blue ("genderflu" is the filter's
        # established file name — kept for compatibility)
        stripes = (
            (255, 117, 162),
            (255, 255, 255),
            (190, 24, 214),
            (0, 0, 0),
            (51, 62, 189),
        )
        return cls(colours=stripes, name="genderflu")

    @classmethod
    def agender(cls):
        black, grey = (0, 0, 0), (185, 185, 185)
        white, green = (255, 255, 255), (176, 244, 141)
        return cls(colours=(black, grey, white, green, white, grey, black), name="agender")

    @classmethod
    def aro(cls):
        # green, light green, white, grey, black
        stripes = (
            (61, 165, 66),
            (167, 212, 121),
            (255, 255, 255),
            (169, 169, 169),
            (0, 0, 0),
        )
        return cls(colours=stripes, name="aro")

    @classmethod
    def demigirl(cls):
        grey, silver = (128, 128, 128), (196, 196, 196)
        pink, white = (254, 176, 202), (255, 255, 255)
        return cls(colours=(grey, silver, pink, white, pink, silver, grey), name="demigirl")

    @classmethod
    def demiboy(cls):
        grey, silver = (128, 128, 128), (196, 196, 196)
        blue, white = (155, 218, 235), (255, 255, 255)
        return cls(colours=(grey, silver, blue, white, blue, silver, grey), name="demiboy")

    @classmethod
    def deminb(cls):
        grey, silver = (128, 128, 128), (196, 196, 196)
        yellow, white = (251, 255, 117), (255, 255, 255)
        return cls(colours=(grey, silver, yellow, white, yellow, silver, grey), name="deminb")

    @classmethod
    def polygender(cls):
        # black, grey, pink, yellow, blue
        stripes = (
            (0, 0, 0),
            (147, 147, 147),
            (237, 148, 197),
            (245, 237, 129),
            (100, 187, 230),
        )
        return cls(colours=stripes, name="polygender")

    @classmethod
    def polysexual(cls):
        # pink, green, blue
        stripes = ((246, 22, 186), (0, 214, 105), (21, 147, 246))
        return cls(colours=stripes, name="polysexual")
class ImageEditor(commands.Cog):
    """Cog with commands that manipulate user-provided images:
    pride-flag filters and the deepfry effect."""

    def __init__(self, bot_client):
        # keep a reference to the bot so commands can reach its logging helper
        self.bot = bot_client
@staticmethod
def image_lookup(message):
try:
if message.attachments[0].height: # Check if attachment is image
return message.attachments[0].url
except IndexError:
return message.author.avatar_url_as(format="png")
@staticmethod
def add_grain(img, prob=0.2, opacity=30):
"""
Adds salt and pepper grain to the given image.
:param img: :type PIL.Image: Image to add grain to
:param prob: :type float: Probability of a pixel being black between 0-1
:param opacity: :type int: opacity of the grain when composite with the given image between 0%-100%
:return: :type PIL.Image: Image with added grain
"""
img_matrix = np.zeros((img.height, img.width), dtype=np.uint8)
for y in range(img.height):
for x in range(img.width):
if prob < random.random():
img_matrix[y][x] = 255
noisy = Image.fromarray(img_matrix, "L")
noisy = noisy.convert("RGB")
mask = Image.new('RGBA', img.size, (0, 0, 0, opacity))
return Image.composite(noisy, img, mask)
@staticmethod
async def flag_filter(flag, url):
"""At the moment, can only make horizontal stripe flags"""
f = 'filter_{}.png'.format(flag.name)
await roxbot.http.download_file(url, f)
ava = Image.open(f)
top = 0 # In the box we use, top is used to define which part of the image we are working on
bottom = 0 # And bottom defines the height. That should help you visualise why I increment the values the way I do
for x, colour in enumerate(flag.colours):
# Grab the next slice of the images height and width
# we use math.ceil here to avoid rounding errors when converting float to int
height = int(math.ceil(ava.height * flag.ratio[x]))
width = ava.width
bottom += height
box = (0, top, width, bottom)
# Make the colour block and the transparency mask at the slice size. Then crop the next part of the image
row = Image.new('RGB', (width, height), colour)
mask = Image.new('RGBA', (width, height), (0, 0, 0, 123))
crop = ava.crop(box)
# Combine all three and paste it back into original image
part = Image.composite(crop, row, mask)
ava.paste(part, box)
top += height
os.remove(f)
ava.save(f)
file = discord.File(f)
return file
async def image_logging(self, ctx, output):
"""Logging function for all image commands to avoid shit loads or repeating code.
Required because image has outputs that are user decided and therefore could need logging for."""
if isinstance(ctx.channel, discord.TextChannel):
return await self.bot.log(
ctx.guild,
"image",
User=ctx.author,
User_ID=ctx.author.id,
Output_Message_ID=output.id,
Channel=ctx.channel,
Channel_Mention=ctx.channel.mention,
Time="{:%a %Y/%m/%d %H:%M:%S} UTC".format(ctx.message.created_at)
)
    @commands.group(case_insensitive=True)
    async def pride(self, ctx):
        """`;pride` is a command group for multiple pride flag filters.

        When invoked without a valid subcommand, CommandNotFound is raised so
        the user receives the standard "unknown command" feedback.
        """
        if ctx.invoked_subcommand is None:
            raise commands.CommandNotFound("Subcommand '{}' does not exist.".format(ctx.subcommand_passed))
async def pride_flag_posting(self, ctx, flag, image):
async with ctx.typing():
file = await self.flag_filter(flag, image)
output = await ctx.send(file=file)
os.remove(file.filename)
await self.image_logging(ctx, output)
    @pride.command()
    async def lgbt(self, ctx, image: roxbot.converters.AvatarURL=None):
        """Adds a LGBT Pride Flag filter to the given image

        Args:
            image: Optional
                If nothing, your avatar
                Mention a user, their avatar
                Provide a URL, that image
                Provide an image via upload, that image.
        """
        # no explicit argument: use an attached image or the author's avatar
        if not image:
            image = self.image_lookup(ctx.message)
        flag = Flag.lgbt()
        await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["trans"])
async def transgender(self, ctx, image: roxbot.converters.AvatarURL=None):
"""Adds a Trans Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.trans()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["nb", "enby"])
async def nonbinary(self, ctx, image: roxbot.converters.AvatarURL=None):
"""Adds a Non-Binary Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.non_binary()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["bi"])
async def bisexual(self, ctx, image: roxbot.converters.AvatarURL=None):
"""Adds a Bisexual Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.bi()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["gq"])
async def genderqueer(self, ctx, image: roxbot.converters.AvatarURL=None):
"""Adds a Gender Queer Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.gq()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["pan"])
async def pansexual(self, ctx, image: roxbot.converters.AvatarURL=None):
"""Adds a Pansexual Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.pan()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["ace"])
async def asexual(self, ctx, image: roxbot.converters.AvatarURL=None):
"""Adds an Asexual Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.ace()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["gf"])
async def genderfluid(self, ctx, image: roxbot.converters.AvatarURL = None):
"""Adds a Gender Fluid Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.gf()
await self.pride_flag_posting(ctx, flag, image)
@pride.command()
async def agender(self, ctx, image: roxbot.converters.AvatarURL = None):
"""Adds an Agender Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.agender()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["aro"])
async def aromantic(self, ctx, image: roxbot.converters.AvatarURL = None):
"""Adds a Aromantic Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.aro()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=[])
async def demigirl(self, ctx, image: roxbot.converters.AvatarURL = None):
"""Adds a Demi Girl Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.demigirl()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=[])
async def demiboy(self, ctx, image: roxbot.converters.AvatarURL = None):
"""Adds a Demi Boy Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.demiboy()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["deminonbinary"])
async def deminb(self, ctx, image: roxbot.converters.AvatarURL = None):
"""Adds a Demi non-binary Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.deminb()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=[])
async def polygender(self, ctx, image: roxbot.converters.AvatarURL = None):
"""Adds a Polygender Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.polygender()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=[])
async def polysexual(self, ctx, image: roxbot.converters.AvatarURL = None):
"""Adds a Polysexual Pride Flag filter to the given image
Args:
image: Optional
If nothing, your avatar
Mention a user, their avatar
Provide a URL, that image
Provide an image via upload, that image.
"""
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.polysexual()
await self.pride_flag_posting(ctx, flag, image)
	@commands.command(aliases=["df"])
	async def deepfry(self, ctx, image: roxbot.converters.AvatarURL=None):
		"""Deepfrys the given image
		Args:
			image: Optional
				If nothing, your avatar
				Mention a user, their avatar
				Provide a URL, that image
				Provide an image via upload, that image.
		"""
		if not image:
			image = self.image_lookup(ctx.message)
		# Fetch the source image to a local temporary file.
		filename = await roxbot.http.download_file(image)
		async with ctx.typing():
			# Convert to jpg
			# NOTE(review): split(".") assumes the downloaded name contains
			# exactly one dot — confirm download_file never yields more.
			if filename.split(".")[-1] != "jpg":
				jpg_name = filename.split(".")[0] + ".jpg"
				img = Image.open(filename)
				img = img.convert(mode="RGB")
				img.save(jpg_name)
				os.remove(filename)
			else:
				jpg_name = filename
			img = Image.open(jpg_name)
			# Brightness Enhance
			ehn = ImageEnhance.Brightness(img)
			img = ehn.enhance(1.25)
			# Contrast Enhance
			ehn = ImageEnhance.Contrast(img)
			img = ehn.enhance(1.5)
			# Sharpness Enhance
			ehn = ImageEnhance.Sharpness(img)
			img = ehn.enhance(20)
			# Saturation Enhance
			ehn = ImageEnhance.Color(img)
			img = ehn.enhance(2)
			# Add Salt and Pepper Noise
			img = self.add_grain(img)
			img.save(jpg_name)
			# JPG-fy image: each reopen/save cycle re-applies lossy JPEG
			# compression, deliberately compounding the artifacts.
			for x in range(20):
				img = Image.open(jpg_name)
				img = img.convert(mode="RGB")
				img.save(jpg_name)
			# Post the result, then delete the temp file and log the usage.
			output = await ctx.send(file=discord.File(jpg_name))
			os.remove(jpg_name)
			await self.image_logging(ctx, output)
def setup(bot_client):
	"""discord.py extension entry point: registers the ImageEditor cog."""
	bot_client.add_cog(ImageEditor(bot_client))
| 34.311037 | 123 | 0.571937 |
import math
import os
import random
import discord
import numpy as np
from PIL import Image, ImageEnhance
from discord.ext import commands
import roxbot
class Flag:
def __init__(self, colours=None, ratio=None, name=""):
self.name = name
self.rows = len(colours)
self.colours = colours
self.ratio = ratio or tuple([(1/self.rows)]*self.rows)
@classmethod
def lgbt(cls):
name = "lgbt"
red = (243, 28, 28)
orange = (255, 196, 0)
yellow = (255, 247, 0)
green = (0, 188, 108)
blue = (0, 149, 255)
violet = (181, 46, 193)
colours = (red, orange, yellow, green, blue, violet)
return cls(colours=colours, name=name)
@classmethod
def trans(cls):
name = "trans"
blue = (91, 206, 250)
pink = (245, 169, 184)
white = (255, 255, 255)
colours = (blue, pink, white, pink, blue)
return cls(colours=colours, name=name)
@classmethod
def non_binary(cls):
name = "nb"
yellow = (255, 244, 51)
white = (255, 255, 255)
purple = (155, 89, 208)
grey = (45, 45, 45)
colours = (yellow, white, purple, grey)
return cls(colours=colours, name=name)
@classmethod
def bi(cls):
name = "bi"
ratio = (0.4, 0.2, 0.4)
pink = (215, 2, 112)
lavender = (115, 79, 150)
blue = (0, 56, 168)
colours = (pink, lavender, blue)
return cls(colours=colours, ratio=ratio, name=name)
@classmethod
def pan(cls):
name = "pan"
pink = (255, 33, 140)
yellow = (255, 216, 0)
blue = (33, 177, 255)
colours = (pink, yellow, blue)
return cls(colours=colours, name=name)
@classmethod
def ace(cls):
name = "ace"
black = (0, 0, 0)
grey = (163, 163, 163)
white = (255, 255, 255)
purple = (128, 0, 128)
colours = (black, grey, white, purple)
return cls(colours=colours, name=name)
@classmethod
def gq(cls):
name = "gq"
purple = (181, 126, 220)
white = (255, 255, 255)
green = (74, 129, 35)
colours = (purple, white, green)
return cls(colours=colours, name=name)
@classmethod
def gf(cls):
name = "genderflu"
pink = (255, 117, 162)
white = (255, 255, 255)
purple = (190, 24, 214)
black = (0, 0, 0)
blue = (51, 62, 189)
colours = (pink, white, purple, black, blue)
return cls(colours=colours, name=name)
@classmethod
def agender(cls):
name = "agender"
black = (0, 0, 0)
white = (255, 255, 255)
grey = (185, 185, 185)
green = (176, 244, 141)
colours = (black, grey, white, green, white, grey, black)
return cls(colours=colours, name=name)
@classmethod
def aro(cls):
name = "aro"
green = (61, 165, 66)
ltgreen = (167, 212, 121)
white = (255, 255, 255)
grey = (169, 169, 169)
black = (0, 0, 0)
colours = (green, ltgreen, white, grey, black)
return cls(colours=colours, name=name)
@classmethod
def demigirl(cls):
name = "demigirl"
grey = (128, 128, 128)
silver = (196, 196, 196)
pink = (254, 176, 202)
white = (255, 255, 255)
colours = (grey, silver, pink, white, pink, silver, grey)
return cls(colours=colours, name=name)
@classmethod
def demiboy(cls):
name = "demiboy"
grey = (128, 128, 128)
silver = (196, 196, 196)
blue = (155, 218, 235)
white = (255, 255, 255)
colours = (grey, silver, blue, white, blue, silver, grey)
return cls(colours=colours, name=name)
@classmethod
def deminb(cls):
name = "deminb"
grey = (128, 128, 128)
silver = (196, 196, 196)
yellow = (251, 255, 117)
white = (255, 255, 255)
colours = (grey, silver, yellow, white, yellow, silver, grey)
return cls(colours=colours, name=name)
@classmethod
def polygender(cls):
name = "polygender"
black = (0, 0, 0)
grey = (147, 147, 147)
pink = (237, 148, 197)
yellow = (245, 237, 129)
blue = (100, 187, 230)
colours = (black, grey, pink, yellow, blue)
return cls(colours=colours, name=name)
@classmethod
def polysexual(cls):
name = "polysexual"
pink = (246, 22, 186)
green = (0, 214, 105)
blue = (21, 147, 246)
colours = (pink, green, blue)
return cls(colours=colours, name=name)
class ImageEditor(commands.Cog):
def __init__(self, bot_client):
self.bot = bot_client
@staticmethod
def image_lookup(message):
try:
if message.attachments[0].height:
return message.attachments[0].url
except IndexError:
return message.author.avatar_url_as(format="png")
@staticmethod
def add_grain(img, prob=0.2, opacity=30):
img_matrix = np.zeros((img.height, img.width), dtype=np.uint8)
for y in range(img.height):
for x in range(img.width):
if prob < random.random():
img_matrix[y][x] = 255
noisy = Image.fromarray(img_matrix, "L")
noisy = noisy.convert("RGB")
mask = Image.new('RGBA', img.size, (0, 0, 0, opacity))
return Image.composite(noisy, img, mask)
@staticmethod
async def flag_filter(flag, url):
f = 'filter_{}.png'.format(flag.name)
await roxbot.http.download_file(url, f)
ava = Image.open(f)
top = 0
bottom = 0
for x, colour in enumerate(flag.colours):
height = int(math.ceil(ava.height * flag.ratio[x]))
width = ava.width
bottom += height
box = (0, top, width, bottom)
row = Image.new('RGB', (width, height), colour)
mask = Image.new('RGBA', (width, height), (0, 0, 0, 123))
crop = ava.crop(box)
part = Image.composite(crop, row, mask)
ava.paste(part, box)
top += height
os.remove(f)
ava.save(f)
file = discord.File(f)
return file
async def image_logging(self, ctx, output):
if isinstance(ctx.channel, discord.TextChannel):
return await self.bot.log(
ctx.guild,
"image",
User=ctx.author,
User_ID=ctx.author.id,
Output_Message_ID=output.id,
Channel=ctx.channel,
Channel_Mention=ctx.channel.mention,
Time="{:%a %Y/%m/%d %H:%M:%S} UTC".format(ctx.message.created_at)
)
@commands.group(case_insensitive=True)
async def pride(self, ctx):
if ctx.invoked_subcommand is None:
raise commands.CommandNotFound("Subcommand '{}' does not exist.".format(ctx.subcommand_passed))
async def pride_flag_posting(self, ctx, flag, image):
async with ctx.typing():
file = await self.flag_filter(flag, image)
output = await ctx.send(file=file)
os.remove(file.filename)
await self.image_logging(ctx, output)
@pride.command()
async def lgbt(self, ctx, image: roxbot.converters.AvatarURL=None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.lgbt()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["trans"])
async def transgender(self, ctx, image: roxbot.converters.AvatarURL=None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.trans()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["nb", "enby"])
async def nonbinary(self, ctx, image: roxbot.converters.AvatarURL=None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.non_binary()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["bi"])
async def bisexual(self, ctx, image: roxbot.converters.AvatarURL=None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.bi()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["gq"])
async def genderqueer(self, ctx, image: roxbot.converters.AvatarURL=None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.gq()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["pan"])
async def pansexual(self, ctx, image: roxbot.converters.AvatarURL=None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.pan()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["ace"])
async def asexual(self, ctx, image: roxbot.converters.AvatarURL=None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.ace()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["gf"])
async def genderfluid(self, ctx, image: roxbot.converters.AvatarURL = None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.gf()
await self.pride_flag_posting(ctx, flag, image)
@pride.command()
async def agender(self, ctx, image: roxbot.converters.AvatarURL = None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.agender()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["aro"])
async def aromantic(self, ctx, image: roxbot.converters.AvatarURL = None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.aro()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=[])
async def demigirl(self, ctx, image: roxbot.converters.AvatarURL = None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.demigirl()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=[])
async def demiboy(self, ctx, image: roxbot.converters.AvatarURL = None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.demiboy()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=["deminonbinary"])
async def deminb(self, ctx, image: roxbot.converters.AvatarURL = None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.deminb()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=[])
async def polygender(self, ctx, image: roxbot.converters.AvatarURL = None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.polygender()
await self.pride_flag_posting(ctx, flag, image)
@pride.command(aliases=[])
async def polysexual(self, ctx, image: roxbot.converters.AvatarURL = None):
if not image:
image = self.image_lookup(ctx.message)
flag = Flag.polysexual()
await self.pride_flag_posting(ctx, flag, image)
@commands.command(aliases=["df"])
async def deepfry(self, ctx, image: roxbot.converters.AvatarURL=None):
if not image:
image = self.image_lookup(ctx.message)
filename = await roxbot.http.download_file(image)
async with ctx.typing():
if filename.split(".")[-1] != "jpg":
jpg_name = filename.split(".")[0] + ".jpg"
img = Image.open(filename)
img = img.convert(mode="RGB")
img.save(jpg_name)
os.remove(filename)
else:
jpg_name = filename
img = Image.open(jpg_name)
ehn = ImageEnhance.Brightness(img)
img = ehn.enhance(1.25)
ehn = ImageEnhance.Contrast(img)
img = ehn.enhance(1.5)
ehn = ImageEnhance.Sharpness(img)
img = ehn.enhance(20)
ehn = ImageEnhance.Color(img)
img = ehn.enhance(2)
img = self.add_grain(img)
img.save(jpg_name)
for x in range(20):
img = Image.open(jpg_name)
img = img.convert(mode="RGB")
img.save(jpg_name)
output = await ctx.send(file=discord.File(jpg_name))
os.remove(jpg_name)
await self.image_logging(ctx, output)
def setup(bot_client):
bot_client.add_cog(ImageEditor(bot_client))
| true | true |
1c3c5e4eadf2c53254f474f792f65053d7dd8804 | 1,105 | py | Python | setup.py | sprymix/csscompressor | 0857438db725d5c1d2672f45d9cf3e7dc14646a4 | [
"BSD-3-Clause"
] | 38 | 2015-05-22T18:55:52.000Z | 2022-03-05T21:18:58.000Z | setup.py | sprymix/csscompressor | 0857438db725d5c1d2672f45d9cf3e7dc14646a4 | [
"BSD-3-Clause"
] | 8 | 2015-08-18T04:31:11.000Z | 2022-01-28T16:55:33.000Z | setup.py | sprymix/csscompressor | 0857438db725d5c1d2672f45d9cf3e7dc14646a4 | [
"BSD-3-Clause"
] | 10 | 2015-01-04T14:14:05.000Z | 2020-09-03T18:32:02.000Z | from setuptools import setup
extra = {}
# Read the long description via a context manager so the file handle is
# closed even if reading fails (replaces the manual try/finally pair).
with open('README.rst', 'r') as f:
    extra['long_description'] = f.read()

deps = []
try:
    # Probe only: argparse ships with Python >= 2.7 / 3.2.
    import argparse  # noqa: F401
except ImportError:
    # Older interpreters need the argparse backport from PyPI.
    deps.append('argparse')

setup(
    name='csscompressor',
    version='0.9.5',
    url='http://github.com/sprymix/csscompressor',
    license='BSD',
    author='Yury Selivanov',
    author_email='info@sprymix.com',
    description='A python port of YUI CSS Compressor',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 2.6',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Text Processing :: General',
        'Topic :: Utilities'
    ],
    packages=['csscompressor', 'csscompressor.tests'],
    install_requires=deps,
    **extra
)
| 25.113636 | 55 | 0.611765 | from setuptools import setup
extra = {}
f = open('README.rst', 'r')
try:
extra['long_description'] = f.read()
finally:
f.close()
deps = []
try:
import argparse
except ImportError:
deps.append('argparse')
setup(
name='csscompressor',
version='0.9.5',
url='http://github.com/sprymix/csscompressor',
license='BSD',
author='Yury Selivanov',
author_email='info@sprymix.com',
description='A python port of YUI CSS Compressor',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2.6',
'Topic :: Software Development :: Build Tools',
'Topic :: Text Processing :: General',
'Topic :: Utilities'
],
packages=['csscompressor', 'csscompressor.tests'],
install_requires=deps,
**extra
)
| true | true |
1c3c5ec734fa76d1985b40465b2b2eedcf0269a3 | 973 | py | Python | capa/features/basicblock.py | clayne/capa | e47f5a2548443509b0f7f251fb10ae3eadb34a88 | [
"Apache-2.0"
] | 1,887 | 2020-07-16T19:43:23.000Z | 2021-09-23T08:40:43.000Z | capa/features/basicblock.py | clayne/capa | e47f5a2548443509b0f7f251fb10ae3eadb34a88 | [
"Apache-2.0"
] | 440 | 2020-07-16T19:02:32.000Z | 2021-09-23T08:46:44.000Z | capa/features/basicblock.py | clayne/capa | e47f5a2548443509b0f7f251fb10ae3eadb34a88 | [
"Apache-2.0"
] | 304 | 2020-07-16T19:45:49.000Z | 2021-09-18T00:15:41.000Z | # Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from capa.features.common import Feature
class BasicBlock(Feature):
    """Scope feature that matches at basic-block level; it carries no value."""

    def __init__(self):
        # A basic-block feature has no associated value, hence None.
        super().__init__(None)

    def __str__(self):
        return "basic block"

    def get_value_str(self):
        # Nothing to render: this feature is value-less.
        return ""

    def freeze_serialize(self):
        # Serialized as the class name with an empty argument list.
        return (self.__class__.__name__, [])

    @classmethod
    def freeze_deserialize(cls, args):
        # args is unused: reconstruction needs no state.
        return cls()
| 34.75 | 111 | 0.722508 |
from capa.features.common import Feature
class BasicBlock(Feature):
def __init__(self):
super(BasicBlock, self).__init__(None)
def __str__(self):
return "basic block"
def get_value_str(self):
return ""
def freeze_serialize(self):
return (self.__class__.__name__, [])
@classmethod
def freeze_deserialize(cls, args):
return cls()
| true | true |
1c3c5f7137ddcdfb0b5c4fed0f3ddf3de7a4ad13 | 2,525 | py | Python | cookbook/core/flyte_basics/task_cache.py | mayitbeegh/flytesnacks | 35fe9db45f08fce3d94923b4245b1a9980a915ef | [
"Apache-2.0"
] | null | null | null | cookbook/core/flyte_basics/task_cache.py | mayitbeegh/flytesnacks | 35fe9db45f08fce3d94923b4245b1a9980a915ef | [
"Apache-2.0"
] | 18 | 2021-08-25T15:15:22.000Z | 2022-03-12T01:03:57.000Z | cookbook/core/flyte_basics/task_cache.py | mayitbeegh/flytesnacks | 35fe9db45f08fce3d94923b4245b1a9980a915ef | [
"Apache-2.0"
] | null | null | null | """
Task Cache
----------------
Flyte provides the ability to cache the output of task executions in order to make subsequent executions faster. A well-behaved Flyte Task should generate deterministic output given the same inputs and task functionality.
This is useful in situations where a user knows that many executions with the exact same inputs can occur. For example, your task may be periodically run on a schedule, run multiple times when debugging workflows, or
commonly shared across different workflows but receive the same inputs.
"""
# %%
# For any task in flyte, there is always one required import
from flytekit import task
# %%
# Task caching is disabled by default. This is to avoid unintended consequences of caching tasks with side-effects. To enable caching and control its behavior, use the `cache` and `cache_version` parameters when constructing
# a task. `cache` controls whether caching is enabled or disabled overall and `cache_version` controls which version of the cache is used. Bumping this version is akin to invalidating the cache, the next execution of that task
# with the same input/interface will actually run, and the results will be cached so that subsequent executions are just replayed from the cache.
#
# :py:func:`flytekit.task`
@task(cache=True, cache_version="1.0")
def square(n: int) -> int:
    """Return the square of ``n``.

    The parameter name and Integer type are derived from the signature, and
    the output label/type are deduced from the return annotation. Because
    caching is enabled, repeated executions with the same input replay the
    stored result rather than recomputing it.
    """
    return n ** 2
# %%
# In the above example, calling `square(n=2)` twice (even if it's across # different executions or different workflows), will only actually execute the multiplication operation once. The second time the output will be made
# available immediately - (captured from the previous execution with the same inputs)
# %%
# If, in a subsequent code update, we update the signature of the task to return the original number along with the result, it'll automatically invalidate the cache (even though the cache version remains the same). ::
#
# :py:func:`flytekit.task`
# @task(cache=True, cache_version="1.0")
# def square(n: int) -> (int, int):
# ...
# %%
# To read more about Task caching and how a unique signature is calculated, please proceed to the `Task Cache documentation <please insert correct link>`__.
| 51.530612 | 226 | 0.746931 |
from flytekit import task
@task(cache=True, cache_version="1.0")
def square(n: int) -> int:
return n * n
# available immediately - (captured from the previous execution with the same inputs)
# %%
# If, in a subsequent code update, we update the signature of the task to return the original number along with the result, it'll automatically invalidate the cache (even though the cache version remains the same). ::
| true | true |
1c3c61feb17b029e6e68fc2b7b1aaca37fae0aed | 675 | py | Python | doc/source/devel/examples/label04.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | null | null | null | doc/source/devel/examples/label04.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | 1 | 2020-02-28T16:36:04.000Z | 2020-03-02T07:51:12.000Z | doc/source/devel/examples/label04.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | null | null | null | import sys
from taurus.external.qt import Qt
from taurus.qt.qtgui.container import TaurusWidget
from taurus.qt.qtgui.display import TaurusLabel
from taurus.qt.qtgui.application import TaurusApplication
app = TaurusApplication(sys.argv, cmd_line_parser=None)
panel = TaurusWidget()
layout = Qt.QVBoxLayout()
panel.setLayout(layout)
panel.setModel('sys/taurustest/1')
w1 = TaurusLabel()
w2 = TaurusLabel()
w3 = TaurusLabel()
w1.setModel('sys/taurustest/1/state')
w2.setModel('sys/taurustest/1/position')
w3.setModel('sys/taurustest/1/simulationmode')
w1.setShowQuality(False)
layout.addWidget(w1)
layout.addWidget(w2)
layout.addWidget(w3)
panel.show()
sys.exit(app.exec_())
| 25.961538 | 57 | 0.794074 | import sys
from taurus.external.qt import Qt
from taurus.qt.qtgui.container import TaurusWidget
from taurus.qt.qtgui.display import TaurusLabel
from taurus.qt.qtgui.application import TaurusApplication
app = TaurusApplication(sys.argv, cmd_line_parser=None)
panel = TaurusWidget()
layout = Qt.QVBoxLayout()
panel.setLayout(layout)
panel.setModel('sys/taurustest/1')
w1 = TaurusLabel()
w2 = TaurusLabel()
w3 = TaurusLabel()
w1.setModel('sys/taurustest/1/state')
w2.setModel('sys/taurustest/1/position')
w3.setModel('sys/taurustest/1/simulationmode')
w1.setShowQuality(False)
layout.addWidget(w1)
layout.addWidget(w2)
layout.addWidget(w3)
panel.show()
sys.exit(app.exec_())
| true | true |
1c3c6255f57d07dd08df4f7ffbc40ddf5b4d0cbe | 3,143 | py | Python | compro/atcoder_problems/fetch_specific_difficulty_problem_lists.py | tsutaj/sandbox | c7046f2973ce23f84085c6697c6752483cdcda71 | [
"MIT"
] | null | null | null | compro/atcoder_problems/fetch_specific_difficulty_problem_lists.py | tsutaj/sandbox | c7046f2973ce23f84085c6697c6752483cdcda71 | [
"MIT"
] | null | null | null | compro/atcoder_problems/fetch_specific_difficulty_problem_lists.py | tsutaj/sandbox | c7046f2973ce23f84085c6697c6752483cdcda71 | [
"MIT"
] | null | null | null | import argparse
import logging
import pandas as pd
import pathlib
import requests
# (source URL, local cache path) pairs. The "hoge" URLs are placeholders:
# result files are only written locally, never fetched.
diff_info = ("https://kenkoooo.com/atcoder/resources/problem-models.json", "./difficulty.json")
prob_info = ("https://kenkoooo.com/atcoder/resources/problems.json", "./problems.json")
result_json_info = ("hoge", "./result.json")
result_csv_info = ("hoge", "./result.csv")
def set_jsonfile(json_info, orient_index=True):
    """Load a DataFrame from a cached JSON file, fetching it when missing.

    json_info is a (url, local_path) pair. orient_index selects the pandas
    orient ("index" vs "columns") used when building the frame from the
    freshly fetched JSON payload.
    """
    orient_option = "columns"
    if orient_index:
        orient_option = "index"
    if pathlib.Path(json_info[1]).exists():
        df = pd.read_json(json_info[1])
    else:
        logging.warning("{} does not exist. fetching file from '{}' ...".format(json_info[1], json_info[0]))
        # The fetched frame is not written here; main() persists it
        # afterwards via save_jsonfile().
        df = pd.DataFrame.from_dict(
            requests.get(json_info[0]).json(),
            orient=orient_option,
        )
    return df
def save_jsonfile(df, json_info):
    """Write *df* as JSON to json_info[1] unless that file already exists."""
    target = json_info[1]
    if pathlib.Path(target).exists():
        logging.warning("{} already exists. do nothing ...".format(target))
    else:
        df.to_json(target)
def save_csvfile(df, csv_info):
    """Write *df* as CSV to csv_info[1] unless that file already exists."""
    target = csv_info[1]
    if pathlib.Path(target).exists():
        logging.warning("{} already exists. do nothing ...".format(target))
    else:
        df.to_csv(target)
def create_problem_url(contest_id, problem_id):
    """Return the AtCoder task page URL for *problem_id* in *contest_id*."""
    return f"https://atcoder.jp/contests/{contest_id}/tasks/{problem_id}"
def main(min_difficulty, max_difficulty):
    """Find problems whose difficulty is in [min_difficulty, max_difficulty)."""
    diff_df = set_jsonfile(diff_info, True)
    prob_df = set_jsonfile(prob_info, False)
    save_jsonfile(diff_df, diff_info)
    save_jsonfile(prob_df, prob_info)
    # Drop problems whose difficulty is NaN (no estimate available).
    diff_df = diff_df.dropna(subset=["difficulty"])
    # Keep only problems whose difficulty lies in the requested half-open range.
    diff_df = diff_df[ (diff_df["difficulty"] >= min_difficulty) \
                     & (diff_df["difficulty"] < max_difficulty) ]
    # Drop the model-fitting columns that are irrelevant to the listing.
    diff_df = diff_df.drop(columns=[
        "slope",
        "intercept",
        "variance",
        "discrimination",
        "irt_loglikelihood",
        "irt_users"
    ])
    # Add placeholder columns, filled in below.
    diff_df["title"] = "unknown"
    diff_df["contest_id"] = "unknown"
    diff_df["url"] = "unknown"
    # Fill in title and URL per problem (contest_id is only an intermediate;
    # it is dropped again by the reindex below).
    prob_df = prob_df.set_index("id")
    for prob_id in list(diff_df.index.values):
        diff_df.at[prob_id, "title"] = prob_df.at[prob_id, "title"]
        contest_id = prob_df.at[prob_id, "contest_id"]
        problem_url = create_problem_url(contest_id, prob_id)
        diff_df.at[prob_id, "url"] = problem_url
    # Select and order the output columns.
    columns_diff = ["title", "url", "difficulty", "is_experimental"]
    diff_df = diff_df.reindex(columns=columns_diff)
    print(diff_df)
    save_jsonfile(diff_df, result_json_info)
    save_csvfile(diff_df, result_csv_info)
if __name__ == "__main__":
    # CLI entry point: the difficulty window is [min, max), in AtCoder
    # rating-like units (defaults roughly blue..orange).
    parser = argparse.ArgumentParser()
    parser.add_argument("-min", "--min-difficulty", default=1600, type=int)
    parser.add_argument("-max", "--max-difficulty", default=2400, type=int)
    args = parser.parse_args()
    print(args)
    main(args.min_difficulty, args.max_difficulty)
| 31.747475 | 108 | 0.655425 | import argparse
import logging
import pandas as pd
import pathlib
import requests
diff_info = ("https://kenkoooo.com/atcoder/resources/problem-models.json", "./difficulty.json")
prob_info = ("https://kenkoooo.com/atcoder/resources/problems.json", "./problems.json")
result_json_info = ("hoge", "./result.json")
result_csv_info = ("hoge", "./result.csv")
def set_jsonfile(json_info, orient_index=True):
orient_option = "columns"
if orient_index:
orient_option = "index"
if pathlib.Path(json_info[1]).exists():
df = pd.read_json(json_info[1])
else:
logging.warning("{} does not exist. fetching file from '{}' ...".format(json_info[1], json_info[0]))
df = pd.DataFrame.from_dict(
requests.get(json_info[0]).json(),
orient=orient_option,
)
return df
def save_jsonfile(df, json_info):
if not pathlib.Path(json_info[1]).exists():
df.to_json(json_info[1])
else:
logging.warning("{} already exists. do nothing ...".format(json_info[1]))
def save_csvfile(df, csv_info):
if not pathlib.Path(csv_info[1]).exists():
df.to_csv(csv_info[1])
else:
logging.warning("{} already exists. do nothing ...".format(csv_info[1]))
def create_problem_url(contest_id, problem_id):
return "https://atcoder.jp/contests/" + contest_id + "/tasks/" + problem_id
def main(min_difficulty, max_difficulty):
diff_df = set_jsonfile(diff_info, True)
prob_df = set_jsonfile(prob_info, False)
save_jsonfile(diff_df, diff_info)
save_jsonfile(prob_df, prob_info)
diff_df = diff_df.dropna(subset=["difficulty"])
diff_df = diff_df[ (diff_df["difficulty"] >= min_difficulty) \
& (diff_df["difficulty"] < max_difficulty) ]
diff_df = diff_df.drop(columns=[
"slope",
"intercept",
"variance",
"discrimination",
"irt_loglikelihood",
"irt_users"
])
diff_df["title"] = "unknown"
diff_df["contest_id"] = "unknown"
diff_df["url"] = "unknown"
prob_df = prob_df.set_index("id")
for prob_id in list(diff_df.index.values):
diff_df.at[prob_id, "title"] = prob_df.at[prob_id, "title"]
contest_id = prob_df.at[prob_id, "contest_id"]
problem_url = create_problem_url(contest_id, prob_id)
diff_df.at[prob_id, "url"] = problem_url
columns_diff = ["title", "url", "difficulty", "is_experimental"]
diff_df = diff_df.reindex(columns=columns_diff)
print(diff_df)
save_jsonfile(diff_df, result_json_info)
save_csvfile(diff_df, result_csv_info)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-min", "--min-difficulty", default=1600, type=int)
parser.add_argument("-max", "--max-difficulty", default=2400, type=int)
args = parser.parse_args()
print(args)
main(args.min_difficulty, args.max_difficulty)
| true | true |
1c3c62f47a08a5db3dccc4e6fb2f59b512a6b691 | 4,567 | py | Python | gdsfactory/types.py | gdsfactory/gdsfactory | e53b1f3415a81862d465e0443fc09fb35d14d1e0 | [
"MIT"
] | 42 | 2020-05-25T09:33:45.000Z | 2022-03-29T03:41:19.000Z | gdsfactory/types.py | gdsfactory/gdsfactory | e53b1f3415a81862d465e0443fc09fb35d14d1e0 | [
"MIT"
] | 133 | 2020-05-28T18:29:04.000Z | 2022-03-31T22:21:42.000Z | gdsfactory/types.py | gdsfactory/gdsfactory | e53b1f3415a81862d465e0443fc09fb35d14d1e0 | [
"MIT"
] | 17 | 2020-06-30T07:07:50.000Z | 2022-03-17T15:45:27.000Z | """In programming, a factory is a function that returns an Object.
Functions are easy to understand because they have clear inputs and outputs.
Most gdsfactory functions take some inputs and return a Component object.
Some of these inputs parameters are also functions.
- Component: Object with.
- name
- references to other components (x, y, rotation)
- polygons in different layers
- ports dictionary
- ComponentFactory: function that returns a Component.
- Route: dataclass with 3 attributes.
- references: list of references (straights, bends and tapers)
- ports: dict(input=PortIn, output=PortOut)
- length: float (how long is this route)
- RouteFactory: function that returns a Route.
"""
import json
import pathlib
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union

from omegaconf import OmegaConf
from phidl.device_layout import Label as LabelPhidl
from phidl.device_layout import Path
from pydantic import BaseModel
from typing_extensions import Literal

from gdsfactory.component import Component, ComponentReference
from gdsfactory.cross_section import CrossSection
from gdsfactory.port import Port
# Named anchor positions accepted by PlacementModel.port:
# compass edges/corners (n/s/e/w combinations) plus "center"/"cc".
Anchor = Literal[
    "ce",
    "cw",
    "nc",
    "ne",
    "nw",
    "sc",
    "se",
    "sw",
    "center",
    "cc",
]
class Label(LabelPhidl):
    """Pydantic-aware wrapper around phidl's Label type."""

    @classmethod
    def __get_validators__(cls):
        yield cls.validate

    @classmethod
    def validate(cls, v):
        """Check with pydantic that *v* is a valid phidl Label.

        Raises TypeError rather than using ``assert``: asserts are stripped
        under ``python -O``, which would silently disable this validation.
        Pydantic surfaces the TypeError as a ValidationError, as it did
        for the previous AssertionError.
        """
        if not isinstance(v, LabelPhidl):
            raise TypeError(f"TypeError, Got {type(v)}, expecting Label")
        return v
class Route(BaseModel):
    # A placed route: the references (straights, bends, tapers) composing it,
    # optional labels, the two ports it connects, and its total length.
    references: List[ComponentReference]
    labels: Optional[List[Label]] = None
    ports: Tuple[Port, Port]
    length: float
class Routes(BaseModel):
    # A bundle of routes: a flat reference list plus per-route lengths;
    # ports and bend_radius are optional parallel metadata lists.
    references: List[ComponentReference]
    lengths: List[float]
    ports: Optional[List[Port]] = None
    bend_radius: Optional[List[float]] = None
class ComponentModel(BaseModel):
    # Component factory name and the keyword settings it is called with.
    component: str
    settings: Optional[Dict[str, Any]]
class PlacementModel(BaseModel):
    # Placement of a single instance: x/y may be numbers or port-reference
    # strings, dx/dy are offsets, port an optional anchor, plus rotation
    # (degrees) and mirroring.
    x: Union[str, float] = 0
    y: Union[str, float] = 0
    dx: float = 0
    dy: float = 0
    port: Optional[Union[str, Anchor]] = None
    rotation: int = 0
    mirror: bool = False
class RouteModel(BaseModel):
    # links maps source port name -> destination port name; settings and
    # routing_strategy configure/select the routing function.
    links: Dict[str, str]
    settings: Optional[Dict[str, Any]] = None
    routing_strategy: Optional[str] = None
class CircuitModel(BaseModel):
    # Netlist-style circuit description: named instances, their placements,
    # point-to-point connections, named route bundles and free-form info.
    instances: Dict[str, ComponentModel]
    name: Optional[str] = None
    placements: Optional[Dict[str, PlacementModel]] = None
    connections: Optional[List[Dict[str, str]]] = None
    routes: Optional[Dict[str, RouteModel]] = None
    info: Optional[Dict[str, Any]] = None
# Fixed-size and variadic numeric/string tuple aliases.
Float2 = Tuple[float, float]
Float3 = Tuple[float, float, float]
Floats = Tuple[float, ...]
Strs = Tuple[str, ...]
Int2 = Tuple[int, int]
Int3 = Tuple[int, int, int]
Ints = Tuple[int, ...]
# GDS layer as (layer, datatype).
Layer = Tuple[int, int]
Layers = Tuple[Layer, ...]
# Factory aliases: callables producing the core objects.
RouteFactory = Callable[..., Route]
ComponentFactory = Callable[..., Component]
ComponentFactoryDict = Dict[str, ComponentFactory]
PathFactory = Callable[..., Path]
PathType = Union[str, pathlib.Path]
PathTypes = Tuple[PathType, ...]
ComponentOrFactory = Union[ComponentFactory, Component]
ComponentOrFactoryOrList = Union[ComponentOrFactory, List[ComponentOrFactory]]
ComponentOrPath = Union[PathType, Component]
ComponentOrReference = Union[Component, ComponentReference]
NameToFunctionDict = Dict[str, ComponentFactory]
Number = Union[float, int]
Coordinate = Tuple[float, float]
Coordinates = Tuple[Coordinate, ...]
# NOTE(review): ComponentOrPath is redefined here (duplicate of the alias
# above, with the Union order swapped) — confirm and remove one.
ComponentOrPath = Union[Component, PathType]
CrossSectionFactory = Callable[..., CrossSection]
CrossSectionOrFactory = Union[CrossSection, Callable[..., CrossSection]]

# Public names exported by `from gdsfactory.types import *`.
__all__ = (
    "ComponentFactory",
    "ComponentFactoryDict",
    "ComponentOrFactory",
    "ComponentOrPath",
    "ComponentOrReference",
    "Coordinate",
    "Coordinates",
    "CrossSectionFactory",
    "CrossSectionOrFactory",
    "Float2",
    "Float3",
    "Floats",
    "Int2",
    "Int3",
    "Ints",
    "Layer",
    "Layers",
    "NameToFunctionDict",
    "Number",
    "PathType",
    "PathTypes",
    "Route",
    "RouteFactory",
    "Routes",
    "Strs",
)
def write_schema(model: Type[BaseModel] = CircuitModel):
    """Write *model*'s JSON schema next to this module.

    Produces both ``schema.yaml`` and ``schema.json``. The parameter is a
    pydantic model *class* (not an instance), hence ``Type[BaseModel]`` —
    the previous ``BaseModel`` annotation described an instance.
    """
    s = model.schema_json()
    # Round-trip through OmegaConf so the YAML mirrors the JSON content.
    d = OmegaConf.create(s)
    f1 = pathlib.Path(__file__).parent / "schema.yaml"
    f1.write_text(OmegaConf.to_yaml(d))
    f2 = pathlib.Path(__file__).parent / "schema.json"
    f2.write_text(json.dumps(OmegaConf.to_container(d)))
if __name__ == "__main__":
    # Regenerate the schema files when this module is run directly.
    write_schema()
| 25.948864 | 86 | 0.696737 | import json
import pathlib
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from omegaconf import OmegaConf
from phidl.device_layout import Label as LabelPhidl
from phidl.device_layout import Path
from pydantic import BaseModel
from typing_extensions import Literal
from gdsfactory.component import Component, ComponentReference
from gdsfactory.cross_section import CrossSection
from gdsfactory.port import Port
Anchor = Literal[
"ce",
"cw",
"nc",
"ne",
"nw",
"sc",
"se",
"sw",
"center",
"cc",
]
class Label(LabelPhidl):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
assert isinstance(v, LabelPhidl), f"TypeError, Got {type(v)}, expecting Label"
return v
class Route(BaseModel):
references: List[ComponentReference]
labels: Optional[List[Label]] = None
ports: Tuple[Port, Port]
length: float
class Routes(BaseModel):
references: List[ComponentReference]
lengths: List[float]
ports: Optional[List[Port]] = None
bend_radius: Optional[List[float]] = None
class ComponentModel(BaseModel):
component: str
settings: Optional[Dict[str, Any]]
class PlacementModel(BaseModel):
x: Union[str, float] = 0
y: Union[str, float] = 0
dx: float = 0
dy: float = 0
port: Optional[Union[str, Anchor]] = None
rotation: int = 0
mirror: bool = False
class RouteModel(BaseModel):
links: Dict[str, str]
settings: Optional[Dict[str, Any]] = None
routing_strategy: Optional[str] = None
class CircuitModel(BaseModel):
instances: Dict[str, ComponentModel]
name: Optional[str] = None
placements: Optional[Dict[str, PlacementModel]] = None
connections: Optional[List[Dict[str, str]]] = None
routes: Optional[Dict[str, RouteModel]] = None
info: Optional[Dict[str, Any]] = None
Float2 = Tuple[float, float]
Float3 = Tuple[float, float, float]
Floats = Tuple[float, ...]
Strs = Tuple[str, ...]
Int2 = Tuple[int, int]
Int3 = Tuple[int, int, int]
Ints = Tuple[int, ...]
Layer = Tuple[int, int]
Layers = Tuple[Layer, ...]
RouteFactory = Callable[..., Route]
ComponentFactory = Callable[..., Component]
ComponentFactoryDict = Dict[str, ComponentFactory]
PathFactory = Callable[..., Path]
PathType = Union[str, pathlib.Path]
PathTypes = Tuple[PathType, ...]
ComponentOrFactory = Union[ComponentFactory, Component]
ComponentOrFactoryOrList = Union[ComponentOrFactory, List[ComponentOrFactory]]
ComponentOrPath = Union[PathType, Component]
ComponentOrReference = Union[Component, ComponentReference]
NameToFunctionDict = Dict[str, ComponentFactory]
Number = Union[float, int]
Coordinate = Tuple[float, float]
Coordinates = Tuple[Coordinate, ...]
ComponentOrPath = Union[Component, PathType]
CrossSectionFactory = Callable[..., CrossSection]
CrossSectionOrFactory = Union[CrossSection, Callable[..., CrossSection]]
__all__ = (
"ComponentFactory",
"ComponentFactoryDict",
"ComponentOrFactory",
"ComponentOrPath",
"ComponentOrReference",
"Coordinate",
"Coordinates",
"CrossSectionFactory",
"CrossSectionOrFactory",
"Float2",
"Float3",
"Floats",
"Int2",
"Int3",
"Ints",
"Layer",
"Layers",
"NameToFunctionDict",
"Number",
"PathType",
"PathTypes",
"Route",
"RouteFactory",
"Routes",
"Strs",
)
def write_schema(model: BaseModel = CircuitModel):
s = model.schema_json()
d = OmegaConf.create(s)
f1 = pathlib.Path(__file__).parent / "schema.yaml"
f1.write_text(OmegaConf.to_yaml(d))
f2 = pathlib.Path(__file__).parent / "schema.json"
f2.write_text(json.dumps(OmegaConf.to_container(d)))
if __name__ == "__main__":
write_schema()
| true | true |
1c3c6328ae82c26dbaf6b705307a78e18c87d06a | 1,529 | py | Python | example/router2x.py | shiyanhui/Router | df5974fec264345920ab1ed54b043493882e558f | [
"MIT"
] | 10 | 2017-11-26T03:01:07.000Z | 2021-12-02T03:56:31.000Z | example/router2x.py | shiyanhui/Router | df5974fec264345920ab1ed54b043493882e558f | [
"MIT"
] | 1 | 2020-11-30T16:40:00.000Z | 2020-11-30T16:40:00.000Z | example/router2x.py | shiyanhui/Router | df5974fec264345920ab1ed54b043493882e558f | [
"MIT"
] | 4 | 2018-03-19T10:01:39.000Z | 2020-11-30T17:05:05.000Z | # -*- coding: utf-8 -*-
"""
An Router example that in Python2.x
"""
from router import BaseRouter, cleanPath, toggleTrailingSlash
class Router(BaseRouter):
def __init__(self, redirectTrailingSlash=True, fixRequestPath=True,
notFoundHandler=None, methodNotAllowedHandler=None):
self.redirectTrailingSlash = redirectTrailingSlash
self.fixRequestPath = fixRequestPath
self.notFoundHandler = notFoundHandler
self.methodNotAllowedHandler = methodNotAllowedHandler
super(Router, self).__init__()
def handle(self, req):
path, method = req.path, req.method.upper()
if self.fixRequestPath:
path = cleanPath(path)
pathExisted, handler, req.params = self.tree.get(path, method)
if not pathExisted:
if self.redirectTrailingSlash:
path = toggleTrailingSlash(path)
pathExisted, _, _ = self.tree.get(path, method)
if pathExisted:
req.redirect(path)
req.wfile.flush()
return
if self.notFoundHandler:
self.notFoundHandler(req)
return
req.send_error(404)
req.wfile.flush()
return
if not handler:
if self.methodNotAllowedHandler:
self.methodNotAllowedHandler(req)
return
req.send_error(405)
req.wfile.flush()
return
handler(req)
| 29.980392 | 71 | 0.583388 |
from router import BaseRouter, cleanPath, toggleTrailingSlash
class Router(BaseRouter):
def __init__(self, redirectTrailingSlash=True, fixRequestPath=True,
notFoundHandler=None, methodNotAllowedHandler=None):
self.redirectTrailingSlash = redirectTrailingSlash
self.fixRequestPath = fixRequestPath
self.notFoundHandler = notFoundHandler
self.methodNotAllowedHandler = methodNotAllowedHandler
super(Router, self).__init__()
def handle(self, req):
path, method = req.path, req.method.upper()
if self.fixRequestPath:
path = cleanPath(path)
pathExisted, handler, req.params = self.tree.get(path, method)
if not pathExisted:
if self.redirectTrailingSlash:
path = toggleTrailingSlash(path)
pathExisted, _, _ = self.tree.get(path, method)
if pathExisted:
req.redirect(path)
req.wfile.flush()
return
if self.notFoundHandler:
self.notFoundHandler(req)
return
req.send_error(404)
req.wfile.flush()
return
if not handler:
if self.methodNotAllowedHandler:
self.methodNotAllowedHandler(req)
return
req.send_error(405)
req.wfile.flush()
return
handler(req)
| true | true |
1c3c6347aadaf337888eac53824915f4d798e2bb | 500 | py | Python | pythonUtils/ExploreDA/SummaryStatistics/utils.py | tgquintela/pythonUtils | 6f2e5ba3be67a48d3cd5cf72dcabfae04cfa7afe | [
"MIT"
] | 1 | 2015-07-21T05:15:11.000Z | 2015-07-21T05:15:11.000Z | pythonUtils/ExploreDA/SummaryStatistics/utils.py | tgquintela/pythonUtils | 6f2e5ba3be67a48d3cd5cf72dcabfae04cfa7afe | [
"MIT"
] | null | null | null | pythonUtils/ExploreDA/SummaryStatistics/utils.py | tgquintela/pythonUtils | 6f2e5ba3be67a48d3cd5cf72dcabfae04cfa7afe | [
"MIT"
] | null | null | null |
"""
Utils
-----
Statistics utils.
"""
def clean_dict_stats(stats):
"""Cleaner dict stats information. That function removes the plots stored
in the stats dictionary data base.
Parameters
----------
stats: dict
the stats dictionary database.
Returns
-------
stats: dict
the stats dictionary database without plots.
"""
for i in range(len(stats)):
if 'plots' in stats[i].keys():
del stats[i]['plots']
return stats
| 16.666667 | 77 | 0.588 |
def clean_dict_stats(stats):
for i in range(len(stats)):
if 'plots' in stats[i].keys():
del stats[i]['plots']
return stats
| true | true |
1c3c64dfbceee8a6e3fb2a0c39151dcdfa6ec219 | 522 | py | Python | air_hockey/game/scripting/change_scene_action.py | Nemo3003/cse210-06 | 30ecefac7f23927be904f48b29492bb2220262a8 | [
"Apache-2.0"
] | null | null | null | air_hockey/game/scripting/change_scene_action.py | Nemo3003/cse210-06 | 30ecefac7f23927be904f48b29492bb2220262a8 | [
"Apache-2.0"
] | null | null | null | air_hockey/game/scripting/change_scene_action.py | Nemo3003/cse210-06 | 30ecefac7f23927be904f48b29492bb2220262a8 | [
"Apache-2.0"
] | null | null | null | from constants import *
from game.scripting.action import Action
"""
In order to have a coherent game you need to change scenes constantly...that is what this module is for :)
"""
class ChangeSceneAction(Action):
def __init__(self, keyboard_service, next_scene):
self._keyboard_service = keyboard_service
self._next_scene = next_scene
def execute(self, cast, script, callback):
if self._keyboard_service.is_key_pressed(ENTER):
callback.on_next(self._next_scene)
| 30.705882 | 110 | 0.710728 | from constants import *
from game.scripting.action import Action
class ChangeSceneAction(Action):
def __init__(self, keyboard_service, next_scene):
self._keyboard_service = keyboard_service
self._next_scene = next_scene
def execute(self, cast, script, callback):
if self._keyboard_service.is_key_pressed(ENTER):
callback.on_next(self._next_scene)
| true | true |
1c3c66e744866def3414627e0860c3e0a5367fd9 | 2,580 | py | Python | plugins/checkdmarc/icon_checkdmarc/actions/check_domains_alternate_nameservers/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/checkdmarc/icon_checkdmarc/actions/check_domains_alternate_nameservers/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/checkdmarc/icon_checkdmarc/actions/check_domains_alternate_nameservers/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Check DMARC records against alternate name servers"
class Input:
DOMAIN = "domain"
NAMESERVERS = "nameservers"
TIMEOUT = "timeout"
class Output:
REPORT = "report"
class CheckDomainsAlternateNameserversInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"domain": {
"type": "string",
"title": "Domain",
"description": "Domain to check, in alternate nameserver",
"order": 1
},
"nameservers": {
"type": "array",
"title": "Nameservers",
"description": "Nameserver to check against",
"items": {
"type": "string"
},
"default": [
"1.1.1.1",
"1.0.0.1"
],
"order": 2
},
"timeout": {
"type": "number",
"title": "Timeout",
"description": "Timeout in seconds for request. Default is 6 seconds",
"default": 6,
"order": 3
}
},
"required": [
"domain",
"nameservers",
"timeout"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class CheckDomainsAlternateNameserversOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"report": {
"$ref": "#/definitions/report",
"title": "Report",
"description": "Report",
"order": 1
}
},
"definitions": {
"report": {
"type": "object",
"title": "report",
"properties": {
"base_domain": {
"type": "string",
"title": "Base Domain",
"description": "Base domain",
"order": 2
},
"dmarc": {
"type": "object",
"title": "DMARC",
"description": "DMARC",
"order": 3
},
"domain": {
"type": "string",
"title": "Domain",
"description": "Domain",
"order": 1
},
"mx": {
"type": "object",
"title": "MX",
"description": "MX",
"order": 5
},
"ns": {
"type": "object",
"title": "NS",
"description": "NS",
"order": 4
},
"spf": {
"type": "object",
"title": "SPF",
"description": "SPF",
"order": 6
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 20.314961 | 76 | 0.471318 |
import komand
import json
class Component:
DESCRIPTION = "Check DMARC records against alternate name servers"
class Input:
DOMAIN = "domain"
NAMESERVERS = "nameservers"
TIMEOUT = "timeout"
class Output:
REPORT = "report"
class CheckDomainsAlternateNameserversInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"domain": {
"type": "string",
"title": "Domain",
"description": "Domain to check, in alternate nameserver",
"order": 1
},
"nameservers": {
"type": "array",
"title": "Nameservers",
"description": "Nameserver to check against",
"items": {
"type": "string"
},
"default": [
"1.1.1.1",
"1.0.0.1"
],
"order": 2
},
"timeout": {
"type": "number",
"title": "Timeout",
"description": "Timeout in seconds for request. Default is 6 seconds",
"default": 6,
"order": 3
}
},
"required": [
"domain",
"nameservers",
"timeout"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class CheckDomainsAlternateNameserversOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"report": {
"$ref": "#/definitions/report",
"title": "Report",
"description": "Report",
"order": 1
}
},
"definitions": {
"report": {
"type": "object",
"title": "report",
"properties": {
"base_domain": {
"type": "string",
"title": "Base Domain",
"description": "Base domain",
"order": 2
},
"dmarc": {
"type": "object",
"title": "DMARC",
"description": "DMARC",
"order": 3
},
"domain": {
"type": "string",
"title": "Domain",
"description": "Domain",
"order": 1
},
"mx": {
"type": "object",
"title": "MX",
"description": "MX",
"order": 5
},
"ns": {
"type": "object",
"title": "NS",
"description": "NS",
"order": 4
},
"spf": {
"type": "object",
"title": "SPF",
"description": "SPF",
"order": 6
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| true | true |
1c3c67aa1d0b25d633f496477aa384d08de4b753 | 2,963 | py | Python | jacket/tests/storage/functional/api/foxinsocks.py | bopopescu/jacket | d7ad3147fcb43131098c2a5210847634ff5fb325 | [
"Apache-2.0"
] | null | null | null | jacket/tests/storage/functional/api/foxinsocks.py | bopopescu/jacket | d7ad3147fcb43131098c2a5210847634ff5fb325 | [
"Apache-2.0"
] | null | null | null | jacket/tests/storage/functional/api/foxinsocks.py | bopopescu/jacket | d7ad3147fcb43131098c2a5210847634ff5fb325 | [
"Apache-2.0"
] | 2 | 2016-08-10T02:21:49.000Z | 2020-07-24T01:57:21.000Z | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from jacket.api.storage.storage import extensions
from jacket.api.storage.storage.openstack import wsgi
class FoxInSocksController(object):
def index(self, req):
return "Try to say this Mr. Knox, sir..."
class FoxInSocksServerControllerExtension(wsgi.Controller):
@wsgi.action('add_tweedle')
def _add_tweedle(self, req, id, body):
return "Tweedle Beetle Added."
@wsgi.action('delete_tweedle')
def _delete_tweedle(self, req, id, body):
return "Tweedle Beetle Deleted."
@wsgi.action('fail')
def _fail(self, req, id, body):
raise webob.exc.HTTPBadRequest(explanation='Tweedle fail')
class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
# NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
# NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
resp_obj.obj['big_bands'] = 'Pig Bands!'
class Foxinsocks(extensions.ExtensionDescriptor):
"""The Fox In Socks Extension."""
name = "Fox In Socks"
alias = "FOXNSOX"
namespace = "http://www.fox.in.socks/api/ext/pie/v1.0"
updated = "2011-01-22T13:25:27-06:00"
def __init__(self, ext_mgr):
ext_mgr.register(self)
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('foxnsocks',
FoxInSocksController())
resources.append(resource)
return resources
def get_controller_extensions(self):
extension_list = []
extension_set = [
(FoxInSocksServerControllerExtension, 'servers'),
(FoxInSocksFlavorGooseControllerExtension, 'flavors'),
(FoxInSocksFlavorBandsControllerExtension, 'flavors'), ]
for klass, collection in extension_set:
controller = klass()
ext = extensions.ControllerExtension(self, collection, controller)
extension_list.append(ext)
return extension_list
| 32.206522 | 78 | 0.674992 |
import webob.exc
from jacket.api.storage.storage import extensions
from jacket.api.storage.storage.openstack import wsgi
class FoxInSocksController(object):
def index(self, req):
return "Try to say this Mr. Knox, sir..."
class FoxInSocksServerControllerExtension(wsgi.Controller):
@wsgi.action('add_tweedle')
def _add_tweedle(self, req, id, body):
return "Tweedle Beetle Added."
@wsgi.action('delete_tweedle')
def _delete_tweedle(self, req, id, body):
return "Tweedle Beetle Deleted."
@wsgi.action('fail')
def _fail(self, req, id, body):
raise webob.exc.HTTPBadRequest(explanation='Tweedle fail')
class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
resp_obj.obj['big_bands'] = 'Pig Bands!'
class Foxinsocks(extensions.ExtensionDescriptor):
name = "Fox In Socks"
alias = "FOXNSOX"
namespace = "http://www.fox.in.socks/api/ext/pie/v1.0"
updated = "2011-01-22T13:25:27-06:00"
def __init__(self, ext_mgr):
ext_mgr.register(self)
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('foxnsocks',
FoxInSocksController())
resources.append(resource)
return resources
def get_controller_extensions(self):
extension_list = []
extension_set = [
(FoxInSocksServerControllerExtension, 'servers'),
(FoxInSocksFlavorGooseControllerExtension, 'flavors'),
(FoxInSocksFlavorBandsControllerExtension, 'flavors'), ]
for klass, collection in extension_set:
controller = klass()
ext = extensions.ControllerExtension(self, collection, controller)
extension_list.append(ext)
return extension_list
| true | true |
1c3c69279c6b1c2b47c8f13643a440eab54b9347 | 1,026 | py | Python | land-queue/main.py | bootstraponline/python | fe984fe3d6c1f49ad5b3e70a1cbe450466b46da3 | [
"Apache-2.0"
] | null | null | null | land-queue/main.py | bootstraponline/python | fe984fe3d6c1f49ad5b3e70a1cbe450466b46da3 | [
"Apache-2.0"
] | null | null | null | land-queue/main.py | bootstraponline/python | fe984fe3d6c1f49ad5b3e70a1cbe450466b46da3 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, request, Response
import octohook
from octohook.events import PullRequestEvent
from octohook.models import PullRequest, Repository
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
# X-GitHub-Event contains an event type defined here: https://developer.github.com/webhooks/
github_event = request.headers.get('X-GitHub-Event')
# X-GitHub-Event: pull_request
if github_event != 'pull_request':
return f'skipped event {github_event}', 200, {}
global last
last = request.json
# event : PullRequestEvent = octohook.parse(github_event, request.json)
return f'processed event {github_event} {last}', 200, {}
last = 'no webhook data'
@app.route('/')
def hello():
global last
return str(last)
# https://cloud.google.com/appengine/docs/standard/python3/configuring-warmup-requests
@app.route('/_ah/warmup')
def warmup():
return '', 200, {}
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
| 27.72973 | 96 | 0.701754 | from flask import Flask, request, Response
import octohook
from octohook.events import PullRequestEvent
from octohook.models import PullRequest, Repository
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
github_event = request.headers.get('X-GitHub-Event')
if github_event != 'pull_request':
return f'skipped event {github_event}', 200, {}
global last
last = request.json
return f'processed event {github_event} {last}', 200, {}
last = 'no webhook data'
@app.route('/')
def hello():
global last
return str(last)
@app.route('/_ah/warmup')
def warmup():
return '', 200, {}
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
| true | true |
1c3c69a01bcab218da4da5a072bc2ad21afe55f8 | 8,896 | py | Python | tests/unit/modules/test_at.py | nevins-b/salt | 56363bc41ca36e757103df3504d1bb07e3a7251b | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_at.py | nevins-b/salt | 56363bc41ca36e757103df3504d1bb07e3a7251b | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_at.py | nevins-b/salt | 56363bc41ca36e757103df3504d1bb07e3a7251b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rupesh Tare <rupesht@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.utils
import salt.modules.at as at
@skipIf(NO_MOCK, NO_MOCK_REASON)
class AtTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for the salt.modules.at module
'''
def setup_loader_modules(self):
return {at: {}}
atq_output = {'jobs': [{'date': '2014-12-11', 'job': 101, 'queue': 'A',
'tag': '', 'time': '19:48:47', 'user': 'B'}]}
@classmethod
def tearDownClass(cls):
del cls.atq_output
@patch('salt.modules.at._cmd', MagicMock(return_value=None))
def test_atq_not_available(self):
'''
Tests the at.atq not available for any type of os_family.
'''
with patch.dict(at.__grains__, {'os_family': 'RedHat'}):
self.assertEqual(at.atq(), '\'at.atq\' is not available.')
with patch.dict(at.__grains__, {'os_family': ''}):
self.assertEqual(at.atq(), '\'at.atq\' is not available.')
@patch('salt.modules.at._cmd', MagicMock(return_value=''))
def test_atq_no_jobs_available(self):
'''
Tests the no jobs available for any type of os_family.
'''
with patch.dict(at.__grains__, {'os_family': 'RedHat'}):
self.assertDictEqual(at.atq(), {'jobs': []})
with patch.dict(at.__grains__, {'os_family': ''}):
self.assertDictEqual(at.atq(), {'jobs': []})
@patch('salt.modules.at._cmd')
def test_atq_list(self, salt_modules_at__cmd_mock):
'''
Tests the list all queued and running jobs.
'''
salt_modules_at__cmd_mock.return_value = '101\tThu Dec 11 \
19:48:47 2014 A B'
with patch.dict(at.__grains__, {'os_family': '', 'os': ''}):
self.assertDictEqual(at.atq(), {'jobs': [{'date': '2014-12-11',
'job': 101,
'queue': 'A',
'tag': '',
'time': '19:48:00',
'user': 'B'}]})
salt_modules_at__cmd_mock.return_value = '101\t2014-12-11 \
19:48:47 A B'
with patch.dict(at.__grains__, {'os_family': 'RedHat', 'os': ''}):
self.assertDictEqual(at.atq(), {'jobs': [{'date': '2014-12-11',
'job': 101,
'queue': 'A',
'tag': '',
'time': '19:48:47',
'user': 'B'}]})
salt_modules_at__cmd_mock.return_value = 'SALT: Dec 11, \
2014 19:48 A 101 B'
with patch.dict(at.__grains__, {'os_family': '', 'os': 'OpenBSD'}):
self.assertDictEqual(at.atq(), {'jobs': [{'date': '2014-12-11',
'job': '101',
'queue': 'B',
'tag': '',
'time': '19:48:00',
'user': 'A'}]})
@patch('salt.modules.at.atq', MagicMock(return_value=atq_output))
def test_atrm(self):
"""
Tests for remove jobs from the queue.
"""
with patch.object(salt.utils, 'which', return_value=None):
self.assertEqual(at.atrm(), "'at.atrm' is not available.")
with patch.object(salt.utils, 'which', return_value=True):
self.assertDictEqual(at.atrm(), {'jobs': {'removed': [],
'tag': None}})
with patch.object(at, '_cmd', return_value=True):
with patch.object(salt.utils, 'which', return_value=True):
self.assertDictEqual(at.atrm('all'),
{'jobs': {'removed': ['101'],
'tag': None}})
with patch.object(at, '_cmd', return_value=True):
with patch.object(salt.utils, 'which', return_value=True):
self.assertDictEqual(at.atrm(101),
{'jobs': {'removed': ['101'],
'tag': None}})
with patch.object(at, '_cmd', return_value=None):
self.assertEqual(at.atrm(101), '\'at.atrm\' is not available.')
@patch('salt.modules.at.atq', MagicMock(return_value=atq_output))
def test_jobcheck(self):
"""
Tests for check the job from queue.
"""
self.assertDictEqual(at.jobcheck(),
{'error': 'You have given a condition'})
self.assertDictEqual(at.jobcheck(runas='foo'),
{'note': 'No match jobs or time format error',
'jobs': []})
self.assertDictEqual(at.jobcheck(runas='B', tag='', hour=19, minute=48,
day=11, month=12, Year=2014),
{'jobs': [{'date': '2014-12-11',
'job': 101,
'queue': 'A',
'tag': '',
'time': '19:48:47',
'user': 'B'}]})
@patch('salt.modules.at.atq', MagicMock(return_value=atq_output))
def test_at(self):
"""
Tests for add a job to the queue.
"""
self.assertDictEqual(at.at(), {'jobs': []})
with patch.object(salt.utils, 'which', return_value=None):
self.assertEqual(at.at('12:05am', '/sbin/reboot', tag='reboot'),
"'at.at' is not available.")
with patch.object(salt.utils, 'which', return_value=True):
with patch.dict(at.__grains__, {'os_family': 'RedHat'}):
mock = MagicMock(return_value=None)
with patch.dict(at.__salt__, {'cmd.run': mock}):
self.assertEqual(at.at('12:05am', '/sbin/reboot',
tag='reboot'),
"'at.at' is not available.")
mock = MagicMock(return_value='Garbled time')
with patch.dict(at.__salt__, {'cmd.run': mock}):
self.assertDictEqual(at.at('12:05am', '/sbin/reboot',
tag='reboot'),
{'jobs': [],
'error': 'invalid timespec'})
mock = MagicMock(return_value='warning: commands\nA B')
with patch.dict(at.__salt__, {'cmd.run': mock}):
with patch.dict(at.__grains__, {'os': 'OpenBSD'}):
self.assertDictEqual(at.at('12:05am', '/sbin/reboot',
tag='reboot'),
{'jobs': [{'date': '2014-12-11',
'job': 101,
'queue': 'A',
'tag': '',
'time': '19:48:47',
'user': 'B'}]})
with patch.dict(at.__grains__, {'os_family': ''}):
mock = MagicMock(return_value=None)
with patch.dict(at.__salt__, {'cmd.run': mock}):
self.assertEqual(at.at('12:05am', '/sbin/reboot',
tag='reboot'),
"'at.at' is not available.")
def test_atc(self):
"""
Tests for atc
"""
with patch.object(at, '_cmd', return_value=None):
self.assertEqual(at.atc(101), '\'at.atc\' is not available.')
with patch.object(at, '_cmd', return_value=''):
self.assertDictEqual(at.atc(101),
{'error': 'invalid job id \'101\''})
with patch.object(at, '_cmd',
return_value='101\tThu Dec 11 19:48:47 2014 A B'):
self.assertEqual(at.atc(101), '101\tThu Dec 11 19:48:47 2014 A B')
| 43.607843 | 79 | 0.437388 |
from __future__ import absolute_import
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
import salt.utils
import salt.modules.at as at
@skipIf(NO_MOCK, NO_MOCK_REASON)
class AtTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {at: {}}
atq_output = {'jobs': [{'date': '2014-12-11', 'job': 101, 'queue': 'A',
'tag': '', 'time': '19:48:47', 'user': 'B'}]}
@classmethod
def tearDownClass(cls):
del cls.atq_output
@patch('salt.modules.at._cmd', MagicMock(return_value=None))
def test_atq_not_available(self):
with patch.dict(at.__grains__, {'os_family': 'RedHat'}):
self.assertEqual(at.atq(), '\'at.atq\' is not available.')
with patch.dict(at.__grains__, {'os_family': ''}):
self.assertEqual(at.atq(), '\'at.atq\' is not available.')
@patch('salt.modules.at._cmd', MagicMock(return_value=''))
def test_atq_no_jobs_available(self):
with patch.dict(at.__grains__, {'os_family': 'RedHat'}):
self.assertDictEqual(at.atq(), {'jobs': []})
with patch.dict(at.__grains__, {'os_family': ''}):
self.assertDictEqual(at.atq(), {'jobs': []})
@patch('salt.modules.at._cmd')
def test_atq_list(self, salt_modules_at__cmd_mock):
salt_modules_at__cmd_mock.return_value = '101\tThu Dec 11 \
19:48:47 2014 A B'
with patch.dict(at.__grains__, {'os_family': '', 'os': ''}):
self.assertDictEqual(at.atq(), {'jobs': [{'date': '2014-12-11',
'job': 101,
'queue': 'A',
'tag': '',
'time': '19:48:00',
'user': 'B'}]})
salt_modules_at__cmd_mock.return_value = '101\t2014-12-11 \
19:48:47 A B'
with patch.dict(at.__grains__, {'os_family': 'RedHat', 'os': ''}):
self.assertDictEqual(at.atq(), {'jobs': [{'date': '2014-12-11',
'job': 101,
'queue': 'A',
'tag': '',
'time': '19:48:47',
'user': 'B'}]})
salt_modules_at__cmd_mock.return_value = 'SALT: Dec 11, \
2014 19:48 A 101 B'
with patch.dict(at.__grains__, {'os_family': '', 'os': 'OpenBSD'}):
self.assertDictEqual(at.atq(), {'jobs': [{'date': '2014-12-11',
'job': '101',
'queue': 'B',
'tag': '',
'time': '19:48:00',
'user': 'A'}]})
@patch('salt.modules.at.atq', MagicMock(return_value=atq_output))
def test_atrm(self):
with patch.object(salt.utils, 'which', return_value=None):
self.assertEqual(at.atrm(), "'at.atrm' is not available.")
with patch.object(salt.utils, 'which', return_value=True):
self.assertDictEqual(at.atrm(), {'jobs': {'removed': [],
'tag': None}})
with patch.object(at, '_cmd', return_value=True):
with patch.object(salt.utils, 'which', return_value=True):
self.assertDictEqual(at.atrm('all'),
{'jobs': {'removed': ['101'],
'tag': None}})
with patch.object(at, '_cmd', return_value=True):
with patch.object(salt.utils, 'which', return_value=True):
self.assertDictEqual(at.atrm(101),
{'jobs': {'removed': ['101'],
'tag': None}})
with patch.object(at, '_cmd', return_value=None):
self.assertEqual(at.atrm(101), '\'at.atrm\' is not available.')
@patch('salt.modules.at.atq', MagicMock(return_value=atq_output))
def test_jobcheck(self):
self.assertDictEqual(at.jobcheck(),
{'error': 'You have given a condition'})
self.assertDictEqual(at.jobcheck(runas='foo'),
{'note': 'No match jobs or time format error',
'jobs': []})
self.assertDictEqual(at.jobcheck(runas='B', tag='', hour=19, minute=48,
day=11, month=12, Year=2014),
{'jobs': [{'date': '2014-12-11',
'job': 101,
'queue': 'A',
'tag': '',
'time': '19:48:47',
'user': 'B'}]})
@patch('salt.modules.at.atq', MagicMock(return_value=atq_output))
def test_at(self):
self.assertDictEqual(at.at(), {'jobs': []})
with patch.object(salt.utils, 'which', return_value=None):
self.assertEqual(at.at('12:05am', '/sbin/reboot', tag='reboot'),
"'at.at' is not available.")
with patch.object(salt.utils, 'which', return_value=True):
with patch.dict(at.__grains__, {'os_family': 'RedHat'}):
mock = MagicMock(return_value=None)
with patch.dict(at.__salt__, {'cmd.run': mock}):
self.assertEqual(at.at('12:05am', '/sbin/reboot',
tag='reboot'),
"'at.at' is not available.")
mock = MagicMock(return_value='Garbled time')
with patch.dict(at.__salt__, {'cmd.run': mock}):
self.assertDictEqual(at.at('12:05am', '/sbin/reboot',
tag='reboot'),
{'jobs': [],
'error': 'invalid timespec'})
mock = MagicMock(return_value='warning: commands\nA B')
with patch.dict(at.__salt__, {'cmd.run': mock}):
with patch.dict(at.__grains__, {'os': 'OpenBSD'}):
self.assertDictEqual(at.at('12:05am', '/sbin/reboot',
tag='reboot'),
{'jobs': [{'date': '2014-12-11',
'job': 101,
'queue': 'A',
'tag': '',
'time': '19:48:47',
'user': 'B'}]})
with patch.dict(at.__grains__, {'os_family': ''}):
mock = MagicMock(return_value=None)
with patch.dict(at.__salt__, {'cmd.run': mock}):
self.assertEqual(at.at('12:05am', '/sbin/reboot',
tag='reboot'),
"'at.at' is not available.")
    def test_atc(self):
        """at.atc(): unavailable binary, invalid job id, and a found job."""
        # _cmd returning None means the underlying binary is missing.
        with patch.object(at, '_cmd', return_value=None):
            self.assertEqual(at.atc(101), '\'at.atc\' is not available.')
        # Empty output means the job id does not exist.
        with patch.object(at, '_cmd', return_value=''):
            self.assertDictEqual(at.atc(101),
                                 {'error': 'invalid job id \'101\''})
        # Non-empty output is returned to the caller unchanged.
        with patch.object(at, '_cmd',
                          return_value='101\tThu Dec 11 19:48:47 2014 A B'):
            self.assertEqual(at.atc(101), '101\tThu Dec 11 19:48:47 2014 A B')
| true | true |
1c3c69b936dcc5734222d1aea08bd31bc4e801ef | 268 | py | Python | BMI.py | olgarozhdestvina/pands-problems | f6593d340c6b3d18254658248507e7b3a98ac10a | [
"MIT"
] | null | null | null | BMI.py | olgarozhdestvina/pands-problems | f6593d340c6b3d18254658248507e7b3a98ac10a | [
"MIT"
] | null | null | null | BMI.py | olgarozhdestvina/pands-problems | f6593d340c6b3d18254658248507e7b3a98ac10a | [
"MIT"
] | null | null | null | # This program calculates your Body Mass Index (BMI)
def calculate_bmi(height_cm: float, weight_kg: float) -> float:
    """Return the Body Mass Index for a height in cm and a weight in kg.

    BMI = weight (kg) / height (m) ** 2; the height is converted from
    centimetres to metres internally.

    Raises:
        ValueError: if ``height_cm`` is not positive.
    """
    if height_cm <= 0:
        raise ValueError("height must be positive")
    height_m_squared = height_cm ** 2 / 10000  # cm^2 -> m^2
    return weight_kg / height_m_squared


def main() -> None:
    """Prompt for height and weight, then print the BMI to two decimals."""
    height = float(input("Enter your height in cm: "))
    weight = float(input("Enter your weight in kg: "))
    print("Your BMI is {:.2f}".format(calculate_bmi(height, weight)))


if __name__ == "__main__":
    # Guarded so importing this module no longer blocks on input().
    main()
height = float (input("Enter your height in cm: "))
weight = float (input("Enter your weight in kg: "))
squareOfHeight = height ** 2 / 10000
BMI = weight / squareOfHeight
print("Your BMI is {:.2f}".format(BMI)) | true | true |
1c3c69d7951b3ed9aac8b92f8a418eb8a3e56d7c | 1,145 | py | Python | Lib/site-packages/pylint/testutils/constants.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/pylint/testutils/constants.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/pylint/testutils/constants.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
import operator
import re
import sys
from pathlib import Path
# Compact interpreter version string such as "3104" for Python 3.10.4.
SYS_VERS_STR = "{}{}{}".format(*sys.version_info[:3])

TITLE_UNDERLINES = ["", "=", "-", "."]
UPDATE_OPTION = "--update-functional-output"
UPDATE_FILE = Path("pylint-functional-test-update")

# Sub-expression describing a single message symbol, reused twice below.
_MESSAGE = {"msg": r"[a-z][a-z\-]+"}

# Recognises expectation annotations in testdata files: a "#", then an
# optional "+N:"/"-N:" line offset, an optional version guard such as
# ">=3.8:", then a bracketed comma-separated list of message symbols.
_EXPECTED_RE = re.compile(
    (
        r"\s*#\s*(?:(?P<line>[+-]?[0-9]+):)?"
        r"(?:(?P<op>[><=]+) *(?P<version>[0-9.]+):)?"
        r"\s*\[(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)]"
    )
    % _MESSAGE
)

# Comparison operators usable in version guards.
_OPERATORS = {
    ">": operator.gt,
    "<": operator.lt,
    ">=": operator.ge,
    "<=": operator.le,
}
| 38.166667 | 87 | 0.652402 |
import operator
import re
import sys
from pathlib import Path
SYS_VERS_STR = (
"%d%d%d" % sys.version_info[:3]
)
TITLE_UNDERLINES = ["", "=", "-", "."]
UPDATE_OPTION = "--update-functional-output"
UPDATE_FILE = Path("pylint-functional-test-update")
_MESSAGE = {"msg": r"[a-z][a-z\-]+"}
_EXPECTED_RE = re.compile(
r"\s*#\s*(?:(?P<line>[+-]?[0-9]+):)?"
r"(?:(?P<op>[><=]+) *(?P<version>[0-9.]+):)?"
r"\s*\[(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)]" % _MESSAGE
)
_OPERATORS = {">": operator.gt, "<": operator.lt, ">=": operator.ge, "<=": operator.le}
| true | true |
1c3c6b3e9b128d11073c24674f64b5a9e881cfdb | 7,639 | py | Python | openprescribing/frontend/management/commands/infer_practice_boundaries.py | annapowellsmith/openpresc | cfa9fb07d6fc2ee304159c04fcc132cefcf78745 | [
"MIT"
] | null | null | null | openprescribing/frontend/management/commands/infer_practice_boundaries.py | annapowellsmith/openpresc | cfa9fb07d6fc2ee304159c04fcc132cefcf78745 | [
"MIT"
] | null | null | null | openprescribing/frontend/management/commands/infer_practice_boundaries.py | annapowellsmith/openpresc | cfa9fb07d6fc2ee304159c04fcc132cefcf78745 | [
"MIT"
] | null | null | null | """
Infer boundaries for practices from their locations using a Voronoi partition
(i.e. assign each point in the country to its closest practice)
These boundaries have no real meaning but they're easy enough to explain and
they allow us to render plausible-looking maps for arbitrary collections of
practices.
The boundaries are clipped at the national border to stop them extending into
the sea -- or Wales -- and generally looking ridiculous.
"""
import os
import random
import string
from django.conf import settings
from django.contrib.gis.db.models import Collect, Union
from django.contrib.gis.geos import GEOSException, GEOSGeometry, MultiPolygon, Polygon
from django.core.management.base import BaseCommand
from django.db import connection, transaction
from django.db.models import Func
from frontend.models import PCT, Practice
NATIONAL_BOUNDARY_FILE = os.path.join(
settings.REPO_ROOT, "openprescribing/media/geojson/england-boundary.geojson"
)
class Command(BaseCommand):
    # NOTE: no class docstring on purpose — `__doc__` inside the class body
    # must keep resolving to the *module* docstring used as the help text.
    help = __doc__
    def handle(self, *args, **options):
        """Entry point invoked by the Django management-command framework."""
        infer_practice_boundaries()
def get_practices():
    """Return practices with a known location and setting 4, excluding any
    that are retired, dormant or closed."""
    inactive_statuses = (
        Practice.STATUS_RETIRED,
        Practice.STATUS_DORMANT,
        Practice.STATUS_CLOSED,
    )
    located = Practice.objects.filter(location__isnull=False, setting=4)
    return located.exclude(status_code__in=inactive_statuses)
def infer_practice_boundaries():
    """Compute a Voronoi region per practice, clip each to the national
    boundary, and persist it on the practice's `boundary` field."""
    practices = get_practices()
    # ST_VoronoiPolygons partitions the plane so every point is assigned to
    # its nearest practice location.
    partition = practices.aggregate(
        voronoi=Func(Collect("location"), function="ST_VoronoiPolygons")
    )["voronoi"]
    national_boundary = get_national_boundary()
    practice_regions = get_practice_code_to_region_map(partition, national_boundary)
    # One transaction so all boundaries update atomically.
    with transaction.atomic():
        for practice in practices:
            practice.boundary = practice_regions[practice.code]
            practice.save(update_fields=["boundary"])
def get_practice_code_to_region_map(regions, clip_boundary):
    """Open a database cursor and delegate to the cursor-taking helper."""
    db_cursor = connection.cursor()
    with db_cursor:
        return _get_practice_code_to_region_map(db_cursor, regions, clip_boundary)
def _get_practice_code_to_region_map(cursor, regions, clip_boundary):
    """
    Return a dict mapping practice codes to the region in `regions` in which
    they're located, with returned regions clipped to `clip_boundary`
    """
    # Temporary tables are automatically deleted when the connection closes,
    # but during testing we can sometimes have multiple process trying to
    # create the same temporary table so we make the name unique
    random_str = "".join([random.choice(string.ascii_lowercase) for _ in range(8)])
    temporary_table_name = "regions_{}".format(random_str)

    def cursor_execute(sql, *params):
        # Interpolates the locally-generated (trusted) table name; real query
        # values still go through the parameterized `params`.
        cursor.execute(sql.format(regions=temporary_table_name), *params)

    cursor_execute(
        "CREATE TEMPORARY TABLE {regions} (original GEOMETRY, clipped GEOMETRY)"
    )
    bad_practices = []
    for region in regions:
        try:
            clipped = region.intersection(clip_boundary)
            # This is a workaround for the error "Relate Operation called with a
            # LWGEOMCOLLECTION type" which happens when clipped boundaries end up
            # including things which aren't polygons (i.e. points or lines) and we then
            # try to do ST_Contains queries on them. Generating a zero-width buffer
            # causes all non-polygons to get dropped. See:
            # https://lists.osgeo.org/pipermail/postgis-users/2008-August/020740.html
            # (Why clipping would result in non-polygons in some cases is totally
            # unclear, but somehow it does.)
            clipped = clipped.buffer(0.0)
        except GEOSException as e:
            clipped = None
            # We sometimes get this error when we have "bad practices" as below; we want
            # to swallow it so we can identify all the problematic cases rather than
            # dying on the first one
            if str(e) != (
                "Error encountered checking Geometry returned from GEOS C function "
                '"GEOSIntersection_r".'
            ):
                raise
        if clipped is None or clipped.empty:
            # Practice(s) fall outside the national boundary: collect them so
            # we can report them all together below.
            bad_practices.extend(get_practices().filter(location__within=region))
        else:
            cursor_execute(
                "INSERT INTO {regions} (original, clipped) VALUES (%s, %s)",
                [region.ewkb, clipped.ewkb],
            )
    if bad_practices:
        practice_desc = "\n".join(
            (
                f"{p.code}: {p.name} ({p.postcode}) http://www.openstreetmap.org/"
                f"?zoom=12&mlat={p.location.y}&mlon={p.location.x}"
            )
            for p in bad_practices
        )
        raise RuntimeError(
            f"Some practices appear to be located entirely outside the national "
            f"boundary (as determined by aggregating all CCG boundaries) so probably "
            f"there's some dodgy data somewhere. Offending practices are:\n\n"
            f"{practice_desc}"
        )
    cursor_execute("CREATE INDEX {regions}_idx ON {regions} USING GIST (original)")
    cursor_execute("ANALYSE {regions}")
    # We match practices to regions using the original, unclipped boundary.
    # This allows us to handle the case that a practice lies just outside its
    # clipped boundary due to imprecision in the geographic data.
    cursor_execute(
        """
        SELECT
            p.code,
            r.clipped
        FROM
            frontend_practice AS p
        JOIN
            {regions} AS r
        ON
            ST_Contains(r.original, p.location)
        """
    )
    return dict(cursor.fetchall())
def get_national_boundary():
    """Load the national boundary geometry from the bundled GeoJSON file."""
    # `geos.fromfile` exists in theory but doesn't work, so read bytes manually.
    with open(NATIONAL_BOUNDARY_FILE, "rb") as boundary_file:
        return GEOSGeometry(boundary_file.read())
# This function is left in place in case we ever want to update the boundary
# file, which we shouldn't need to under ordinary circumstances. The reason we
# use a static file rather than dynamically generating the boundary from CCG
# data each time is that we can't always guarantee to have complete CCG
# boundary data and we don't want that to prevent us from importing practice
# data
def update_national_boundary_file():
    """
    Generate a national boundary by joining together all CCG boundaries and
    write it to disk (as GeoJSON, at NATIONAL_BOUNDARY_FILE).
    Run with:
    echo 'import frontend.management.commands.infer_practice_boundaries as c;' \
        'c.update_national_boundary_file()' \
        | ./manage.py shell
    """
    # Refuse to run if any active CCG lacks boundary data — the union below
    # would otherwise silently produce an incomplete national boundary.
    ccgs_without_boundary = PCT.objects.filter(
        org_type="CCG", close_date__isnull=True, boundary__isnull=True
    )
    if ccgs_without_boundary.exists():
        raise RuntimeError(
            """
            Some active CCGs missing boundary data, meaning we can't reliably
            synthesize a national boundary by aggregating CCGs
            """
        )
    boundary = PCT.objects.filter(boundary__isnull=False).aggregate(
        boundary=Union("boundary")
    )["boundary"]
    # Add a tiny bit of buffer to the boundary otherwise, due to inaccuracies, practices
    # very close to the edge can sometimes end up outside
    boundary = boundary.buffer(0.003)
    # Get rid of any holes in the resulting polygons: the boundary follows rivers quite
    # a long way inland and, combined with the buffering above, this can leave holes
    boundary = MultiPolygon([Polygon(element.exterior_ring) for element in boundary])
    # Merge any overlapping polygons
    boundary = boundary.unary_union
    with open(NATIONAL_BOUNDARY_FILE, "w") as f:
        f.write(boundary.geojson)
| 38.580808 | 88 | 0.679277 | import os
import random
import string
from django.conf import settings
from django.contrib.gis.db.models import Collect, Union
from django.contrib.gis.geos import GEOSException, GEOSGeometry, MultiPolygon, Polygon
from django.core.management.base import BaseCommand
from django.db import connection, transaction
from django.db.models import Func
from frontend.models import PCT, Practice
NATIONAL_BOUNDARY_FILE = os.path.join(
settings.REPO_ROOT, "openprescribing/media/geojson/england-boundary.geojson"
)
class Command(BaseCommand):
help = __doc__
def handle(self, *args, **options):
infer_practice_boundaries()
def get_practices():
return Practice.objects.filter(location__isnull=False, setting=4).exclude(
status_code__in=(
Practice.STATUS_RETIRED,
Practice.STATUS_DORMANT,
Practice.STATUS_CLOSED,
)
)
def infer_practice_boundaries():
practices = get_practices()
partition = practices.aggregate(
voronoi=Func(Collect("location"), function="ST_VoronoiPolygons")
)["voronoi"]
national_boundary = get_national_boundary()
practice_regions = get_practice_code_to_region_map(partition, national_boundary)
with transaction.atomic():
for practice in practices:
practice.boundary = practice_regions[practice.code]
practice.save(update_fields=["boundary"])
def get_practice_code_to_region_map(regions, clip_boundary):
with connection.cursor() as cursor:
return _get_practice_code_to_region_map(cursor, regions, clip_boundary)
def _get_practice_code_to_region_map(cursor, regions, clip_boundary):
random_str = "".join([random.choice(string.ascii_lowercase) for _ in range(8)])
temporary_table_name = "regions_{}".format(random_str)
def cursor_execute(sql, *params):
cursor.execute(sql.format(regions=temporary_table_name), *params)
cursor_execute(
"CREATE TEMPORARY TABLE {regions} (original GEOMETRY, clipped GEOMETRY)"
)
bad_practices = []
for region in regions:
try:
clipped = region.intersection(clip_boundary)
# LWGEOMCOLLECTION type" which happens when clipped boundaries end up
# try to do ST_Contains queries on them. Generating a zero-width buffer
# causes all non-polygons to get dropped. See:
# https://lists.osgeo.org/pipermail/postgis-users/2008-August/020740.html
# (Why clipping would result in non-polygons in some cases is totally
# unclear, but somehow it does.)
clipped = clipped.buffer(0.0)
except GEOSException as e:
clipped = None
# We sometimes get this error when we have "bad practices" as below; we want
# to swallow it so we can identify all the problematic cases rather than
# dying on the first one
if str(e) != (
"Error encountered checking Geometry returned from GEOS C function "
'"GEOSIntersection_r".'
):
raise
if clipped is None or clipped.empty:
bad_practices.extend(get_practices().filter(location__within=region))
else:
cursor_execute(
"INSERT INTO {regions} (original, clipped) VALUES (%s, %s)",
[region.ewkb, clipped.ewkb],
)
if bad_practices:
practice_desc = "\n".join(
(
f"{p.code}: {p.name} ({p.postcode}) http://www.openstreetmap.org/"
f"?zoom=12&mlat={p.location.y}&mlon={p.location.x}"
)
for p in bad_practices
)
raise RuntimeError(
f"Some practices appear to be located entirely outside the national "
f"boundary (as determined by aggregating all CCG boundaries) so probably "
f"there's some dodgy data somewhere. Offending practices are:\n\n"
f"{practice_desc}"
)
cursor_execute("CREATE INDEX {regions}_idx ON {regions} USING GIST (original)")
cursor_execute("ANALYSE {regions}")
cursor_execute(
"""
SELECT
p.code,
r.clipped
FROM
frontend_practice AS p
JOIN
{regions} AS r
ON
ST_Contains(r.original, p.location)
"""
)
return dict(cursor.fetchall())
def get_national_boundary():
with open(NATIONAL_BOUNDARY_FILE, "rb") as f:
contents = f.read()
return GEOSGeometry(contents)
# use a static file rather than dynamically generating the boundary from CCG
# data each time is that we can't always guarantee to have complete CCG
# data
def update_national_boundary_file():
ccgs_without_boundary = PCT.objects.filter(
org_type="CCG", close_date__isnull=True, boundary__isnull=True
)
if ccgs_without_boundary.exists():
raise RuntimeError(
"""
Some active CCGs missing boundary data, meaning we can't reliably
synthesize a national boundary by aggregating CCGs
"""
)
boundary = PCT.objects.filter(boundary__isnull=False).aggregate(
boundary=Union("boundary")
)["boundary"]
boundary = boundary.buffer(0.003)
boundary = MultiPolygon([Polygon(element.exterior_ring) for element in boundary])
boundary = boundary.unary_union
with open(NATIONAL_BOUNDARY_FILE, "w") as f:
f.write(boundary.geojson)
| true | true |
1c3c6bdcdaeb74ab1e9d878a49b0c9660f5344cb | 1,836 | py | Python | HCA_In_and_Out/Q_In_and_Out_orig.py | hotchilianalytics/hca-resources | 051fcad7bf94ff0b7543adb227a769f0b9cead67 | [
"Apache-2.0"
] | 2 | 2022-02-22T12:46:48.000Z | 2022-03-28T21:58:13.000Z | HCA_In_and_Out/Q_In_and_Out_orig.py | hotchilianalytics/hca-resources | 051fcad7bf94ff0b7543adb227a769f0b9cead67 | [
"Apache-2.0"
] | null | null | null | HCA_In_and_Out/Q_In_and_Out_orig.py | hotchilianalytics/hca-resources | 051fcad7bf94ff0b7543adb227a769f0b9cead67 | [
"Apache-2.0"
] | 6 | 2021-05-26T14:56:40.000Z | 2022-02-14T15:56:27.000Z | # Vlad Code from: Aleksei Dremov in
# https://www.quantopian.com/posts/live-slash-paper-trade-the-in-out-stragegy
# Price relative ratios (intersection) with wait days
import numpy as np
# -----------------------------------------------------------------------------------------------
# Universe: hold STOCKS when "in" (BULL=1), BONDS when "out"; LEV = leverage.
STOCKS = symbols('QQQ'); BONDS = symbols('TLT','IEF'); LEV = 1.00; wt = {};
# Signal pairs: A/B (SLV/GLD) and C/D (XLI/XLU) price-relative ratios.
A = symbol('SLV'); B = symbol('GLD'); C = symbol('XLI'); D = symbol('XLU');
# VOLA: volatility lookback (days); LB: ratio threshold; BULL/COUNT/OUT_DAY
# are mutable state updated in daily_check; RET_INITIAL scales the windows.
MKT = symbol('QQQ'); VOLA = 126; LB = 1.00; BULL = 1; COUNT = 0; OUT_DAY = 0; RET_INITIAL = 80;
# -----------------------------------------------------------------------------------------------
def initialize(context):
    """Schedule the daily in/out signal check and end-of-day bookkeeping."""
    # Evaluate the signal 140 minutes after the open; record at the close.
    schedule_function(daily_check, date_rules.every_day(), time_rules.market_open(minutes = 140))
    schedule_function(record_vars, date_rules.every_day(), time_rules.market_close())
def daily_check(context, data):
    """Rebalance between STOCKS and BONDS based on the in/out signal.

    Goes "out" (into bonds) when both price-relative ratios (A/B and C/D)
    fall below the LB threshold, and back "in" (stocks) once a
    volatility-scaled waiting period has elapsed.
    """
    global BULL, COUNT, OUT_DAY
    # Annualised market volatility over the VOLA-day lookback window.
    vola = data.history(MKT, 'price', VOLA + 1, '1d').pct_change().std() * np.sqrt(252)
    WAIT_DAYS = int(vola * RET_INITIAL)
    RET = int((1.0 - vola) * RET_INITIAL)
    P = data.history([A, B, C, D], 'price', RET + 2, '1d').iloc[:-1].dropna()
    ratio_ab = (P[A].iloc[-1] / P[A].iloc[0]) / (P[B].iloc[-1] / P[B].iloc[0])
    ratio_cd = (P[C].iloc[-1] / P[C].iloc[0]) / (P[D].iloc[-1] / P[D].iloc[0])
    # Renamed from `exit`, which shadowed the builtin of the same name.
    exit_signal = ratio_ab < LB and ratio_cd < LB
    if exit_signal:
        BULL = 0
        OUT_DAY = COUNT
    elif COUNT >= OUT_DAY + WAIT_DAYS:
        BULL = 1
    COUNT += 1
    # Split the target leverage equally across whichever basket is active.
    wt_stk = LEV if BULL else 0
    wt_bnd = 0 if BULL else LEV
    for sec in STOCKS:
        wt[sec] = wt_stk / len(STOCKS)
    for sec in BONDS:
        wt[sec] = wt_bnd / len(BONDS)
    for sec, weight in wt.items():
        order_target_percent(sec, weight)
    record(wt_bnd=wt_bnd, wt_stk=wt_stk)
def record_vars(context, data):
    """Record account leverage at the close for the performance chart."""
    record(leverage = context.account.leverage)
import numpy as np
STOCKS = symbols('QQQ'); BONDS = symbols('TLT','IEF'); LEV = 1.00; wt = {};
A = symbol('SLV'); B = symbol('GLD'); C = symbol('XLI'); D = symbol('XLU');
MKT = symbol('QQQ'); VOLA = 126; LB = 1.00; BULL = 1; COUNT = 0; OUT_DAY = 0; RET_INITIAL = 80;
def initialize(context):
schedule_function(daily_check, date_rules.every_day(), time_rules.market_open(minutes = 140))
schedule_function(record_vars, date_rules.every_day(), time_rules.market_close())
def daily_check(context,data):
global BULL, COUNT, OUT_DAY
vola = data.history(MKT, 'price', VOLA + 1, '1d').pct_change().std() * np.sqrt(252)
WAIT_DAYS = int(vola * RET_INITIAL)
RET = int((1.0 - vola) * RET_INITIAL)
P = data.history([A,B,C,D], 'price', RET + 2, '1d').iloc[:-1].dropna()
ratio_ab = (P[A].iloc[-1] / P[A].iloc[0]) / (P[B].iloc[-1] / P[B].iloc[0])
ratio_cd = (P[C].iloc[-1] / P[C].iloc[0]) / (P[D].iloc[-1] / P[D].iloc[0])
exit = ratio_ab < LB and ratio_cd < LB
if exit: BULL = 0; OUT_DAY = COUNT;
elif (COUNT >= OUT_DAY + WAIT_DAYS): BULL = 1
COUNT += 1
wt_stk = LEV if BULL else 0;
wt_bnd = 0 if BULL else LEV;
for sec in STOCKS: wt[sec] = wt_stk / len(STOCKS);
for sec in BONDS: wt[sec] = wt_bnd / len(BONDS)
for sec, weight in wt.items():
order_target_percent(sec, weight)
record( wt_bnd = wt_bnd, wt_stk = wt_stk )
def record_vars(context, data):
record(leverage = context.account.leverage) | true | true |
1c3c6c0e4797303db7b5ac6091ee96eacc4c40da | 2,386 | py | Python | tools/c7n_gcp/tests/test_notify_gcp.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 2 | 2022-02-16T07:45:20.000Z | 2022-02-19T10:25:30.000Z | tools/c7n_gcp/tests/test_notify_gcp.py | al3pht/cloud-custodian | ce6613d1b716f336384c5e308eee300389e6bf50 | [
"Apache-2.0"
] | 28 | 2020-09-23T03:56:48.000Z | 2021-04-21T19:08:55.000Z | tools/c7n_gcp/tests/test_notify_gcp.py | lfranchini31/cloud-custodian | 1830fe4b9a59ff6afb675985c9ea531571616a76 | [
"Apache-2.0"
] | 9 | 2019-11-18T07:46:44.000Z | 2020-04-15T11:20:20.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from gcp_common import BaseTest
from c7n_gcp.client import Session
import mock
class NotifyTest(BaseTest):

    def test_pubsub_notify(self):
        """The `notify` action publishes the expected payload to pubsub."""
        factory = self.replay_flight_data("notify-action")

        orig_client = Session.client
        stub_client = mock.MagicMock()
        calls = []

        def client_factory(*args, **kw):
            # First client created is the real (replayed) one used to list
            # topics; subsequent clients (the publish) hit the stub so we can
            # assert on the outgoing message.
            calls.append(args)
            if len(calls) == 1:
                return orig_client(*args, **kw)
            return stub_client

        self.patch(Session, 'client', client_factory)
        p = self.load_policy({
            'name': 'test-notify',
            'resource': 'gcp.pubsub-topic',
            'filters': [
                {
                    'name': 'projects/cloud-custodian/topics/gcptestnotifytopic'
                }
            ],
            'actions': [
                {'type': 'notify',
                 'template': 'default',
                 'priority_header': '2',
                 'subject': 'testing notify action',
                 'to': ['user@domain.com'],
                 'transport':
                     {'type': 'pubsub',
                      'topic': 'projects/cloud-custodian/topics/gcptestnotifytopic'}
                 }
            ]}, session_factory=factory)

        resources = p.run()

        self.assertEqual(len(resources), 1)
        stub_client.execute_command.assert_called_once()
        # The message body is the zlib-compressed, base64-encoded payload.
        stub_client.execute_command.assert_called_with(
            'publish', {
                'topic': 'projects/cloud-custodian/topics/gcptestnotifytopic',
                'body': {
                    'messages': {
                        'data': ('eJzdUrtqAzEQ7PUVh+qcjd2EuEqVLl8QgpFXe2cFnVZIq8Bh/O/'
                                 'RA58vkCqkSrHNDDuPZS9C4ic6lofOJWsfhFQAlBwfjc6YhBSZtFGu3'
                                 '+2fdvLO/0wGHA25wilrC+DJGpgzcBHSqQkLxRi5d8RmmNtOpBSgUiP4jU'
                                 '+nmE49kzdQ+MFYxhAz/SZWKj7QBwLHLVhKul+'
                                 'ybOti3GapYtR8mpi4ivfagHPIRZBnXwXviRgnbxVXVOOgkuXaJRgKhuf'
                                 'jGZXGUNh9wXPakuRWzbixa1pdc6qSVO1kihieNU3KuA3QJGsgDspFT4Hb'
                                 'nW6B2iHadon/69K5trguxb+b/OPWq9/6i+/JcvDoDq+'
                                 'K4Yz6ZfWVTbUcucwX+HoY5Q==')
                    }}})
| 36.151515 | 92 | 0.523889 |
from gcp_common import BaseTest
from c7n_gcp.client import Session
import mock
class NotifyTest(BaseTest):
def test_pubsub_notify(self):
factory = self.replay_flight_data("notify-action")
orig_client = Session.client
stub_client = mock.MagicMock()
calls = []
def client_factory(*args, **kw):
calls.append(args)
if len(calls) == 1:
return orig_client(*args, **kw)
return stub_client
self.patch(Session, 'client', client_factory)
p = self.load_policy({
'name': 'test-notify',
'resource': 'gcp.pubsub-topic',
'filters': [
{
'name': 'projects/cloud-custodian/topics/gcptestnotifytopic'
}
],
'actions': [
{'type': 'notify',
'template': 'default',
'priority_header': '2',
'subject': 'testing notify action',
'to': ['user@domain.com'],
'transport':
{'type': 'pubsub',
'topic': 'projects/cloud-custodian/topics/gcptestnotifytopic'}
}
]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
stub_client.execute_command.assert_called_once()
stub_client.execute_command.assert_called_with(
'publish', {
'topic': 'projects/cloud-custodian/topics/gcptestnotifytopic',
'body': {
'messages': {
'data': ('eJzdUrtqAzEQ7PUVh+qcjd2EuEqVLl8QgpFXe2cFnVZIq8Bh/O/'
'RA58vkCqkSrHNDDuPZS9C4ic6lofOJWsfhFQAlBwfjc6YhBSZtFGu3'
'+2fdvLO/0wGHA25wilrC+DJGpgzcBHSqQkLxRi5d8RmmNtOpBSgUiP4jU'
'+nmE49kzdQ+MFYxhAz/SZWKj7QBwLHLVhKul+'
'ybOti3GapYtR8mpi4ivfagHPIRZBnXwXviRgnbxVXVOOgkuXaJRgKhuf'
'jGZXGUNh9wXPakuRWzbixa1pdc6qSVO1kihieNU3KuA3QJGsgDspFT4Hb'
'nW6B2iHadon/69K5trguxb+b/OPWq9/6i+/JcvDoDq+'
'K4Yz6ZfWVTbUcucwX+HoY5Q==')
}}})
| true | true |
1c3c6d0902d1556e63c9c900ce2de241056464e4 | 751 | py | Python | tests/instance_testing.py | zachbateman/tracer | 03a27113ac3abd888b49a1ea9717a0b7ab2999ce | [
"MIT"
] | null | null | null | tests/instance_testing.py | zachbateman/tracer | 03a27113ac3abd888b49a1ea9717a0b7ab2999ce | [
"MIT"
] | null | null | null | tests/instance_testing.py | zachbateman/tracer | 03a27113ac3abd888b49a1ea9717a0b7ab2999ce | [
"MIT"
] | null | null | null | # import unittest
import sys
sys.path.insert(1, '..')
import tracer
class TestClass(metaclass=tracer.Tracer):
    """Small example class used to exercise the `tracer.Tracer` metaclass."""
    def __init__(self):
        # Running total manipulated by add/subtract.
        self.value = 0
    def add(self, val):
        """Add `val`; deliberately raises ZeroDivisionError when val == 5
        so the tracer's error handling can be demonstrated."""
        self.value += val
        if val == 5:
            x = 1 / 0
    def subtract(self, val):
        """Subtract `val` from the running total."""
        self.value -= val
if __name__ == '__main__':
    # Drive the tracer with two independent instances; print_trace is not
    # defined on TestClass, so it is presumably provided by the Tracer
    # metaclass — confirm against tracer's implementation.
    test = TestClass()
    test2 = TestClass()
    test.add(3)
    test.subtract(2)
    test.add(4)
    test.add(8)
    test.print_trace()
    test.add(1)
    test2.add(7)
    # use below line to test error handling and trace printout
    # test.add(5)
    test2.subtract(3)
    test.print_trace()
    test2.add(1)
    test.print_trace()
    test2.print_trace()
    test.print_trace()
    test2.print_trace()
| 17.465116 | 62 | 0.588549 |
import sys
sys.path.insert(1, '..')
import tracer
class TestClass(metaclass=tracer.Tracer):
def __init__(self):
self.value = 0
def add(self, val):
self.value += val
if val == 5:
x = 1 / 0
def subtract(self, val):
self.value -= val
if __name__ == '__main__':
test = TestClass()
test2 = TestClass()
test.add(3)
test.subtract(2)
test.add(4)
test.add(8)
test.print_trace()
test.add(1)
test2.add(7)
test2.subtract(3)
test.print_trace()
test2.add(1)
test.print_trace()
test2.print_trace()
test.print_trace()
test2.print_trace()
| true | true |
1c3c6d3d8274d50cb8f0d89b2f5f9510d3d2225f | 18,223 | py | Python | rock/run.py | vita-epfl/rock-pytorch | 6f4c86d3fec7fe3b0ce65d2687d144e9698e964f | [
"Apache-2.0"
] | 9 | 2020-06-29T17:21:30.000Z | 2022-01-26T08:46:11.000Z | rock/run.py | vita-epfl/rock-pytorch | 6f4c86d3fec7fe3b0ce65d2687d144e9698e964f | [
"Apache-2.0"
] | null | null | null | rock/run.py | vita-epfl/rock-pytorch | 6f4c86d3fec7fe3b0ce65d2687d144e9698e964f | [
"Apache-2.0"
] | null | null | null | import argparse
from typing import Union
import torch
def int_or_none(value: str) -> Union[None, int]:
    """Parse *value* as an int, mapping the literal string 'None' to None."""
    return None if value == 'None' else int(value)
def cli() -> argparse.Namespace:
    """Build the command line interface and parse sys.argv.

    Sub-commands: ``prep``, ``train``, ``eval``, ``create_image_folder`` and
    ``detect``; the selected one is available as ``args.command``. Returns
    the parsed :class:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help='Different parsers for main actions', dest='command')

    prep_epilog = 'Note: If no val_split_path is provided, this command creates a train/test set. ' \
                  'Otherwise, this command creates a train/val/test set, ' \
                  'with the val set extracted from the test set. ' \
                  'It is recommended to change the default train_save_path and test_save_path when adding a val set.'

    prep_parser = subparsers.add_parser("prep",
                                        help='preprocess the NYUv2 dataset',
                                        epilog=prep_epilog)

    train_parser = subparsers.add_parser("train",
                                         help='train the ROCK network')

    eval_parser = subparsers.add_parser("eval",
                                        help='evaluate a trained ROCK network using COCOeval')

    image_folder_parser = subparsers.add_parser("create_image_folder",
                                                help="creates an image folder from the preprocessed NYUv2 dataset")

    detect_parser = subparsers.add_parser("detect",
                                          help='detect objects using a trained network')

    # Arguments for preprocessing
    prep_parser.add_argument('--dataset_path',
                             help='path to the NYUv2 dataset (default: %(default)s)',
                             default='data/nyu_depth_v2_labeled.mat')

    prep_parser.add_argument('--splits_path',
                             help='path to the NYUv2 official splits (default: %(default)s)',
                             default='data/splits.mat')

    prep_parser.add_argument('--normals_path',
                             help='path to the folder containing the normals and normals masks (default: %(default)s)',
                             default='data/normals_gt')

    prep_parser.add_argument('--val_split_path',
                             help='path containing the samples to be used for validation. '
                                  'No validation data if no path is provided (default: %(default)s)',
                             default=None)

    prep_parser.add_argument('--train_save_path',
                             help='path where the train data will be saved (default: %(default)s)',
                             default='data/train_test/nyuv2_train')

    prep_parser.add_argument('--test_save_path',
                             help='path where the test data will be saved (default: %(default)s)',
                             default='data/train_test/nyuv2_test')

    prep_parser.add_argument('--val_save_path',
                             help='path where the val data will be saved '
                                  '(if an argument for --val_split_path is provided) (default: %(default)s)',
                             default='data/train_val_test/nyuv2_val')

    prep_parser.add_argument('--no_verbose',
                             help='disable verbose', action='store_true')

    # Arguments for training
    train_parser.add_argument('--train_path',
                              help='path to the training data (default: %(default)s)',
                              default='data/train_test/nyuv2_train')

    train_parser.add_argument('--val_path',
                              help='path to the validation data (default: %(default)s)',
                              default=None)

    train_parser.add_argument('--device',
                              help='gpu used for training (type: %(type)s) (default: %(default)s)',
                              type=torch.device, default='cuda')

    train_parser.add_argument('--num_iters',
                              help='number of iterations (type: %(type)s) (default: %(default)s)',
                              type=int, default=30_000)

    train_parser.add_argument('--lr',
                              help='learning rate (type: %(type)s) (default: %(default)s)',
                              type=float, default=5e-5)

    train_parser.add_argument('--weight_decay',
                              help='weight decay for optimizer (type: %(type)s) (default: %(default)s)',
                              type=float, default=2e-3)

    train_parser.add_argument('--scheduler_milestones',
                              help='iteration milestones at which the learning rate is decreased '
                                   '(type: %(type)s) (default: %(default)s)',
                              type=int, default=[25_000, ], nargs='+')

    train_parser.add_argument('--scheduler_gamma',
                              help='gamma value for the scheduler, by which the learning rate is multiplied '
                                   'at each milestone (type: %(type)s) (default: %(default)s)',
                              type=float, default=0.1)

    train_parser.add_argument('--force_crops',
                              help='crop all training images during data augmentation, '
                                   'instead of leaving some images uncropped',
                              action='store_true')

    train_parser.add_argument('--no_rock',
                              help='remove rock block from model '
                                   '(obtains a baseline single shot detector, no auxiliary tasks)',
                              action='store_true')

    train_parser.add_argument('--aux_tasks',
                              help='list of auxiliary tasks to train on (type: %(type)s) (default: %(default)s)',
                              type=str, default=['scene', 'depth', 'normals'], nargs='*')

    train_parser.add_argument('--use_all_priors_conf_loss',
                              help='switches to a loss taking into account all negative examples (all priors) instead '
                                   'of just the top negative examples for the confidence loss.',
                              action='store_true')

    train_parser.add_argument('--writer_path',
                              help='path to the folder where the tensorboard runs will be stored '
                                   '(i.e. data/runs/rock) (default: %(default)s)',
                              default=None)

    train_parser.add_argument('--save_path',
                              help='path to the folder where the model weights will be saved '
                                   '(i.e. data/models/rock) (default: %(default)s)',
                              default='data/models/default_model/')

    train_parser.add_argument('--checkpoint_path',
                              help='path to the folder where a trained model is saved '
                                   '(i.e. models/rock). If provided, training will be resumed on that model '
                                   '(default: %(default)s)',
                              default=None)

    train_parser.add_argument('--coco_json_save_path',
                              help='path to which ground truth and prediction JSON files are saved '
                                   'using the COCO data format, which are then used for evaluation '
                                   '(more info: https://cocodataset.org/#format-data) (default: %(default)s)',
                              default='data/eval')

    train_parser.add_argument('--save_best_on_val',
                              help='saves the model with the best mAP on val data',
                              action='store_true')

    train_parser.add_argument('--val_eval_freq',
                              help='frequency at which the model is evaluated on the validation data, in epochs. '
                                   'If None, model is never evaluated (type: %(type)s) (default: %(default)s)',
                              type=int_or_none, default=10)

    train_parser.add_argument('--train_eval_freq',
                              help='frequency at which the model is evaluated on the training data, in epochs. '
                                   'If None, model is never evaluated (type: %(type)s) (default: %(default)s)',
                              type=int_or_none, default=50)

    train_parser.add_argument('--image_to_tb_freq',
                              help='frequency at which an image grid is added to Tensorboard, in epochs. '
                                   'If None, no image grid is added (type: %(type)s) (default: %(default)s)',
                              type=int_or_none, default=20)

    train_parser.add_argument('--model_save_freq',
                              help='frequency at which the model is saved, in epochs. '
                                   'If None, no model is saved at a specific epoch number '
                                   '(but can still be saved when training is finished or if --save_best_on_val is set) '
                                   '(type: %(type)s) (default: %(default)s)',
                              type=int_or_none, default=None)

    train_parser.add_argument('--no_verbose',
                              help='disable verbose', action='store_true')

    # Arguments for evaluation
    eval_parser.add_argument('model_path',
                             help='path containing the model weights')

    eval_parser.add_argument('--test_path',
                             help='path to the folder containing the test data on which to run the evaluation '
                                  '(default: %(default)s)',
                             default='data/train_test/nyuv2_test')

    eval_parser.add_argument('--device',
                             help='gpu used for evaluating (type: %(type)s) (default: %(default)s)',
                             type=torch.device, default='cuda')

    eval_parser.add_argument('--no_rock',
                             help='remove rock block from model '
                                  '(obtains a baseline single shot detector, no auxiliary tasks)',
                             action='store_true')

    eval_parser.add_argument('--aux_tasks',
                             help='list of auxiliary tasks to train on (type: %(type)s) (default: %(default)s)',
                             type=str, default=['scene', 'depth', 'normals'], nargs='*')

    eval_parser.add_argument('--coco_json_save_path',
                             help='path to which ground truth and prediction JSON files are saved '
                                  'using the COCO data format, which are then used for evaluation '
                                  '(more info: https://cocodataset.org/#format-data) (default: %(default)s)',
                             default='data/eval')

    eval_parser.add_argument('--show_all_cats',
                             help='show the mAP for all categories',
                             action='store_true')

    eval_parser.add_argument('--no_verbose',
                             help='disable verbose', action='store_true')

    # Arguments for image folder creation
    image_folder_parser.add_argument('--data_path',
                                     help='path to the folder containing the images to extract (default: %(default)s)',
                                     default='data/train_test/nyuv2_test')

    image_folder_parser.add_argument('--save_path',
                                     help='path to the folder in which to save the new images (default: %(default)s)',
                                     default='data/detection/images')

    image_folder_parser.add_argument('--no_verbose',
                                     help='disable verbose', action='store_true')

    # Arguments for object detection
    detect_parser.add_argument('model_path',
                               help='path containing the model weights')

    detect_parser.add_argument('--image_path',
                               help='path to the folder in which the images are saved (default: %(default)s)',
                               default='data/detection/images')

    detect_parser.add_argument('--detection_output_path',
                               help='path to the folder in which the object detections are saved (default: %(default)s)',
                               default='data/detection/output')

    detect_parser.add_argument('--scene_output_path',
                               help='path to the folder where the scene predictions will be saved. '
                                    'Only works if the model contains a ROCK block (default: %(default)s)',
                               default=None)

    detect_parser.add_argument('--depth_output_path',
                               help='path to the folder where the depth predictions will be saved. '
                                    'Only works if the model contains a ROCK block (default: %(default)s)',
                               default=None)

    detect_parser.add_argument('--normals_output_path',
                               help='path to the folder where the surface normals predictions will be saved. '
                                    'Only works if the model contains a ROCK block (default: %(default)s)',
                               default=None)

    detect_parser.add_argument('--device',
                               help='device used for object detection (type: %(type)s) (default: %(default)s)',
                               type=torch.device, default='cuda')

    detect_parser.add_argument('--no_rock',
                               help='remove rock block from model '
                                    '(obtains a baseline single shot detector, no auxiliary tasks)',
                               action='store_true')

    detect_parser.add_argument('--aux_tasks',
                               help='list of auxiliary tasks to train on (type: %(type)s) (default: %(default)s)',
                               type=str, default=['scene', 'depth', 'normals'], nargs='*')

    detect_parser.add_argument('--conf_threshold',
                               help='only show objects above a certain confidence threshold '
                                    '(type: %(type)s) (default: %(default)s)',
                               type=float, default=0.4)

    detect_parser.add_argument('--get_throughput',
                               help='shows the throughput (images/sec) of the model (forward pass only). '
                                    'This disables saving the results of the object detection to a folder',
                               action='store_true')

    detect_parser.add_argument('--no_verbose',
                               help='disable verbose', action='store_true')

    args = parser.parse_args()
    return args
def disable_rock_for_empty_aux_tasks(args: argparse.Namespace) -> argparse.Namespace:
""" Disable the rock block if no aux tasks are given, and transform aux_tasks to a tuple
"""
args.aux_tasks = tuple(args.aux_tasks)
if not args.aux_tasks:
args.no_rock = True
return args
def main() -> None:
""" Parses the command-line arguments and calls the appropriate function
"""
args = cli()
if args.command == 'prep':
from rock.prep import prep_data
prep_data(dataset_path=args.dataset_path, splits_path=args.splits_path, normals_path=args.normals_path,
train_save_path=args.train_save_path, test_save_path=args.test_save_path,
val_save_path=args.val_save_path, val_split_path=args.val_split_path, verbose=not args.no_verbose)
if args.command == 'train':
from rock.trainer import train
args = disable_rock_for_empty_aux_tasks(args)
train(train_path=args.train_path, val_path=args.val_path, device=args.device, num_iters=args.num_iters,
lr=args.lr, weight_decay=args.weight_decay, scheduler_milestones=args.scheduler_milestones,
scheduler_gamma=args.scheduler_gamma, forced_crops=args.force_crops,
aux=not args.no_rock, aux_tasks=args.aux_tasks, use_all_priors_conf_loss=args.use_all_priors_conf_loss,
writer_path=args.writer_path, save_path=args.save_path, checkpoint_path=args.checkpoint_path,
coco_json_save_path=args.coco_json_save_path, save_best_on_val=args.save_best_on_val,
val_eval_freq=args.val_eval_freq, train_eval_freq=args.train_eval_freq,
image_to_tb_freq=args.image_to_tb_freq, model_save_freq=args.model_save_freq, verbose=not args.no_verbose)
if args.command == 'eval':
from rock.eval import evaluate_model
args = disable_rock_for_empty_aux_tasks(args)
evaluate_model(model_path=args.model_path, test_path=args.test_path, device=args.device,
aux=not args.no_rock, aux_tasks=args.aux_tasks, coco_json_save_path=args.coco_json_save_path,
show_all_cats=args.show_all_cats, verbose=not args.no_verbose)
if args.command == 'create_image_folder':
from rock.datasets.image_folder import extract_image_and_save_to_folder
extract_image_and_save_to_folder(data_folder_path=args.data_path, save_folder_path=args.save_path,
verbose=not args.no_verbose)
if args.command == 'detect':
from rock.detect import object_detection
args = disable_rock_for_empty_aux_tasks(args)
object_detection(model_path=args.model_path, image_folder_path=args.image_path,
detection_output_path=args.detection_output_path, scene_output_path=args.scene_output_path,
depth_output_path=args.depth_output_path, normals_output_path=args.normals_output_path,
device=args.device, aux=not args.no_rock, aux_tasks=args.aux_tasks,
conf_threshold=args.conf_threshold, throughput=args.get_throughput,
verbose=not args.no_verbose)
if __name__ == '__main__':
main()
| 62.194539 | 121 | 0.557427 | import argparse
from typing import Union
import torch
def int_or_none(value: str) -> Union[None, int]:
if value == 'None':
return None
return int(value)
def cli() -> argparse.Namespace:
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='Different parsers for main actions', dest='command')
prep_epilog = 'Note: If no val_split_path is provided, this command creates a train/test set. ' \
'Otherwise, this command creates a train/val/test set, ' \
'with the val set extracted from the test set. ' \
'It is recommended to change the default train_save_path and test_save_path when adding a val set.'
prep_parser = subparsers.add_parser("prep",
help='preprocess the NYUv2 dataset',
epilog=prep_epilog)
train_parser = subparsers.add_parser("train",
help='train the ROCK network')
eval_parser = subparsers.add_parser("eval",
help='evaluate a trained ROCK network using COCOeval')
image_folder_parser = subparsers.add_parser("create_image_folder",
help="creates an image folder from the preprocessed NYUv2 dataset")
detect_parser = subparsers.add_parser("detect",
help='detect objects using a trained network')
prep_parser.add_argument('--dataset_path',
help='path to the NYUv2 dataset (default: %(default)s)',
default='data/nyu_depth_v2_labeled.mat')
prep_parser.add_argument('--splits_path',
help='path to the NYUv2 official splits (default: %(default)s)',
default='data/splits.mat')
prep_parser.add_argument('--normals_path',
help='path to the folder containing the normals and normals masks (default: %(default)s)',
default='data/normals_gt')
prep_parser.add_argument('--val_split_path',
help='path containing the samples to be used for validation. '
'No validation data if no path is provided (default: %(default)s)',
default=None)
prep_parser.add_argument('--train_save_path',
help='path where the train data will be saved (default: %(default)s)',
default='data/train_test/nyuv2_train')
prep_parser.add_argument('--test_save_path',
help='path where the test data will be saved (default: %(default)s)',
default='data/train_test/nyuv2_test')
prep_parser.add_argument('--val_save_path',
help='path where the val data will be saved '
'(if an argument for --val_split_path is provided) (default: %(default)s)',
default='data/train_val_test/nyuv2_val')
prep_parser.add_argument('--no_verbose',
help='disable verbose', action='store_true')
train_parser.add_argument('--train_path',
help='path to the training data (default: %(default)s)',
default='data/train_test/nyuv2_train')
train_parser.add_argument('--val_path',
help='path to the validation data (default: %(default)s)',
default=None)
train_parser.add_argument('--device',
help='gpu used for training (type: %(type)s) (default: %(default)s)',
type=torch.device, default='cuda')
train_parser.add_argument('--num_iters',
help='number of iterations (type: %(type)s) (default: %(default)s)',
type=int, default=30_000)
train_parser.add_argument('--lr',
help='learning rate (type: %(type)s) (default: %(default)s)',
type=float, default=5e-5)
train_parser.add_argument('--weight_decay',
help='weight decay for optimizer (type: %(type)s) (default: %(default)s)',
type=float, default=2e-3)
train_parser.add_argument('--scheduler_milestones',
help='iteration milestones at which the learning rate is decreased '
'(type: %(type)s) (default: %(default)s)',
type=int, default=[25_000, ], nargs='+')
train_parser.add_argument('--scheduler_gamma',
help='gamma value for the scheduler, by which the learning rate is multiplied '
'at each milestone (type: %(type)s) (default: %(default)s)',
type=float, default=0.1)
train_parser.add_argument('--force_crops',
help='crop all training images during data augmentation, '
'instead of leaving some images uncropped',
action='store_true')
train_parser.add_argument('--no_rock',
help='remove rock block from model '
'(obtains a baseline single shot detector, no auxiliary tasks)',
action='store_true')
train_parser.add_argument('--aux_tasks',
help='list of auxiliary tasks to train on (type: %(type)s) (default: %(default)s)',
type=str, default=['scene', 'depth', 'normals'], nargs='*')
train_parser.add_argument('--use_all_priors_conf_loss',
help='switches to a loss taking into account all negative examples (all priors) instead '
'of just the top negative examples for the confidence loss.',
action='store_true')
train_parser.add_argument('--writer_path',
help='path to the folder where the tensorboard runs will be stored '
'(i.e. data/runs/rock) (default: %(default)s)',
default=None)
train_parser.add_argument('--save_path',
help='path to the folder where the model weights will be saved '
'(i.e. data/models/rock) (default: %(default)s)',
default='data/models/default_model/')
train_parser.add_argument('--checkpoint_path',
help='path to the folder where a trained model is saved '
'(i.e. models/rock). If provided, training will be resumed on that model '
'(default: %(default)s)',
default=None)
train_parser.add_argument('--coco_json_save_path',
help='path to which ground truth and prediction JSON files are saved '
'using the COCO data format, which are then used for evaluation '
'(more info: https://cocodataset.org/#format-data) (default: %(default)s)',
default='data/eval')
train_parser.add_argument('--save_best_on_val',
help='saves the model with the best mAP on val data',
action='store_true')
train_parser.add_argument('--val_eval_freq',
help='frequency at which the model is evaluated on the validation data, in epochs. '
'If None, model is never evaluated (type: %(type)s) (default: %(default)s)',
type=int_or_none, default=10)
train_parser.add_argument('--train_eval_freq',
help='frequency at which the model is evaluated on the training data, in epochs. '
'If None, model is never evaluated (type: %(type)s) (default: %(default)s)',
type=int_or_none, default=50)
train_parser.add_argument('--image_to_tb_freq',
help='frequency at which an image grid is added to Tensorboard, in epochs. '
'If None, no image grid is added (type: %(type)s) (default: %(default)s)',
type=int_or_none, default=20)
train_parser.add_argument('--model_save_freq',
help='frequency at which the model is saved, in epochs. '
'If None, no model is saved at a specific epoch number '
'(but can still be saved when training is finished or if --save_best_on_val is set) '
'(type: %(type)s) (default: %(default)s)',
type=int_or_none, default=None)
train_parser.add_argument('--no_verbose',
help='disable verbose', action='store_true')
eval_parser.add_argument('model_path',
help='path containing the model weights')
eval_parser.add_argument('--test_path',
help='path to the folder containing the test data on which to run the evaluation '
'(default: %(default)s)',
default='data/train_test/nyuv2_test')
eval_parser.add_argument('--device',
help='gpu used for evaluating (type: %(type)s) (default: %(default)s)',
type=torch.device, default='cuda')
eval_parser.add_argument('--no_rock',
help='remove rock block from model '
'(obtains a baseline single shot detector, no auxiliary tasks)',
action='store_true')
eval_parser.add_argument('--aux_tasks',
help='list of auxiliary tasks to train on (type: %(type)s) (default: %(default)s)',
type=str, default=['scene', 'depth', 'normals'], nargs='*')
eval_parser.add_argument('--coco_json_save_path',
help='path to which ground truth and prediction JSON files are saved '
'using the COCO data format, which are then used for evaluation '
'(more info: https://cocodataset.org/#format-data) (default: %(default)s)',
default='data/eval')
eval_parser.add_argument('--show_all_cats',
help='show the mAP for all categories',
action='store_true')
eval_parser.add_argument('--no_verbose',
help='disable verbose', action='store_true')
image_folder_parser.add_argument('--data_path',
help='path to the folder containing the images to extract (default: %(default)s)',
default='data/train_test/nyuv2_test')
image_folder_parser.add_argument('--save_path',
help='path to the folder in which to save the new images (default: %(default)s)',
default='data/detection/images')
image_folder_parser.add_argument('--no_verbose',
help='disable verbose', action='store_true')
detect_parser.add_argument('model_path',
help='path containing the model weights')
detect_parser.add_argument('--image_path',
help='path to the folder in which the images are saved (default: %(default)s)',
default='data/detection/images')
detect_parser.add_argument('--detection_output_path',
help='path to the folder in which the object detections are saved (default: %(default)s)',
default='data/detection/output')
detect_parser.add_argument('--scene_output_path',
help='path to the folder where the scene predictions will be saved. '
'Only works if the model contains a ROCK block (default: %(default)s)',
default=None)
detect_parser.add_argument('--depth_output_path',
help='path to the folder where the depth predictions will be saved. '
'Only works if the model contains a ROCK block (default: %(default)s)',
default=None)
detect_parser.add_argument('--normals_output_path',
help='path to the folder where the surface normals predictions will be saved. '
'Only works if the model contains a ROCK block (default: %(default)s)',
default=None)
detect_parser.add_argument('--device',
help='device used for object detection (type: %(type)s) (default: %(default)s)',
type=torch.device, default='cuda')
detect_parser.add_argument('--no_rock',
help='remove rock block from model '
'(obtains a baseline single shot detector, no auxiliary tasks)',
action='store_true')
detect_parser.add_argument('--aux_tasks',
help='list of auxiliary tasks to train on (type: %(type)s) (default: %(default)s)',
type=str, default=['scene', 'depth', 'normals'], nargs='*')
detect_parser.add_argument('--conf_threshold',
help='only show objects above a certain confidence threshold '
'(type: %(type)s) (default: %(default)s)',
type=float, default=0.4)
detect_parser.add_argument('--get_throughput',
help='shows the throughput (images/sec) of the model (forward pass only). '
'This disables saving the results of the object detection to a folder',
action='store_true')
detect_parser.add_argument('--no_verbose',
help='disable verbose', action='store_true')
args = parser.parse_args()
return args
def disable_rock_for_empty_aux_tasks(args: argparse.Namespace) -> argparse.Namespace:
args.aux_tasks = tuple(args.aux_tasks)
if not args.aux_tasks:
args.no_rock = True
return args
def main() -> None:
args = cli()
if args.command == 'prep':
from rock.prep import prep_data
prep_data(dataset_path=args.dataset_path, splits_path=args.splits_path, normals_path=args.normals_path,
train_save_path=args.train_save_path, test_save_path=args.test_save_path,
val_save_path=args.val_save_path, val_split_path=args.val_split_path, verbose=not args.no_verbose)
if args.command == 'train':
from rock.trainer import train
args = disable_rock_for_empty_aux_tasks(args)
train(train_path=args.train_path, val_path=args.val_path, device=args.device, num_iters=args.num_iters,
lr=args.lr, weight_decay=args.weight_decay, scheduler_milestones=args.scheduler_milestones,
scheduler_gamma=args.scheduler_gamma, forced_crops=args.force_crops,
aux=not args.no_rock, aux_tasks=args.aux_tasks, use_all_priors_conf_loss=args.use_all_priors_conf_loss,
writer_path=args.writer_path, save_path=args.save_path, checkpoint_path=args.checkpoint_path,
coco_json_save_path=args.coco_json_save_path, save_best_on_val=args.save_best_on_val,
val_eval_freq=args.val_eval_freq, train_eval_freq=args.train_eval_freq,
image_to_tb_freq=args.image_to_tb_freq, model_save_freq=args.model_save_freq, verbose=not args.no_verbose)
if args.command == 'eval':
from rock.eval import evaluate_model
args = disable_rock_for_empty_aux_tasks(args)
evaluate_model(model_path=args.model_path, test_path=args.test_path, device=args.device,
aux=not args.no_rock, aux_tasks=args.aux_tasks, coco_json_save_path=args.coco_json_save_path,
show_all_cats=args.show_all_cats, verbose=not args.no_verbose)
if args.command == 'create_image_folder':
from rock.datasets.image_folder import extract_image_and_save_to_folder
extract_image_and_save_to_folder(data_folder_path=args.data_path, save_folder_path=args.save_path,
verbose=not args.no_verbose)
if args.command == 'detect':
from rock.detect import object_detection
args = disable_rock_for_empty_aux_tasks(args)
object_detection(model_path=args.model_path, image_folder_path=args.image_path,
detection_output_path=args.detection_output_path, scene_output_path=args.scene_output_path,
depth_output_path=args.depth_output_path, normals_output_path=args.normals_output_path,
device=args.device, aux=not args.no_rock, aux_tasks=args.aux_tasks,
conf_threshold=args.conf_threshold, throughput=args.get_throughput,
verbose=not args.no_verbose)
if __name__ == '__main__':
main()
| true | true |
1c3c6ec0ce7a0c228603fedba7710e079d0764cc | 1,325 | py | Python | updatesproducer/tests/mongodb_tests.py | AppleteeYT/Iris | b60deb6575820253bad50b48b9b39023d6440fd4 | [
"Apache-2.0"
] | null | null | null | updatesproducer/tests/mongodb_tests.py | AppleteeYT/Iris | b60deb6575820253bad50b48b9b39023d6440fd4 | [
"Apache-2.0"
] | null | null | null | updatesproducer/tests/mongodb_tests.py | AppleteeYT/Iris | b60deb6575820253bad50b48b9b39023d6440fd4 | [
"Apache-2.0"
] | null | null | null | import json
import logging
import unittest
from datetime import datetime
from unittest import TestCase
from updatesproducer.db.mongodb_config import MongoDbConfig
from updatesproducer.db.updates_repository import UpdatesRepository
class MongoDbTests(TestCase):
def __init__(self, *args, **kwargs):
super(MongoDbTests, self).__init__(*args, **kwargs)
logging.basicConfig(
format='[%(asctime)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S %z',
level=logging.DEBUG)
appsettings = json.load(open('appsettings.json'))
self.__mongodb_config = MongoDbConfig(appsettings['mongodb'])
def test_updates_repository(self):
repository = UpdatesRepository(
self.__mongodb_config,
logging.getLogger(UpdatesRepository.__name__)
)
user_id = 'test_user'
# Mongodb is less accurate with date microseconds
update_time = datetime.now().replace(microsecond=0)
repository.set_user_latest_update_time(user_id, update_time)
# Db returns a dictionary with objectid
self.assertDictContainsSubset(
dict(user_id=user_id, latest_update_time=update_time),
repository.get_user_latest_update_time(user_id)
)
if __name__ == '__main__':
unittest.main()
| 30.113636 | 69 | 0.683774 | import json
import logging
import unittest
from datetime import datetime
from unittest import TestCase
from updatesproducer.db.mongodb_config import MongoDbConfig
from updatesproducer.db.updates_repository import UpdatesRepository
class MongoDbTests(TestCase):
def __init__(self, *args, **kwargs):
super(MongoDbTests, self).__init__(*args, **kwargs)
logging.basicConfig(
format='[%(asctime)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S %z',
level=logging.DEBUG)
appsettings = json.load(open('appsettings.json'))
self.__mongodb_config = MongoDbConfig(appsettings['mongodb'])
def test_updates_repository(self):
repository = UpdatesRepository(
self.__mongodb_config,
logging.getLogger(UpdatesRepository.__name__)
)
user_id = 'test_user'
update_time = datetime.now().replace(microsecond=0)
repository.set_user_latest_update_time(user_id, update_time)
self.assertDictContainsSubset(
dict(user_id=user_id, latest_update_time=update_time),
repository.get_user_latest_update_time(user_id)
)
if __name__ == '__main__':
unittest.main()
| true | true |
1c3c6f00110fd348675027667b761d5e3bbceed6 | 62,333 | py | Python | src/transformers/models/speech_to_text/modeling_speech_to_text.py | JadeMaveric/transformers | fb2b89840bf2ab9f74702bf83af8ddf92b61efb3 | [
"Apache-2.0"
] | 34 | 2021-07-05T02:44:31.000Z | 2022-03-28T14:39:57.000Z | src/transformers/models/speech_to_text/modeling_speech_to_text.py | JadeMaveric/transformers | fb2b89840bf2ab9f74702bf83af8ddf92b61efb3 | [
"Apache-2.0"
] | 3 | 2021-07-22T15:49:44.000Z | 2022-03-19T08:46:27.000Z | src/transformers/models/speech_to_text/modeling_speech_to_text.py | JadeMaveric/transformers | fb2b89840bf2ab9f74702bf83af8ddf92b61efb3 | [
"Apache-2.0"
] | 6 | 2021-07-05T02:44:32.000Z | 2022-02-14T10:10:13.000Z | # coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Speech2Text model. """
import math
import random
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_speech_to_text import Speech2TextConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "Speech2TextConfig"
_TOKENIZER_FOR_DOC = "Speech2TextTokenizer"
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/s2t-small-librispeech-asr",
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
]
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
class Conv1dSubsampler(nn.Module):
"""
Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
via gated linear units (https://arxiv.org/abs/1911.08460)
"""
def __init__(self, config):
super(Conv1dSubsampler, self).__init__()
self.config = config
self.num_layers = config.num_conv_layers
self.in_channels = config.input_feat_per_channel * config.input_channels
self.mid_channels = config.conv_channels
self.out_channels = config.d_model
self.kernel_sizes = config.conv_kernel_sizes
self.conv_layers = nn.ModuleList(
nn.Conv1d(
self.in_channels if i == 0 else self.mid_channels // 2,
self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
kernel_size=k,
stride=2,
padding=k // 2,
)
for i, k in enumerate(self.kernel_sizes)
)
def forward(self, input_features):
hidden_states = input_features.transpose(1, 2).contiguous() # -> B x (C x D) x T
for conv in self.conv_layers:
hidden_states = conv(hidden_states)
hidden_states = nn.functional.glu(hidden_states, dim=1)
hidden_states = hidden_states.transpose(1, 2).contiguous() # -> T x B x (C x D)
return hidden_states
class Speech2TextSinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
if hasattr(self, "weights"):
# in forward, put the weights on correct device
emb_weights = emb_weights.to(self.weights.device)
self.weights = nn.Parameter(emb_weights)
self.weights.requires_grad = False
self.weights.detach_()
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
"""
Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
@torch.no_grad()
def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
bsz, seq_len = input_ids.size()
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
input_ids.device
)
# expand embeddings if needed
max_pos = self.padding_idx + 1 + seq_len
if max_pos > self.weights.size(0):
self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
def create_position_ids_from_input_ids(
self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Speech2Text
class Speech2TextAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
assert attn_weights.size() == (
bsz * self.num_heads,
tgt_len,
src_len,
), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
if attention_mask is not None:
assert attention_mask.size() == (
bsz,
1,
tgt_len,
src_len,
), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit akward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
assert attn_output.size() == (
bsz * self.num_heads,
tgt_len,
self.head_dim,
), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
attn_output = (
attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.reshape(bsz, tgt_len, embed_dim)
)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
class Speech2TextEncoderLayer(nn.Module):
    """
    A single pre-LayerNorm Transformer encoder block: self-attention followed by a
    position-wise feed-forward network, each wrapped in a residual connection.
    """

    def __init__(self, config: Speech2TextConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = Speech2TextAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_head_mask: torch.Tensor,
        output_attentions: bool = False,
    ):
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape :obj:`(seq_len, batch, embed_dim)`
            attention_mask (:obj:`torch.FloatTensor`): attention mask of size
                :obj:`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
                :obj:`(config.encoder_attention_heads,)`.
            output_attentions (:obj:`bool`, `optional`):
                Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
                returned tensors for more detail.
        """
        # ---- self-attention sub-block (pre-norm residual) ----
        shortcut = hidden_states
        normed = self.self_attn_layer_norm(hidden_states)
        attn_out, attn_weights, _ = self.self_attn(
            hidden_states=normed,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        attn_out = F.dropout(attn_out, p=self.dropout, training=self.training)
        hidden_states = shortcut + attn_out

        # ---- feed-forward sub-block (pre-norm residual) ----
        shortcut = hidden_states
        ff = self.final_layer_norm(hidden_states)
        ff = self.activation_fn(self.fc1(ff))
        ff = F.dropout(ff, p=self.activation_dropout, training=self.training)
        ff = self.fc2(ff)
        ff = F.dropout(ff, p=self.dropout, training=self.training)
        hidden_states = shortcut + ff

        # fp16 activations can overflow to inf/nan; clamp back into the representable range
        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
class Speech2TextDecoderLayer(nn.Module):
    """
    A single pre-LayerNorm Transformer decoder block: masked self-attention, cross-attention over the
    encoder output, and a position-wise feed-forward network, each wrapped in a residual connection.
    """

    def __init__(self, config: Speech2TextConfig):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = Speech2TextAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = Speech2TextAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        encoder_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ):
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape :obj:`(seq_len, batch, embed_dim)`
            attention_mask (:obj:`torch.FloatTensor`): attention mask of size
                :obj:`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape :obj:`(seq_len, batch, embed_dim)`
            encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
                :obj:`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
                :obj:`(config.encoder_attention_heads,)`.
            encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
                size :obj:`(config.encoder_attention_heads,)`.
            past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (:obj:`bool`, `optional`):
                Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                # BUGFIX: the cross-attention heads must be masked with ``encoder_layer_head_mask``.
                # Previously ``layer_head_mask`` (the self-attention head mask) was passed here,
                # silently ignoring the ``encoder_layer_head_mask`` argument altogether.
                layer_head_mask=encoder_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
class Speech2TextPreTrainedModel(PreTrainedModel):
    """
    Base class for all Speech2Text models. Provides weight initialization and the helpers that
    account for the convolutional subsampling of the input feature sequence.
    """

    config_class = Speech2TextConfig
    base_model_prefix = "model"

    def _init_weights(self, module):
        """Initialize ``module``'s weights from a normal distribution with std ``config.init_std``."""
        std = self.config.init_std
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # the padding embedding must stay zero
                module.weight.data[module.padding_idx].zero_()

    def _get_subsampled_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers. Each of the
        ``config.num_conv_layers`` layers halves the sequence length (rounding up).
        """
        # loop variable intentionally unused; only the number of iterations matters
        for _ in range(self.config.num_conv_layers):
            input_lengths = (input_lengths - 1) // 2 + 1

        return input_lengths

    def _get_subsampled_encoder_attn_mask(self, attention_mask):
        """Shrink a (possibly 3D) attention mask to match the conv-subsampled sequence length."""
        # generate creates a 3D attention mask because of the shape of input_features;
        # convert it to 2D if that's the case
        if len(attention_mask.shape) > 2:
            attention_mask = attention_mask[:, :, -1]

        subsampled_lengths = self._get_subsampled_output_lengths(attention_mask.sum(-1))
        max_len = subsampled_lengths.max().item()
        bsz = attention_mask.size()[0]
        attention_mask = torch.zeros((bsz, max_len), dtype=attention_mask.dtype, device=attention_mask.device)

        # these two operations make sure that all values
        # before the output length indices are attended to
        attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
        return attention_mask
SPEECH_TO_TEXT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.Speech2TextConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
:meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
# Shared forward-signature docstring, prepended to model `forward` methods via
# `add_start_docstrings_to_model_forward`. Fixes two user-facing typos from the original:
# "heas" -> "head" and a stray extra backtick after `decoder_input_ids`.
SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_features (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length, feature_size)`):
            Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
            by loading a ``.flac`` or ``.wav`` audio file into an array of type :obj:`List[float]` or a
            :obj:`numpy.ndarray`, *e.g.* via the soundfile library (``pip install soundfile``). To prepare the array
            into :obj:`input_features`, the :class:`~transformers.Speech2TextTokenizer` should be used for extracting
            the fbank features, padding and conversion into a tensor of type :obj:`torch.FloatTensor`. See
            :meth:`~transformers.Speech2TextTokenizer.__call__`
        attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing convolution and attention on padding token indices. Mask values selected in ``[0,
            1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            `What are attention masks? <../glossary.html#attention-mask>`__
        decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
            Provide for translation and summarization training. By default, the model will create this tensor by
            shifting the :obj:`input_ids` to the right, following the paper.
        decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
            Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
            also be used by default.

            If you want to change padding behavior, you should read
            :func:`modeling_speech_to_text._prepare_decoder_inputs` and modify to your needs. See diagram 1 in `the
            paper <https://arxiv.org/abs/1910.13461>`__ for more information on the default strategy.
        head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
            Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
            :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
            `optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
            cross-attention of the decoder.
        past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.

            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
            Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
            representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
            have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
            :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.

            If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
            takes the value of :obj:`inputs_embeds`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
            tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
            more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class Speech2TextEncoder(Speech2TextPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    :class:`Speech2TextEncoderLayer`.

    Args:
        config: Speech2TextConfig
    """

    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)

        self.dropout = config.dropout
        # probability of skipping a whole encoder layer during training (LayerDrop)
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_source_positions
        # standard Transformer sqrt(d_model) embedding scaling, if enabled in the config
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        # convolutional front-end that subsamples the audio feature sequence before the attention layers
        self.conv = Conv1dSubsampler(config)
        self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
            self.max_source_positions,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)

        self.init_weights()

    def forward(
        self,
        input_features,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_features (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length, feature_size)`):
                Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
                obtained by loading a ``.flac`` or ``.wav`` audio file into an array of type :obj:`List[float]` or a
                :obj:`numpy.ndarray`, *e.g.* via the soundfile library (``pip install soundfile``). To prepare the
                array into :obj:`input_features`, the :class:`~transformers.Speech2TextTokenizer` should be used for
                extracting the fbank features, padding and conversion into a tensor of type :obj:`torch.FloatTensor`.
                See :meth:`~transformers.Speech2TextTokenizer.__call__`
            attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
                Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
                ``[0, 1]``:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                `What are attention masks? <../glossary.html#attention-mask>`__
            head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            output_attentions (:obj:`bool`, `optional`):
                Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
                returned tensors for more detail.
            output_hidden_states (:obj:`bool`, `optional`):
                Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
                for more detail.
            return_dict (:obj:`bool`, `optional`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # the conv front-end shortens the sequence, so the mask must be subsampled to match
        if attention_mask is not None:
            attention_mask = self._get_subsampled_encoder_attn_mask(attention_mask)

        inputs_embeds = self.conv(input_features)
        inputs_embeds = self.embed_scale * inputs_embeds

        # positional embeddings are computed from a padding mask (1 where padded)
        if attention_mask is None:
            padding_mask = torch.zeros_like(inputs_embeds, dtype=torch.long)
        else:
            padding_mask = attention_mask.ne(1).long()
        embed_pos = self.embed_positions(padding_mask)

        hidden_states = inputs_embeds + embed_pos
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)

        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layers)
            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                if getattr(self.config, "gradient_checkpointing", False) and self.training:
                    # recompute layer activations in the backward pass to save memory

                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )

                hidden_states = layer_outputs[0]

            if output_attentions:
                # a skipped (LayerDrop) layer contributes None here
                all_attentions = all_attentions + (layer_outputs[1],)

        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class Speech2TextDecoder(Speech2TextPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`Speech2TextDecoderLayer`

    Args:
        config: Speech2TextConfig
    """

    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.dropout = config.dropout
        # probability of skipping a whole decoder layer during training (LayerDrop)
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_target_positions
        # standard Transformer sqrt(d_model) embedding scaling, if enabled in the config
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0

        self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)

        self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
            self.max_target_positions,
            config.d_model,
            self.padding_idx,
        )

        self.layers = nn.ModuleList([Speech2TextDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)

        self.init_weights()

    def get_input_embeddings(self):
        # token embedding table shared through the model's embedding-resizing machinery
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        """Combine the causal (future-masking) mask with the user-provided padding mask, if any."""
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
            ).to(self.device)

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        encoder_head_mask=None,
        past_key_values=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using :class:`~transformers.Speech2TextTokenizer`. See
                :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
                for details.

                `What are input IDs? <../glossary.html#input-ids>`__
            attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
                Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                `What are attention masks? <../glossary.html#attention-mask>`__
            encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in ``[0, 1]``:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                `What are attention masks? <../glossary.html#attention-mask>`__
            head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
                on hidden heads. Mask values selected in ``[0, 1]``:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
                decoding.

                If :obj:`past_key_values` are used, the user can optionally input only the last
                :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
                shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
                sequence_length)`.
            inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
                Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            output_attentions (:obj:`bool`, `optional`):
                Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
                returned tensors for more detail.
            output_hidden_states (:obj:`bool`, `optional`):
                Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
                for more detail.
            return_dict (:obj:`bool`, `optional`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )

        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # the encoder mask refers to the pre-subsampling length, so subsample it first
            encoder_attention_mask = self._get_subsampled_encoder_attn_mask(encoder_attention_mask)
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])

        # embed positions
        # NOTE(review): positions are computed from input_ids, which is None on the
        # inputs_embeds-only path above — confirm intended behavior for that path.
        positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)

        hidden_states = inputs_embeds + positions
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layers)
            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                continue

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                if use_cache:
                    # NOTE(review): logger.warn is deprecated in the stdlib; logger.warning is preferred
                    logger.warn(
                        "`use_cache = True` is incompatible with `config.gradient_checkpointing = True`. Setting `use_cache = False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, use_cache)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    encoder_head_mask[idx] if encoder_head_mask is not None else None,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]

            if use_cache:
                # present key/values sit after the attention weights when those are returned
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        hidden_states = self.layer_norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@add_start_docstrings(
    "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
    SPEECH_TO_TEXT_START_DOCSTRING,
)
class Speech2TextModel(Speech2TextPreTrainedModel):
    # Encoder-decoder backbone without a task head: wires Speech2TextEncoder and Speech2TextDecoder
    # together and exposes them for Speech2TextForConditionalGeneration.

    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)

        self.encoder = Speech2TextEncoder(config)
        self.decoder = Speech2TextDecoder(config)

        self.init_weights()

    def get_input_embeddings(self):
        # only the decoder embeds tokens; the encoder consumes audio features
        return self.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.decoder.embed_tokens = value

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="s2t_transformer_s",
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_features=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # run the encoder only when the caller did not supply precomputed encoder outputs
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_features,
                attention_mask=attention_mask,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            encoder_head_mask=head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The Speech2Text Model with a language modeling head. Can be used for summarization.",
    SPEECH_TO_TEXT_START_DOCSTRING,
)
class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel):
    base_model_prefix = "model"
    # Sinusoidal position tables are deterministic, so they are neither saved in
    # checkpoints nor required when loading one.
    _keys_to_ignore_on_load_missing = [
        r"encoder\.version",
        r"decoder\.version",
        r"model.encoder.embed_positions.weights",
        r"model.decoder.embed_positions.weights",
    ]
    _keys_to_ignore_on_save = [
        r"model.encoder.embed_positions.weights",
        r"model.decoder.embed_positions.weights",
    ]
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        # Seq2seq backbone plus an untied projection to the vocabulary.
        self.model = Speech2TextModel(config)
        self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False)
        self.init_weights()
    def get_encoder(self):
        # Delegate to the wrapped seq2seq model (used by `generate`).
        return self.model.get_encoder()
    def get_decoder(self):
        return self.model.get_decoder()
    def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        """Resize the decoder token embeddings and return the new embedding module."""
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        return new_embeddings
    def get_output_embeddings(self):
        # The LM head acts as the output embedding matrix.
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_features=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        decoder_inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the language modeling loss. Indices should either be in ``[0, ...,
            config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.

        Returns:

        Example::

            >>> import torch
            >>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
            >>> from datasets import load_dataset
            >>> import soundfile as sf

            >>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
            >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")

            >>> def map_to_array(batch):
            ...     speech, _ = sf.read(batch["file"])
            ...     batch["speech"] = speech
            ...     return batch

            >>> ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
            >>> ds = ds.map(map_to_array)

            >>> input_features = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt").input_features  # Batch size 1
            >>> generated_ids = model.generate(input_ids=input_features)
            >>> transcription = processor.batch_decode(generated_ids)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Teacher forcing: derive decoder inputs from the labels when the
            # caller did not supply decoder_input_ids explicitly.
            if decoder_input_ids is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )
        outputs = self.model(
            input_features,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Project decoder hidden states to vocabulary logits.
        lm_logits = self.lm_head(outputs[0])
        loss = None
        if labels is not None:
            # CrossEntropyLoss ignores -100 label positions by default.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past=None,
        attention_mask=None,
        head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs
    ):
        """Assemble the keyword arguments `generate` passes to `forward` at each step."""
        # cut decoder_input_ids if past is used
        if past is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]
        return {
            "encoder_outputs": encoder_outputs,
            "past_key_values": past,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }
    @staticmethod
    def _reorder_cache(past, beam_idx):
        """Reorder the cached key/value states to follow the surviving beams."""
        reordered_past = ()
        for layer_past in past:
            # each layer_past holds (self_k, self_v, cross_k, cross_v); dim 0 is the batch/beam axis
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
| 46.070214 | 239 | 0.657485 |
import math
import random
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_speech_to_text import Speech2TextConfig
# Module-level logger named after this module's import path.
logger = logging.get_logger(__name__)
# Names consumed by the docstring decorators below (e.g. replace_return_docstrings).
_CONFIG_FOR_DOC = "Speech2TextConfig"
_TOKENIZER_FOR_DOC = "Speech2TextTokenizer"
# Hub checkpoints with pretrained weights for this architecture.
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/s2t-small-librispeech-asr",
]
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift ``input_ids`` one token to the right, prepending ``decoder_start_token_id``.

    The last token of every sequence is dropped, and any ``-100`` loss-ignore
    markers in the shifted result are replaced with ``pad_token_id`` so the
    decoder never receives them as real inputs.

    Args:
        input_ids: Token ids of shape ``(batch_size, seq_len)``.
        pad_token_id: Id substituted for ``-100`` entries; must not be ``None``.
        decoder_start_token_id: Id written at position 0 of every sequence.

    Returns:
        A new tensor of the same shape with tokens shifted right by one.

    Raises:
        ValueError: If ``pad_token_id`` is ``None``.
    """
    if pad_token_id is None:
        # Explicit exception instead of `assert`: asserts are stripped under
        # `python -O`, which would let None reach masked_fill_ unchecked.
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id
    # Labels use -100 as "ignore" for the loss; replace with real padding ids.
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
    return shifted_input_ids
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
class Conv1dSubsampler(nn.Module):
    """
    Stack of stride-2 1D convolutions with GLU activations that subsamples the
    time axis of the input features and projects them to the model dimension.
    Each convolution doubles its nominal output channels because the following
    GLU halves them again.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.num_layers = config.num_conv_layers
        self.in_channels = config.input_feat_per_channel * config.input_channels
        self.mid_channels = config.conv_channels
        self.out_channels = config.d_model
        self.kernel_sizes = config.conv_kernel_sizes
        layers = []
        for i, kernel_size in enumerate(self.kernel_sizes):
            is_first = i == 0
            is_last = i == self.num_layers - 1
            layers.append(
                nn.Conv1d(
                    self.in_channels if is_first else self.mid_channels // 2,
                    self.out_channels * 2 if is_last else self.mid_channels,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=kernel_size // 2,
                )
            )
        self.conv_layers = nn.ModuleList(layers)

    def forward(self, input_features):
        # (batch, time, feat) -> (batch, feat, time) as Conv1d expects.
        hidden = input_features.transpose(1, 2).contiguous()
        for conv in self.conv_layers:
            # GLU halves the channel dimension produced by each convolution.
            hidden = nn.functional.glu(conv(hidden), dim=1)
        # Back to (batch, time', d_model) with time' subsampled by the strides.
        return hidden.transpose(1, 2).contiguous()
class Speech2TextSinusoidalPositionalEmbedding(nn.Module):
    """Produces fixed sinusoidal positional embeddings; the table grows on demand
    when longer sequences are seen."""
    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        # Two extra table rows are reserved (fairseq convention); see make_weights.
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """(Re)build the sinusoid table. It is registered as an nn.Parameter so it
        follows the module's device, but frozen and detached so it is never trained."""
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # when resizing, keep the new table on the same device as the old one
            emb_weights = emb_weights.to(self.weights.device)
        self.weights = nn.Parameter(emb_weights)
        self.weights.requires_grad = False
        self.weights.detach_()
    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """Build sinusoidal embeddings: sin terms in the first half of each vector,
        cos terms in the second; the row at ``padding_idx`` (if given) is zeroed."""
        half_dim = embedding_dim // 2
        # geometric progression of inverse wavelengths, base 10000
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero-pad the last column for odd embedding dimensions
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0
        return emb
    @torch.no_grad()
    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        bsz, seq_len = input_ids.size()
        # Map token ids to position ids; padding tokens stay at padding_idx.
        position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
            input_ids.device
        )
        # expand the table if the requested positions exceed its current size
        max_pos = self.padding_idx + 1 + seq_len
        if max_pos > self.weights.size(0):
            self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
        return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
    def create_position_ids_from_input_ids(
        self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
    ):
        """Replace non-padding symbols with their position numbers (starting at
        ``padding_idx + 1``); padding symbols keep position ``padding_idx``."""
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
        return incremental_indices.long() + padding_idx
class Speech2TextAttention(nn.Module):
    """Multi-headed scaled dot-product attention, usable as encoder/decoder
    self-attention or (when ``key_value_states`` is passed) cross-attention,
    with optional key/value caching for incremental decoding."""
    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
        # queries are pre-scaled by 1/sqrt(head_dim)
        self.scaling = self.head_dim ** -0.5
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed_dim) -> (bsz, num_heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: (batch, time, embed_dim). Returns (output, attention
        weights or None, updated key/value cache or the one passed in)."""
        # if key_value_states is provided, this layer acts as cross-attention
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, embed_dim = hidden_states.size()
        query_states = self.q_proj(hidden_states) * self.scaling
        if is_cross_attention and past_key_value is not None:
            # cross-attention keys/values never change per step: reuse the cache
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross-attention: keys/values come from the encoder states
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # cached self-attention: append current step to the cached k/v
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # plain self-attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            # expose the (possibly extended) k/v for the next decoding step
            past_key_value = (key_states, value_states)
        # fold heads into the batch dimension for a single bmm per projection
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        assert attn_weights.size() == (
            bsz * self.num_heads,
            tgt_len,
            src_len,
        ), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
        if attention_mask is not None:
            assert attention_mask.size() == (
                bsz,
                1,
                tgt_len,
                src_len,
            ), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
            # additive mask: masked positions carry large negative values
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = F.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            assert layer_head_mask.size() == (
                self.num_heads,
            ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
            # zero out (or scale) individual heads after the softmax
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        assert attn_output.size() == (
            bsz * self.num_heads,
            tgt_len,
            self.head_dim,
        ), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
        # merge heads back: (bsz*heads, tgt, head_dim) -> (bsz, tgt, embed_dim)
        attn_output = (
            attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
            .transpose(1, 2)
            .reshape(bsz, tgt_len, embed_dim)
        )
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped, past_key_value
class Speech2TextEncoderLayer(nn.Module):
    """One pre-norm Transformer encoder layer: self-attention followed by a
    feed-forward block, each wrapped in LayerNorm + residual connection."""

    def __init__(self, config: Speech2TextConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = Speech2TextAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_head_mask: torch.Tensor,
        output_attentions: bool = False,
    ):
        """
        Args:
            hidden_states: input of shape ``(batch, seq_len, embed_dim)``.
            attention_mask: additive mask ``(batch, 1, tgt_len, src_len)`` with
                large negative values at padded positions.
            layer_head_mask: per-head mask of shape ``(num_heads,)``.
            output_attentions: also return the attention weights when ``True``.
        """
        # --- self-attention sub-block (pre-norm + residual) ---
        residual = hidden_states
        normed = self.self_attn_layer_norm(hidden_states)
        attn_out, attn_weights, _ = self.self_attn(
            hidden_states=normed,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + F.dropout(attn_out, p=self.dropout, training=self.training)
        # --- feed-forward sub-block (pre-norm + residual) ---
        residual = hidden_states
        ff = self.final_layer_norm(hidden_states)
        ff = self.activation_fn(self.fc1(ff))
        ff = F.dropout(ff, p=self.activation_dropout, training=self.training)
        ff = F.dropout(self.fc2(ff), p=self.dropout, training=self.training)
        hidden_states = residual + ff
        # fp16 activations can overflow to inf/nan; clamp back into finite range.
        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
class Speech2TextDecoderLayer(nn.Module):
    """One pre-norm Transformer decoder layer: causal self-attention,
    cross-attention over the encoder output, then a feed-forward block,
    each with LayerNorm + residual connection."""
    def __init__(self, config: Speech2TextConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = Speech2TextAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = Speech2TextAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        encoder_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ):
        """
        Args:
            hidden_states: input of shape ``(batch, seq_len, embed_dim)``.
            attention_mask: additive causal/padding mask for self-attention.
            encoder_hidden_states: encoder output attended to by cross-attention.
            encoder_attention_mask: additive mask over encoder positions.
            layer_head_mask: per-head mask ``(num_heads,)`` for self-attention.
            encoder_layer_head_mask: per-head mask ``(num_heads,)`` for cross-attention.
            past_key_value: cached ``(self_k, self_v, cross_k, cross_v)`` tensors.
            output_attentions: also return self/cross attention weights.
            use_cache: also return the updated key/value cache tuple.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                # BUGFIX: use the cross-attention head mask here. The original
                # passed `layer_head_mask`, silently ignoring the
                # `encoder_layer_head_mask` argument altogether.
                layer_head_mask=encoder_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        if use_cache:
            outputs += (present_key_value,)
        return outputs
class Speech2TextPreTrainedModel(PreTrainedModel):
    """Base class for all Speech2Text models: weight initialization plus helpers
    for mapping attention masks through the conv subsampler."""
    config_class = Speech2TextConfig
    base_model_prefix = "model"
    def _init_weights(self, module):
        """Initialize weights: normal(0, config.init_std) for linear/conv/embedding
        layers; biases and the padding embedding row are zeroed."""
        std = self.config.init_std
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
    def _get_subsampled_output_lengths(self, input_lengths: torch.LongTensor):
        """Compute output sequence lengths after the stride-2 conv layers
        (each layer maps length L to (L - 1) // 2 + 1)."""
        for i in range(self.config.num_conv_layers):
            input_lengths = (input_lengths - 1) // 2 + 1
        return input_lengths
    def _get_subsampled_encoder_attn_mask(self, attention_mask):
        """Shrink a feature-level attention mask to the conv-subsampled time axis."""
        # generate creates a 3D attention mask because of the shape of input_features;
        # convert it to 2D if that's the case
        if len(attention_mask.shape) > 2:
            attention_mask = attention_mask[:, :, -1]
        subsampled_lengths = self._get_subsampled_output_lengths(attention_mask.sum(-1))
        max_len = subsampled_lengths.max().item()
        bsz = attention_mask.size()[0]
        attention_mask = torch.zeros((bsz, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
        # these two operations make sure that all values
        # before the output length indices are attended to:
        # set a 1 at each sequence's last valid position, then flip/cumsum/flip
        # turns it into 1s everywhere at or before that position
        attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
        return attention_mask
# Class-level docstring shared by the model classes via @add_start_docstrings.
# Typo fixed: "all its model" -> "all its models".
SPEECH_TO_TEXT_START_DOCSTRING = r"""
    This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
    pruning heads etc.)
    This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
    general usage and behavior.
    Parameters:
        config (:class:`~transformers.Speech2TextConfig`):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
# Forward-signature docstring injected via @add_start_docstrings_to_model_forward.
# Typos fixed: "heas" -> "head"; stray double backtick after `decoder_input_ids`.
SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_features (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length, feature_size)`):
            Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
            by loading a ``.flac`` or ``.wav`` audio file into an array of type :obj:`List[float]` or a
            :obj:`numpy.ndarray`, *e.g.* via the soundfile library (``pip install soundfile``). To prepare the array
            into :obj:`input_features`, the :class:`~transformers.Speech2TextTokenizer` should be used for extracting
            the fbank features, padding and conversion into a tensor of type :obj:`torch.FloatTensor`. See
            :meth:`~transformers.Speech2TextTokenizer.__call__`
        attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing convolution and attention on padding token indices. Mask values selected in ``[0,
            1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            `What are attention masks? <../glossary.html#attention-mask>`__
        decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
            Provide for translation and summarization training. By default, the model will create this tensor by
            shifting the :obj:`input_ids` to the right, following the paper.
        decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
            Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
            also be used by default.
            If you want to change padding behavior, you should read
            :func:`modeling_speech_to_text._prepare_decoder_inputs` and modify to your needs. See diagram 1 in `the
            paper <https://arxiv.org/abs/1910.13461>`__ for more information on the default strategy.
        head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
            Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
            :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
            `optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
            cross-attention of the decoder.
        past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
            Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
            representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
            have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
            :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
            If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
            takes the value of :obj:`inputs_embeds`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
            tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
            more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class Speech2TextEncoder(Speech2TextPreTrainedModel):
    """
    Transformer encoder: a convolutional subsampler followed by
    ``config.encoder_layers`` :class:`Speech2TextEncoderLayer` blocks and a
    final LayerNorm.
    """
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.dropout = config.dropout
        # probability of skipping a whole layer during training (LayerDrop)
        self.layerdrop = config.encoder_layerdrop
        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_source_positions
        # optionally scale embeddings by sqrt(d_model)
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        self.conv = Conv1dSubsampler(config)
        self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
            self.max_source_positions,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.init_weights()
    def forward(
        self,
        input_features,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Encode fbank ``input_features`` of shape (batch, time, feature); returns
        the last hidden state plus, when requested, all hidden states and
        attentions — as a tuple or a :class:`BaseModelOutput`."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if attention_mask is not None:
            # shrink the feature-level mask to the conv-subsampled time axis
            attention_mask = self._get_subsampled_encoder_attn_mask(attention_mask)
        inputs_embeds = self.conv(input_features)
        inputs_embeds = self.embed_scale * inputs_embeds
        # build the padding mask consumed by the positional embedding
        if attention_mask is None:
            padding_mask = torch.zeros_like(inputs_embeds, dtype=torch.long)
        else:
            padding_mask = attention_mask.ne(1).long()
        embed_pos = self.embed_positions(padding_mask)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layers)
            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                if getattr(self.config, "gradient_checkpointing", False) and self.training:
                    # trade compute for memory: re-run the layer in backward
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class Speech2TextDecoder(Speech2TextPreTrainedModel):
    def __init__(self, config: Speech2TextConfig):
        """Build the token embedding, sinusoidal position table, decoder layers
        and final LayerNorm from the config."""
        super().__init__(config)
        self.dropout = config.dropout
        # probability of skipping a whole layer during training (LayerDrop)
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_target_positions
        # optionally scale token embeddings by sqrt(d_model)
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
            self.max_target_positions,
            config.d_model,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([Speech2TextDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.init_weights()
    def get_input_embeddings(self):
        # Token embedding table applied (scaled by embed_scale) in forward.
        return self.embed_tokens
    def set_input_embeddings(self, value):
        # Replace the token embedding table (e.g. after vocab resizing).
        self.embed_tokens = value
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
encoder_attention_mask = self._get_subsampled_encoder_attn_mask(encoder_attention_mask)
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache = True` is incompatible with `config.gradient_checkpointing = True`. Setting `use_cache = False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
encoder_head_mask[idx] if encoder_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
    "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
    SPEECH_TO_TEXT_START_DOCSTRING,
)
class Speech2TextModel(Speech2TextPreTrainedModel):
    """Encoder-decoder wrapper that wires ``Speech2TextEncoder`` and ``Speech2TextDecoder`` together."""
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.encoder = Speech2TextEncoder(config)
        self.decoder = Speech2TextDecoder(config)
        self.init_weights()
    def get_input_embeddings(self):
        # The encoder consumes raw audio features, so the "input" embeddings in the
        # text sense are the decoder's token embeddings.
        return self.decoder.embed_tokens
    def set_input_embeddings(self, value):
        self.decoder.embed_tokens = value
    def get_encoder(self):
        """Return the audio encoder (used by ``generate()`` to precompute encoder states)."""
        return self.encoder
    def get_decoder(self):
        return self.decoder
    @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="s2t_transformer_s",
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_features=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        Run encoder (unless ``encoder_outputs`` is supplied) then decoder, and return a
        ``Seq2SeqModelOutput`` (or a tuple of decoder outputs followed by encoder
        outputs when ``return_dict`` is false).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_features,
                attention_mask=attention_mask,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            encoder_head_mask=head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            # Legacy tuple format: decoder fields first, then encoder fields.
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The Speech2Text Model with a language modeling head. Can be used for summarization.",
    SPEECH_TO_TEXT_START_DOCSTRING,
)
class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel):
    """``Speech2TextModel`` plus a linear LM head, for speech-to-text generation."""
    base_model_prefix = "model"
    # Sinusoidal position weights are recomputed at load time, so missing/unsaved
    # entries for them are expected and must not raise.
    _keys_to_ignore_on_load_missing = [
        r"encoder\.version",
        r"decoder\.version",
        r"model.encoder.embed_positions.weights",
        r"model.decoder.embed_positions.weights",
    ]
    _keys_to_ignore_on_save = [
        r"model.encoder.embed_positions.weights",
        r"model.decoder.embed_positions.weights",
    ]
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.model = Speech2TextModel(config)
        # Projection from d_model to vocabulary logits; bias-free.
        self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False)
        self.init_weights()
    def get_encoder(self):
        return self.model.get_encoder()
    def get_decoder(self):
        return self.model.get_decoder()
    def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        """Resize the decoder token embeddings (delegates to the base implementation)."""
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        return new_embeddings
    def get_output_embeddings(self):
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_features=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        decoder_inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        Run the model and project to vocabulary logits.

        When ``labels`` is given and ``decoder_input_ids`` is not, the decoder inputs
        are derived by right-shifting the labels. The loss is token-level cross
        entropy; label positions set to -100 are ignored (CrossEntropyLoss default).

        Returns a ``Seq2SeqLMOutput`` (or the equivalent tuple, prefixed by the loss
        when computed, if ``return_dict`` is false).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            if decoder_input_ids is None:
                # Teacher forcing: decoder sees labels shifted right, starting with
                # the decoder_start_token_id.
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )
        outputs = self.model(
            input_features,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = self.lm_head(outputs[0])
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past=None,
        attention_mask=None,
        head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs
    ):
        """Assemble the kwargs ``generate()`` feeds to ``forward`` at each decoding step."""
        # cut decoder_input_ids if past is used
        if past is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]
        return {
            "encoder_outputs": encoder_outputs,
            "past_key_values": past,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }
    @staticmethod
    def _reorder_cache(past, beam_idx):
        """Reorder every layer's cached states along the batch dim to follow beam search."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
| true | true |
1c3c6f1172e73e35d9a349f5a55c568beb7b1891 | 7,618 | py | Python | scripts/update_schemas.py | felixkj123/bmcweb | 91243c3b28b1df66e682f5a3ee96341fdc516b5a | [
"Apache-2.0"
] | null | null | null | scripts/update_schemas.py | felixkj123/bmcweb | 91243c3b28b1df66e682f5a3ee96341fdc516b5a | [
"Apache-2.0"
] | null | null | null | scripts/update_schemas.py | felixkj123/bmcweb | 91243c3b28b1df66e682f5a3ee96341fdc516b5a | [
"Apache-2.0"
] | 1 | 2021-06-23T10:28:02.000Z | 2021-06-23T10:28:02.000Z | #!/usr/bin/python3
import requests
import zipfile
from io import BytesIO
import os
from collections import defaultdict
from collections import OrderedDict
from distutils.version import StrictVersion
import shutil
import json
import glob
import xml.etree.ElementTree as ET
VERSION = "DSP8010_2019.2"
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
proxies = {
'https': os.environ.get("https_proxy", None)
}
r = requests.get(
'https://www.dmtf.org/sites/default/files/standards/documents/' +
VERSION +
'.zip',
proxies=proxies)
r.raise_for_status()
static_path = os.path.realpath(os.path.join(SCRIPT_DIR, "..", "static",
"redfish", "v1"))
schema_path = os.path.join(static_path, "schema")
json_schema_path = os.path.join(static_path, "JsonSchemas")
metadata_index_path = os.path.join(static_path, "$metadata", "index.xml")
zipBytesIO = BytesIO(r.content)
zip_ref = zipfile.ZipFile(zipBytesIO)
# Remove the old files (anything not starting with "Oem" is DMTF-provided and regenerated).
if os.path.exists(schema_path):
    files = glob.glob(os.path.join(schema_path, '[!Oem]*'))
    for f in files:
        os.remove(f)
if os.path.exists(json_schema_path):
    files = glob.glob(os.path.join(json_schema_path, '[!Oem]*'))
    for f in files:
        if (os.path.isfile(f)):
            os.remove(f)
        else:
            shutil.rmtree(f)
    # NOTE(review): this assumes the $metadata index exists whenever JsonSchemas does;
    # it would raise FileNotFoundError otherwise — presumably always true in practice.
    os.remove(metadata_index_path)
if not os.path.exists(schema_path):
    os.makedirs(schema_path)
if not os.path.exists(json_schema_path):
    os.makedirs(json_schema_path)
# Write the $metadata index: one edmx:Reference per CSDL schema file, while also
# copying each CSDL file (with normalized line endings) into schema_path.
with open(metadata_index_path, 'w') as metadata_index:
    metadata_index.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
    metadata_index.write(
        "<edmx:Edmx xmlns:edmx=\"http://docs.oasis-open.org/odata/ns/edmx\" Version=\"4.0\">\n")
    for zip_filepath in zip_ref.namelist():
        # Select files under the csdl/ directory but not the directory entries
        # themselves. `&` is bitwise-and on bools here — equivalent to `and` since
        # every operand is a plain bool.
        if zip_filepath.startswith(VERSION +
                                   '/' +
                                   VERSION +
                                   '/csdl/') & (zip_filepath != VERSION +
                                                "/csdl/") & (zip_filepath != VERSION +
                                                             '/' +
                                                             VERSION +
                                                             "/csdl/"):
            filename = os.path.basename(zip_filepath)
            with open(os.path.join(schema_path, filename), 'wb') as schema_file:
                metadata_index.write(
                    " <edmx:Reference Uri=\"/redfish/v1/schema/" +
                    filename +
                    "\">\n")
                content = zip_ref.read(zip_filepath)
                # Normalize CRLF -> LF before both parsing and writing out.
                content = content.replace(b'\r\n', b'\n')
                xml_root = ET.fromstring(content)
                # Emit an edmx:Include for every Schema namespace declared in the file.
                for edmx_child in xml_root:
                    if edmx_child.tag == "{http://docs.oasis-open.org/odata/ns/edmx}DataServices":
                        for data_child in edmx_child:
                            if data_child.tag == "{http://docs.oasis-open.org/odata/ns/edm}Schema":
                                namespace = data_child.attrib["Namespace"]
                                if namespace.startswith("RedfishExtensions"):
                                    # The extensions namespace gets the conventional "Redfish" alias.
                                    metadata_index.write(
                                        " <edmx:Include Namespace=\"" + namespace + "\" Alias=\"Redfish\"/>\n")
                                else:
                                    metadata_index.write(
                                        " <edmx:Include Namespace=\"" + namespace + "\"/>\n")
                schema_file.write(content)
            metadata_index.write(" </edmx:Reference>\n")
    # Service root entity container, common to every Redfish service.
    metadata_index.write(""" <edmx:DataServices>
<Schema xmlns="http://docs.oasis-open.org/odata/ns/edm" Namespace="Service">
<EntityContainer Name="Service" Extends="ServiceRoot.v1_0_0.ServiceContainer"/>
</Schema>
</edmx:DataServices>
""")
    # TODO:Issue#32 There's a bug in the script that currently deletes this
    # schema (because it's an OEM schema). Because it's the only one, and we
    # don't update schemas very often, we just manually fix it. Need a
    # permanent fix to the script.
    metadata_index.write(
        " <edmx:Reference Uri=\"/redfish/v1/schema/OemManager_v1.xml\">\n")
    metadata_index.write(" <edmx:Include Namespace=\"OemManager\"/>\n")
    metadata_index.write(" </edmx:Reference>\n")
    metadata_index.write("</edmx:Edmx>\n")
# Pick the newest version of each JSON schema: maps base name -> "vX_Y_Z" string.
schema_files = {}
for zip_filepath in zip_ref.namelist():
    if zip_filepath.startswith(os.path.join(VERSION, VERSION, 'json-schema/')):
        filename = os.path.basename(zip_filepath)
        filenamesplit = filename.split(".")
        # Versioned files look like "Name.vX_Y_Z.json" (3 dot-separated parts).
        if len(filenamesplit) == 3:
            thisSchemaVersion = schema_files.get(filenamesplit[0], None)
            if thisSchemaVersion is None:
                schema_files[filenamesplit[0]] = filenamesplit[1]
            else:
                # need to see if we're a newer version.
                # Compare numerically component-by-component, e.g. v1_10_0 > v1_9_0.
                if list(map(int, filenamesplit[1][1:].split("_"))) > list(map(
                        int, thisSchemaVersion[1:].split("_"))):
                    schema_files[filenamesplit[0]] = filenamesplit[1]
# For each schema, create JsonSchemas/<Name>/ containing a JsonSchemaFile index.json
# plus the schema payload itself (copied from the zip with normalized line endings).
for schema, version in schema_files.items():
    basename = schema + "." + version + ".json"
    zip_filepath = os.path.join(VERSION, VERSION, "json-schema", basename)
    schemadir = os.path.join(json_schema_path, schema)
    os.makedirs(schemadir)
    # OrderedDict keeps the emitted JSON key order stable across runs.
    location_json = OrderedDict()
    location_json["Language"] = "en"
    location_json["PublicationUri"] = (
        "http://redfish.dmtf.org/schemas/v1/" + schema + ".json")
    location_json["Uri"] = (
        "/redfish/v1/JsonSchemas/" + schema + "/" + schema + ".json")
    index_json = OrderedDict()
    index_json["@odata.context"] = "/redfish/v1/$metadata#JsonSchemaFile.JsonSchemaFile"
    index_json["@odata.id"] = "/redfish/v1/JsonSchemas/" + schema
    index_json["@odata.type"] = "#JsonSchemaFile.v1_0_2.JsonSchemaFile"
    index_json["Name"] = schema + " Schema File"
    index_json["Schema"] = "#" + schema + "." + schema
    index_json["Description"] = schema + " Schema File Location"
    index_json["Id"] = schema
    index_json["Languages"] = ["en"]
    index_json["Languages@odata.count"] = 1
    index_json["Location"] = [location_json]
    index_json["Location@odata.count"] = 1
    with open(os.path.join(schemadir, "index.json"), 'w') as schema_file:
        json.dump(index_json, schema_file, indent=4)
    with open(os.path.join(schemadir, schema + ".json"), 'wb') as schema_file:
        schema_file.write(zip_ref.read(zip_filepath).replace(b'\r\n', b'\n'))
# Finally, write the JsonSchemaFileCollection index listing every schema directory.
with open(os.path.join(json_schema_path, "index.json"), 'w') as index_file:
    members = [{"@odata.id": "/redfish/v1/JsonSchemas/" + schema}
               for schema in schema_files]
    # Deterministic ordering regardless of zip iteration order.
    members.sort(key=lambda x: x["@odata.id"])
    indexData = OrderedDict()
    indexData["@odata.id"] = "/redfish/v1/JsonSchemas"
    indexData["@odata.context"] = ("/redfish/v1/$metadata"
                                   "#JsonSchemaFileCollection."
                                   "JsonSchemaFileCollection")
    indexData["@odata.type"] = ("#JsonSchemaFileCollection."
                                "JsonSchemaFileCollection")
    indexData["Name"] = "JsonSchemaFile Collection"
    indexData["Description"] = "Collection of JsonSchemaFiles"
    indexData["Members@odata.count"] = len(schema_files)
    indexData["Members"] = members
    json.dump(indexData, index_file, indent=2)
zip_ref.close()
| 40.737968 | 119 | 0.582699 |
import requests
import zipfile
from io import BytesIO
import os
from collections import defaultdict
from collections import OrderedDict
from distutils.version import StrictVersion
import shutil
import json
import glob
import xml.etree.ElementTree as ET
# Redfish schema bundle release to mirror into the webserver's static tree.
VERSION = "DSP8010_2019.2"
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Honour an https_proxy environment variable if present (None disables proxying).
proxies = {
    'https': os.environ.get("https_proxy", None)
}
# Download the official DMTF schema zip for the pinned VERSION.
r = requests.get(
    'https://www.dmtf.org/sites/default/files/standards/documents/' +
    VERSION +
    '.zip',
    proxies=proxies)
r.raise_for_status()
# Destination paths inside the repo's static Redfish tree.
static_path = os.path.realpath(os.path.join(SCRIPT_DIR, "..", "static",
                                            "redfish", "v1"))
schema_path = os.path.join(static_path, "schema")
json_schema_path = os.path.join(static_path, "JsonSchemas")
metadata_index_path = os.path.join(static_path, "$metadata", "index.xml")
# Open the downloaded archive entirely in memory.
zipBytesIO = BytesIO(r.content)
zip_ref = zipfile.ZipFile(zipBytesIO)
# Remove the old files (anything not starting with "Oem" is DMTF-provided and regenerated).
if os.path.exists(schema_path):
    files = glob.glob(os.path.join(schema_path, '[!Oem]*'))
    for f in files:
        os.remove(f)
if os.path.exists(json_schema_path):
    files = glob.glob(os.path.join(json_schema_path, '[!Oem]*'))
    for f in files:
        if (os.path.isfile(f)):
            os.remove(f)
        else:
            shutil.rmtree(f)
    # NOTE(review): assumes the $metadata index exists whenever JsonSchemas does;
    # would raise FileNotFoundError otherwise — presumably always true in practice.
    os.remove(metadata_index_path)
if not os.path.exists(schema_path):
    os.makedirs(schema_path)
if not os.path.exists(json_schema_path):
    os.makedirs(json_schema_path)
# Write the $metadata index: one edmx:Reference per CSDL schema file, while also
# copying each CSDL file (with normalized line endings) into schema_path.
with open(metadata_index_path, 'w') as metadata_index:
    metadata_index.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
    metadata_index.write(
        "<edmx:Edmx xmlns:edmx=\"http://docs.oasis-open.org/odata/ns/edmx\" Version=\"4.0\">\n")
    for zip_filepath in zip_ref.namelist():
        # Select files under csdl/ but not the directory entries themselves.
        # `&` is bitwise-and on bools here — equivalent to `and` for these operands.
        if zip_filepath.startswith(VERSION +
                                   '/' +
                                   VERSION +
                                   '/csdl/') & (zip_filepath != VERSION +
                                                "/csdl/") & (zip_filepath != VERSION +
                                                             '/' +
                                                             VERSION +
                                                             "/csdl/"):
            filename = os.path.basename(zip_filepath)
            with open(os.path.join(schema_path, filename), 'wb') as schema_file:
                metadata_index.write(
                    " <edmx:Reference Uri=\"/redfish/v1/schema/" +
                    filename +
                    "\">\n")
                content = zip_ref.read(zip_filepath)
                # Normalize CRLF -> LF before both parsing and writing out.
                content = content.replace(b'\r\n', b'\n')
                xml_root = ET.fromstring(content)
                # Emit an edmx:Include for every Schema namespace declared in the file.
                for edmx_child in xml_root:
                    if edmx_child.tag == "{http://docs.oasis-open.org/odata/ns/edmx}DataServices":
                        for data_child in edmx_child:
                            if data_child.tag == "{http://docs.oasis-open.org/odata/ns/edm}Schema":
                                namespace = data_child.attrib["Namespace"]
                                if namespace.startswith("RedfishExtensions"):
                                    # Extensions namespace gets the conventional "Redfish" alias.
                                    metadata_index.write(
                                        " <edmx:Include Namespace=\"" + namespace + "\" Alias=\"Redfish\"/>\n")
                                else:
                                    metadata_index.write(
                                        " <edmx:Include Namespace=\"" + namespace + "\"/>\n")
                schema_file.write(content)
            metadata_index.write(" </edmx:Reference>\n")
    # Service root entity container, common to every Redfish service.
    metadata_index.write(""" <edmx:DataServices>
<Schema xmlns="http://docs.oasis-open.org/odata/ns/edm" Namespace="Service">
<EntityContainer Name="Service" Extends="ServiceRoot.v1_0_0.ServiceContainer"/>
</Schema>
</edmx:DataServices>
""")
    # FIX: the next two comment lines were a truncated comment fragment
    # ("e only one, and we" with no leading '#') — a SyntaxError. Restored as a comment:
    # the OemManager schema is OEM-specific; since it's the only one, and we
    # don't update schemas very often, we just manually fix it here.
    metadata_index.write(
        " <edmx:Reference Uri=\"/redfish/v1/schema/OemManager_v1.xml\">\n")
    metadata_index.write(" <edmx:Include Namespace=\"OemManager\"/>\n")
    metadata_index.write(" </edmx:Reference>\n")
    metadata_index.write("</edmx:Edmx>\n")
# Pick the newest version of each JSON schema: maps base name -> "vX_Y_Z" string.
schema_files = {}
for zip_filepath in zip_ref.namelist():
    if zip_filepath.startswith(os.path.join(VERSION, VERSION, 'json-schema/')):
        filename = os.path.basename(zip_filepath)
        filenamesplit = filename.split(".")
        # Versioned files look like "Name.vX_Y_Z.json" (3 dot-separated parts).
        if len(filenamesplit) == 3:
            thisSchemaVersion = schema_files.get(filenamesplit[0], None)
            if thisSchemaVersion is None:
                schema_files[filenamesplit[0]] = filenamesplit[1]
            else:
                # Keep the numerically newer version, e.g. v1_10_0 > v1_9_0.
                if list(map(int, filenamesplit[1][1:].split("_"))) > list(map(
                        int, thisSchemaVersion[1:].split("_"))):
                    schema_files[filenamesplit[0]] = filenamesplit[1]
# For each schema, create JsonSchemas/<Name>/ containing a JsonSchemaFile index.json
# plus the schema payload itself (copied from the zip with normalized line endings).
for schema, version in schema_files.items():
    basename = schema + "." + version + ".json"
    zip_filepath = os.path.join(VERSION, VERSION, "json-schema", basename)
    schemadir = os.path.join(json_schema_path, schema)
    os.makedirs(schemadir)
    # OrderedDict keeps the emitted JSON key order stable across runs.
    location_json = OrderedDict()
    location_json["Language"] = "en"
    location_json["PublicationUri"] = (
        "http://redfish.dmtf.org/schemas/v1/" + schema + ".json")
    location_json["Uri"] = (
        "/redfish/v1/JsonSchemas/" + schema + "/" + schema + ".json")
    index_json = OrderedDict()
    index_json["@odata.context"] = "/redfish/v1/$metadata#JsonSchemaFile.JsonSchemaFile"
    index_json["@odata.id"] = "/redfish/v1/JsonSchemas/" + schema
    index_json["@odata.type"] = "#JsonSchemaFile.v1_0_2.JsonSchemaFile"
    index_json["Name"] = schema + " Schema File"
    index_json["Schema"] = "#" + schema + "." + schema
    index_json["Description"] = schema + " Schema File Location"
    index_json["Id"] = schema
    index_json["Languages"] = ["en"]
    index_json["Languages@odata.count"] = 1
    index_json["Location"] = [location_json]
    index_json["Location@odata.count"] = 1
    with open(os.path.join(schemadir, "index.json"), 'w') as schema_file:
        json.dump(index_json, schema_file, indent=4)
    with open(os.path.join(schemadir, schema + ".json"), 'wb') as schema_file:
        schema_file.write(zip_ref.read(zip_filepath).replace(b'\r\n', b'\n'))
# Finally, write the JsonSchemaFileCollection index listing every schema directory.
with open(os.path.join(json_schema_path, "index.json"), 'w') as index_file:
    members = [{"@odata.id": "/redfish/v1/JsonSchemas/" + schema}
               for schema in schema_files]
    # Deterministic ordering regardless of zip iteration order.
    members.sort(key=lambda x: x["@odata.id"])
    indexData = OrderedDict()
    indexData["@odata.id"] = "/redfish/v1/JsonSchemas"
    indexData["@odata.context"] = ("/redfish/v1/$metadata"
                                   "#JsonSchemaFileCollection."
                                   "JsonSchemaFileCollection")
    indexData["@odata.type"] = ("#JsonSchemaFileCollection."
                                "JsonSchemaFileCollection")
    indexData["Name"] = "JsonSchemaFile Collection"
    indexData["Description"] = "Collection of JsonSchemaFiles"
    indexData["Members@odata.count"] = len(schema_files)
    indexData["Members"] = members
    json.dump(indexData, index_file, indent=2)
zip_ref.close()
| true | true |
1c3c7157849f130a209973d405d5e109baa1b505 | 3,467 | py | Python | app.py | mfronistas/LotteryWebAppFiles | 7e1a71c4198063f01bf7eca1980826e11787093e | [
"Apache-2.0"
] | null | null | null | app.py | mfronistas/LotteryWebAppFiles | 7e1a71c4198063f01bf7eca1980826e11787093e | [
"Apache-2.0"
] | null | null | null | app.py | mfronistas/LotteryWebAppFiles | 7e1a71c4198063f01bf7eca1980826e11787093e | [
"Apache-2.0"
] | null | null | null | # IMPORTS
import socket
from functools import wraps
import logging
from flask import Flask, render_template, request
from flask_login import LoginManager, current_user
from flask_sqlalchemy import SQLAlchemy
from flask_talisman import Talisman
# LOGGING
class SecurityFilter(logging.Filter):
    """Logging filter that only passes records whose message carries the SECURITY tag."""
    def filter(self, record):
        # Format the record once, then test for the marker substring.
        message = record.getMessage()
        return 'SECURITY' in message
# File handler: truncate lottery.log on startup and record WARNING+ security events only.
fh = logging.FileHandler('lottery.log', 'w')
fh.setLevel(logging.WARNING)
fh.addFilter(SecurityFilter())
formatter = logging.Formatter('%(asctime)s : %(message)s', '%m/%d/%Y %I:%M:%S %p')
fh.setFormatter(formatter)
# Attach to the root logger; propagation is irrelevant for root but set defensively.
logger = logging.getLogger('')
logger.propagate = False
logger.addHandler(fh)
# CONFIG
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///lottery.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): a hard-coded secret key should come from the environment in production.
app.config['SECRET_KEY'] = 'LongAndRandomSecretKey'
# initialise database
db = SQLAlchemy(app)
# Implementing security headers: Content-Security-Policy served via flask-talisman.
csp = {
    # FIX: the key was "' default-scr'" (leading space + typo). Browsers ignore
    # unknown directive names, so the default-src allow-list was never enforced.
    'default-src': [
        '\'self\'',
        'https://cdnjs.cloudflare.com/ajax/libs/bulma/0.7.2/css/bulma.min.css'
    ],
    # Scripts: same origin plus inline scripts used by the templates.
    'script-src': [
        '\'self\'',
        '\'unsafe-inline\''
    ]
}
# Apply the CSP above to every response via flask-talisman.
talisman = Talisman(app, content_security_policy=csp)
# FUNCTIONS
def requires_roles(*roles):
    """Decorator factory: only allow the wrapped view for users whose role is in *roles*.

    Unauthorised attempts are logged with the SECURITY tag and receive the 403 notice page.
    """
    def wrapper(view):
        @wraps(view)
        def guarded_view(*args, **kwargs):
            # Authorised: hand straight through to the real view.
            if current_user.role in roles:
                return view(*args, **kwargs)
            # Unauthorised: log the attempt, then render the notice.
            logging.warning('SECURITY - Unauthorised access attempt [%s, %s, %s, %s]',
                            current_user.id,
                            current_user.email,
                            current_user.role,
                            request.remote_addr)
            return render_template('403.html')
        return guarded_view
    return wrapper
# HOME PAGE VIEW
@app.route('/')
def index():
    # Debug aid: dump the incoming request headers to stdout.
    print(request.headers)
    return render_template('index.html')
# ERROR PAGE VIEWS
# Each handler renders a static template and echoes the matching status code.
@app.errorhandler(403)
def page_forbidden(error):
    """Forbidden — shown e.g. by requires_roles rejections routed through Flask."""
    return render_template('403.html'), 403
@app.errorhandler(400)
def bad_request(error):
    """Malformed request."""
    return render_template('400.html'), 400
@app.errorhandler(503)
def service_unavailable(error):
    """Service temporarily unavailable."""
    return render_template('503.html'), 503
@app.errorhandler(404)
def page_not_found(error):
    """Unknown route."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
    """Unhandled server-side exception."""
    return render_template('500.html'), 500
if __name__ == "__main__":
    my_host = "127.0.0.1"
    # Ask the OS for a free ephemeral port by binding to port 0, then release it.
    # NOTE(review): there is a small race window between close() and app.run()
    # re-binding the same port.
    free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    free_socket.bind((my_host, 0))
    free_socket.listen(5)
    free_port = free_socket.getsockname()[1]
    free_socket.close()
    # Implementing login manager for page login
    login_manager = LoginManager()
    login_manager.login_view = 'users.login'
    login_manager.init_app(app)
    from models import User
    @login_manager.user_loader
    def load_user(id):
        # Flask-Login passes the id as a string; look up the User row by primary key.
        return User.query.get(int(id))
    # BLUEPRINTS
    # import blueprints
    from users.views import users_blueprint
    from admin.views import admin_blueprint
    from lottery.views import lottery_blueprint
    # register blueprints with app
    app.register_blueprint(users_blueprint)
    app.register_blueprint(admin_blueprint)
    app.register_blueprint(lottery_blueprint)
    # Running app with ssl (self-signed cert/key files expected alongside the app)
    app.run(host=my_host, port=free_port, debug=True, ssl_context=('cert.pem', 'key.pem'))
| 27.515873 | 90 | 0.676954 |
import socket
from functools import wraps
import logging
from flask import Flask, render_template, request
from flask_login import LoginManager, current_user
from flask_sqlalchemy import SQLAlchemy
from flask_talisman import Talisman
class SecurityFilter(logging.Filter):
    """Logging filter that only passes records whose message carries the SECURITY tag."""
    def filter(self, record):
        # Format the record once, then test for the marker substring.
        message = record.getMessage()
        return 'SECURITY' in message
# File handler: truncate lottery.log on startup and record WARNING+ security events only.
fh = logging.FileHandler('lottery.log', 'w')
fh.setLevel(logging.WARNING)
fh.addFilter(SecurityFilter())
formatter = logging.Formatter('%(asctime)s : %(message)s', '%m/%d/%Y %I:%M:%S %p')
fh.setFormatter(formatter)
# Attach to the root logger; propagation is irrelevant for root but set defensively.
logger = logging.getLogger('')
logger.propagate = False
logger.addHandler(fh)
# Flask app + SQLite-backed SQLAlchemy setup.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///lottery.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): a hard-coded secret key should come from the environment in production.
app.config['SECRET_KEY'] = 'LongAndRandomSecretKey'
db = SQLAlchemy(app)
# Content-Security-Policy dictionary served via flask-talisman.
csp = {
    # FIX: the key was "' default-scr'" (leading space + typo). Browsers ignore
    # unknown directive names, so the default-src allow-list was never enforced.
    'default-src': [
        '\'self\'',
        'https://cdnjs.cloudflare.com/ajax/libs/bulma/0.7.2/css/bulma.min.css'
    ],
    # Scripts: same origin plus inline scripts used by the templates.
    'script-src': [
        '\'self\'',
        '\'unsafe-inline\''
    ]
}
# Apply the CSP above to every response via flask-talisman.
talisman = Talisman(app, content_security_policy=csp)
def requires_roles(*roles):
    """Decorator factory: only allow the wrapped view for users whose role is in *roles*.

    Unauthorised attempts are logged with the SECURITY tag and receive the 403 notice page.
    """
    def wrapper(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            if current_user.role not in roles:
                # Log the attempt (picked up by the SECURITY file handler)...
                logging.warning('SECURITY - Unauthorised access attempt [%s, %s, %s, %s]',
                                current_user.id,
                                current_user.email,
                                current_user.role,
                                request.remote_addr)
                # ...then show the unauthorised notice instead of the view.
                return render_template('403.html')
            return f(*args, **kwargs)
        return wrapped
    return wrapper
# Home page view.
@app.route('/')
def index():
    # Debug aid: dump the incoming request headers to stdout.
    print(request.headers)
    return render_template('index.html')
# Error page views: each renders a static template and echoes the matching status code.
@app.errorhandler(403)
def page_forbidden(error):
    """Forbidden."""
    return render_template('403.html'), 403
@app.errorhandler(400)
def bad_request(error):
    """Malformed request."""
    return render_template('400.html'), 400
@app.errorhandler(503)
def service_unavailable(error):
    """Service temporarily unavailable."""
    return render_template('503.html'), 503
@app.errorhandler(404)
def page_not_found(error):
    """Unknown route."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
    """Unhandled server-side exception."""
    return render_template('500.html'), 500
if __name__ == "__main__":
    my_host = "127.0.0.1"
    # Ask the OS for a free ephemeral port by binding to port 0, then release it.
    # NOTE(review): small race window between close() and app.run() re-binding.
    free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    free_socket.bind((my_host, 0))
    free_socket.listen(5)
    free_port = free_socket.getsockname()[1]
    free_socket.close()
    # Flask-Login setup: where to redirect anonymous users, and how to load a user.
    login_manager = LoginManager()
    login_manager.login_view = 'users.login'
    login_manager.init_app(app)
    from models import User
    @login_manager.user_loader
    def load_user(id):
        # Flask-Login passes the id as a string; look up the User row by primary key.
        return User.query.get(int(id))
    # Register the application blueprints.
    from users.views import users_blueprint
    from admin.views import admin_blueprint
    from lottery.views import lottery_blueprint
    app.register_blueprint(users_blueprint)
    app.register_blueprint(admin_blueprint)
    app.register_blueprint(lottery_blueprint)
    # Run over TLS with the local self-signed certificate.
    app.run(host=my_host, port=free_port, debug=True, ssl_context=('cert.pem', 'key.pem'))
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.