function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def is_Word_Joiner(U, UISC, UGC):
    """True iff U is U+2060 WORD JOINER; UISC and UGC are unused."""
    WORD_JOINER = 0x2060
    return U == WORD_JOINER
21,
16,
21,
3,
1435959644
] |
def is_Reserved(U, UISC, UGC):
    """True iff the General_Category marks the code point unassigned (Cn)."""
    unassigned = (UGC == 'Cn')
    return unassigned
21,
16,
21,
3,
1435959644
] |
def is_SAKOT(U, UISC, UGC):
    """True iff U is U+1A60 TAI THAM SIGN SAKOT; UISC and UGC are unused."""
    SAKOT = 0x1A60
    return U == SAKOT
21,
16,
21,
3,
1435959644
] |
def is_SYM_MOD(U, UISC, UGC):
    """True iff U is a Balinese musical symbol modifier.

    The original code enumerated the nine code points U+1B6B..U+1B73 in a
    list; they form exactly this contiguous range, so a range comparison
    is equivalent, clearer, and O(1).
    """
    return 0x1B6B <= U <= 0x1B73
21,
16,
21,
3,
1435959644
] |
def is_VOWEL(U, UISC, UGC):
    """Classify a code point as a USE vowel.

    A pure killer always counts; otherwise the syllabic category must be
    a (dependent) vowel, the general category must not be Lo, and U+AA29
    is explicitly excluded.
    https://github.com/harfbuzz/harfbuzz/issues/376
    """
    if UISC == Pure_Killer:
        return True
    return UGC != Lo and UISC in (Vowel, Vowel_Dependent) and U != 0xAA29
21,
16,
21,
3,
1435959644
] |
def map_to_use(data):
    """Assign a USE (Universal Shaping Engine) category to each code point.

    :param data: dict mapping code point U to a tuple
        (UISC, UIPC, UGC, UBlock): Indic_Syllabic_Category,
        Indic_Positional_Category, General_Category, block name.
    :returns: dict mapping U to (USE category name, UBlock).

    Raises AssertionError when a code point matches zero or several USE
    category predicates, or carries a positional category USE cannot express.
    """
    out = {}
    items = use_mapping.items()
    for U,(UISC,UIPC,UGC,UBlock) in data.items():
        # Resolve Indic_Syllabic_Category
        # TODO: These don't have UISC assigned in Unicode 12.0, but have UIPC
        if 0x1CE2 <= U <= 0x1CE8: UISC = Cantillation_Mark
        # Tibetan:
        # TODO: These don't have UISC assigned in Unicode 12.0, but have UIPC
        if 0x0F18 <= U <= 0x0F19 or 0x0F3E <= U <= 0x0F3F: UISC = Vowel_Dependent
        if 0x0F86 <= U <= 0x0F87: UISC = Tone_Mark
        # Overrides to allow NFC order matching syllable
        # https://github.com/harfbuzz/harfbuzz/issues/1012
        if UBlock == 'Tibetan' and is_VOWEL (U, UISC, UGC):
            if UIPC == Top:
                UIPC = Bottom
        # TODO: https://github.com/harfbuzz/harfbuzz/pull/982
        # also https://github.com/harfbuzz/harfbuzz/issues/1012
        if UBlock == 'Chakma' and is_VOWEL (U, UISC, UGC):
            if UIPC == Top:
                UIPC = Bottom
            elif UIPC == Bottom:
                UIPC = Top
        # TODO: https://github.com/harfbuzz/harfbuzz/pull/627
        if 0x1BF2 <= U <= 0x1BF3: UISC = Nukta; UIPC = Bottom
        # TODO: U+1CED should only be allowed after some of
        # the nasalization marks, maybe only for U+1CE9..U+1CF1.
        if U == 0x1CED: UISC = Tone_Mark
        # TODO: https://github.com/harfbuzz/harfbuzz/issues/1105
        if U == 0x11134: UISC = Gemination_Mark
        # Exactly one USE predicate must accept the (possibly overridden) data.
        values = [k for k,v in items if v(U,UISC,UGC)]
        assert len(values) == 1, "%s %s %s %s" % (hex(U), UISC, UGC, values)
        USE = values[0]
        # Resolve Indic_Positional_Category
        # TODO: These should die, but have UIPC in Unicode 12.0
        if U in [0x953, 0x954]: UIPC = Not_Applicable
        # TODO: In USE's override list but not in Unicode 12.0
        if U == 0x103C: UIPC = Left
        # TODO: https://github.com/harfbuzz/harfbuzz/pull/2012
        if U == 0x1C29: UIPC = Left
        # TODO: These are not in USE's override list that we have, nor are they in Unicode 12.0
        if 0xA926 <= U <= 0xA92A: UIPC = Top
        # TODO: https://github.com/harfbuzz/harfbuzz/pull/1037
        # and https://github.com/harfbuzz/harfbuzz/issues/1631
        if U in [0x11302, 0x11303, 0x114C1]: UIPC = Top
        if U == 0x1171E: UIPC = Left
        if 0x1CF8 <= U <= 0x1CF9: UIPC = Top
        assert (UIPC in [Not_Applicable, Visual_Order_Left] or
                USE in use_positions), "%s %s %s %s %s" % (hex(U), UIPC, USE, UISC, UGC)
        # Append a positional suffix to the category when one is defined.
        pos_mapping = use_positions.get(USE, None)
        if pos_mapping:
            values = [k for k,v in pos_mapping.items() if v and UIPC in v]
            assert len(values) == 1, "%s %s %s %s %s %s" % (hex(U), UIPC, USE, UISC, UGC, values)
            USE = USE + values[0]
        out[U] = (USE, UBlock)
    return out
21,
16,
21,
3,
1435959644
] |
def print_block (block, start, end, data):
    """Print one block of the generated table, 16 cells per row.

    Updates module-level `total`/`used` statistics and tracks the last
    block name so the header comment is not repeated.
    """
    global total, used, last_block
    if block and block != last_block:
        print ()
        print ()
        print (" /* %s */" % block)
        if start % 16:
            # Pad so the first cell lands in its proper column.
            print (' ' * (20 + (start % 16 * 6)), end='')
    num = 0
    # Ranges are expected to be 8-aligned on both ends.
    assert start % 8 == 0
    assert (end+1) % 8 == 0
    for u in range (start, end+1):
        if u % 16 == 0:
            print ()
            print (" /* %04X */" % u, end='')
        if u in data:
            num += 1
        # Fall back to `defaults` for code points missing from `data`.
        d = data.get (u, defaults)
        print ("%6s," % d[0], end='')
    total += end - start + 1
    used += num
    if block:
        last_block = block
21,
16,
21,
3,
1435959644
] |
def __init__(self, rpc_error):
    # Keep the raw JSON-RPC error object so callers can inspect it
    # (e.g. its 'code'/'message' fields) after catching the exception.
    Exception.__init__(self)
    self.error = rpc_error
109,
6,
109,
45,
1442411135
] |
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None):
    """Create a JSON-RPC proxy (Python 2: uses urlparse/httplib).

    :param service_url: URL with user:password credentials embedded.
    :param service_name: RPC method this proxy instance invokes.
    :param timeout: socket timeout for the HTTP(S) connection.
    :param connection: existing connection to reuse (method callables
        share the original proxy's connection).
    """
    self.__service_url = service_url
    self.__service_name = service_name
    self.__url = urlparse.urlparse(service_url)
    if self.__url.port is None:
        # NOTE(review): defaults to 80 even when the scheme is https and
        # no port is given — confirm this is intended.
        port = 80
    else:
        port = self.__url.port
    (user, passwd) = (self.__url.username, self.__url.password)
    try:
        user = user.encode('utf8')
    except AttributeError:
        # Already bytes, or None (no username in the URL).
        pass
    try:
        passwd = passwd.encode('utf8')
    except AttributeError:
        pass
    # HTTP Basic auth header built from the URL credentials.
    authpair = user + b':' + passwd
    self.__auth_header = b'Basic ' + base64.b64encode(authpair)
    if connection:
        # Callables re-use the connection of the original proxy
        self.__conn = connection
    elif self.__url.scheme == 'https':
        self.__conn = httplib.HTTPSConnection(self.__url.hostname, port,
                                              None, None, False,
                                              timeout)
    else:
        self.__conn = httplib.HTTPConnection(self.__url.hostname, port,
                                             False, timeout)
109,
6,
109,
45,
1442411135
] |
def __call__(self, *args):
    """Perform the JSON-RPC call for the bound service method.

    Serializes `args` as a JSON-RPC 1.1 request, POSTs it over the
    shared connection and returns the response's 'result' field.
    :raises JSONRPCException: on an error response or missing result.
    """
    AuthServiceProxy.__id_count += 1
    # Log id, method and params; Decimals are serialized via EncodeDecimal.
    log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self.__service_name,
                             json.dumps(args, default=EncodeDecimal)))
    postdata = json.dumps({'version': '1.1',
                           'method': self.__service_name,
                           'params': args,
                           'id': AuthServiceProxy.__id_count}, default=EncodeDecimal)
    self.__conn.request('POST', self.__url.path, postdata,
                        {'Host': self.__url.hostname,
                         'User-Agent': USER_AGENT,
                         'Authorization': self.__auth_header,
                         'Content-type': 'application/json'})
    response = self._get_response()
    if response['error'] is not None:
        raise JSONRPCException(response['error'])
    elif 'result' not in response:
        raise JSONRPCException({
            'code': -343, 'message': 'missing JSON-RPC result'})
    else:
        return response['result']
109,
6,
109,
45,
1442411135
] |
def get_data():
    """Load MNIST, flatten and scale to [0, 1], one-hot encode labels.

    Relies on module globals: max_train_samples, max_test_samples,
    nb_classes, weighted_class.

    :returns: ((X_train, Y_train), (X_test, Y_test), test_ids) where
        test_ids indexes test samples whose label equals `weighted_class`.
    """
    # the data, shuffled and split between tran and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)[:max_train_samples]
    X_test = X_test.reshape(10000, 784)[:max_test_samples]
    X_train = X_train.astype('float32') / 255
    X_test = X_test.astype('float32') / 255
    # convert class vectors to binary class matrices
    y_train = y_train[:max_train_samples]
    y_test = y_test[:max_test_samples]
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    test_ids = np.where(y_test == np.array(weighted_class))[0]
    return (X_train, Y_train), (X_test, Y_test), test_ids
1578,
655,
1578,
191,
1485311209
] |
def can_edit(account, gallery):
    """A team gallery is editable only by team members; a personal
    gallery only by its creator."""
    if gallery.team:
        return bool(team_control.is_member(account, gallery.team))
    return gallery.created_by == account
8,
5,
8,
60,
1413752765
] |
def delete(account, gallery):
    """ Delete gallery and all pictures belonging to it. """
    # Raises unless `account` is allowed to edit this gallery.
    _assert_can_edit(account, gallery)
    for picture in gallery.pictures.all():
        # Delete each picture via remove() first — presumably it also
        # cleans up stored files; verify against its implementation.
        remove(account, picture)
    gallery.delete()
8,
5,
8,
60,
1413752765
] |
def setprimary(account, picture):
    """ Set picture as the galleries primary picture. """
    owning_gallery = picture.gallery
    _assert_can_edit(account, owning_gallery)
    owning_gallery.primary = picture
    owning_gallery.save()
8,
5,
8,
60,
1413752765
] |
def __init__(self, callWhenBuilderListSet):
    # Callback to invoke when the builder list is set.
    self.callWhenBuilderListSet = callWhenBuilderListSet
    # Master-side perspective; set on attach, cleared on disconnect
    # (see setMasterPerspective).
    self.master_persp = None
    # Deferreds fired once the connection is fully detached.
    self._detach_deferreds = []
    self._detached = False
6,
2,
6,
1,
1285148030
] |
def setMasterPerspective(self, persp):
    """Record the master's perspective and arrange disconnect cleanup."""
    self.master_persp = persp
    # clear out master_persp on disconnect
    def clear_persp():
        self.master_persp = None
    persp.broker.notifyOnDisconnect(clear_persp)
    def fire_deferreds():
        # Mark detached, then fire every pending detach Deferred.
        # The list is swapped to None first so no new waiters sneak in.
        self._detached = True
        self._detach_deferreds, deferreds = None, self._detach_deferreds
        for d in deferreds:
            d.callback(None)
    persp.broker.notifyOnDisconnect(fire_deferreds)
6,
2,
6,
1,
1285148030
] |
def remote_getWorkerInfo(self):
    """Return fake worker info, including UTF-8 keys and values so the
    master-side tests can check non-ASCII handling."""
    utf8_value = b'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode()
    utf8_key = (b'\xe3\x83\xaa\xe3\x83\xaa\xe3\x83\xbc\xe3\x82\xb9\xe3'
                b'\x83\x86\xe3\x82\xb9\xe3\x83\x88').decode()
    info = {
        'info': 'here',
        'worker_commands': {'x': 1},
        'numcpus': 1,
        'none': None,
        'os_release': utf8_value,
    }
    info[utf8_key] = utf8_value
    return info
6,
2,
6,
1,
1285148030
] |
def remote_getCommands(self):
    """Return the fake command registry exposed to the master."""
    commands = {'x': 1}
    return commands
6,
2,
6,
1,
1285148030
] |
def attached(self, worker, commands):
    # Accept the attach unconditionally; the Deferred fires immediately.
    return defer.succeed(None)
6,
2,
6,
1,
1285148030
] |
def getOldestRequestTime(self):
    """Fake builder hook: report the oldest pending request as time 0."""
    oldest = 0
    return oldest
6,
2,
6,
1,
1285148030
] |
def attached(self, conn):
    # Arm a fresh Deferred that fires when this connection later detaches,
    # then delegate the actual attach to the base class.
    self.detach_d = defer.Deferred()
    return super().attached(conn)
6,
2,
6,
1,
1285148030
] |
def setUp(self):
    """Assemble a real PBManager/WorkerManager/BotMaster on a fake master.

    NOTE(review): the body uses `yield`, so this is presumably decorated
    with @defer.inlineCallbacks outside this view — confirm.
    """
    self.setUpTestReactor()
    self.master = fakemaster.make_master(self, wantMq=True, wantData=True,
                                         wantDb=True)
    # set the worker port to a loopback address with unspecified
    # port
    self.pbmanager = self.master.pbmanager = pbmanager.PBManager()
    yield self.pbmanager.setServiceParent(self.master)
    # remove the fakeServiceParent from fake service hierarchy, and replace
    # by a real one
    yield self.master.workers.disownServiceParent()
    self.workers = self.master.workers = workermanager.WorkerManager(
        self.master)
    yield self.workers.setServiceParent(self.master)
    self.botmaster = botmaster.BotMaster()
    yield self.botmaster.setServiceParent(self.master)
    self.master.botmaster = self.botmaster
    # Stub out the data-API notification; tests don't care about it.
    self.master.data.updates.workerConfigured = lambda *a, **k: None
    yield self.master.startService()
    # Per-test state, populated by addWorker()/connectWorker().
    self.buildworker = None
    self.port = None
    self.workerworker = None
    self.endpoint = None
    self.broker = None
    self._detach_deferreds = []
    # patch in our FakeBuilder for the regular Builder class
    self.patch(botmaster, 'Builder', FakeBuilder)
    self.server_connection_string = "tcp:0:interface=127.0.0.1"
    self.client_connection_string_tpl = "tcp:host=127.0.0.1:port={port}"
6,
2,
6,
1,
1285148030
] |
def addWorker(self, **kwargs):
    """
    Create a master-side worker instance and add it to the BotMaster
    @param **kwargs: arguments to pass to the L{Worker} constructor.
    """
    self.buildworker = MyWorker("testworker", "pw", **kwargs)
    # reconfig the master to get it set up
    new_config = self.master.config
    new_config.protocols = {"pb": {"port": self.server_connection_string}}
    new_config.workers = [self.buildworker]
    new_config.builders = [config.BuilderConfig(
        name='bldr',
        workername='testworker', factory=factory.BuildFactory())]
    yield self.botmaster.reconfigServiceWithBuildbotConfig(new_config)
    yield self.workers.reconfigServiceWithBuildbotConfig(new_config)
    # as part of the reconfig, the worker registered with the pbmanager, so
    # get the port it was assigned
    self.port = self.buildworker.registration.getPBPort()
6,
2,
6,
1,
1285148030
] |
def logged_in(persp):
    # Login callback: `workerworker` and `self` are closed over from the
    # enclosing scope.
    workerworker.setMasterPerspective(persp)
    # set up to hear when the worker side disconnects
    workerworker.detach_d = defer.Deferred()
    persp.broker.notifyOnDisconnect(
        lambda: workerworker.detach_d.callback(None))
    # Track the Deferred so tearDown can wait for all detaches.
    self._detach_deferreds.append(workerworker.detach_d)
    return workerworker
6,
2,
6,
1,
1285148030
] |
def workerSideDisconnect(self, worker):
    """Disconnect from the worker side"""
    # Drop the transport directly; notifyOnDisconnect callbacks then fire.
    worker.master_persp.broker.transport.loseConnection()
6,
2,
6,
1,
1285148030
] |
def test_connect_disconnect(self):
    """Test a single worker connecting and disconnecting."""
    # NOTE(review): uses `yield`; presumably @defer.inlineCallbacks.
    yield self.addWorker()
    # connect
    worker = yield self.connectWorker()
    # disconnect
    self.workerSideDisconnect(worker)
    # wait for the resulting detach
    yield worker.waitForDetach()
6,
2,
6,
1,
1285148030
] |
def test_tls_connect_disconnect(self):
    """Test with TLS or SSL endpoint.
    According to the deprecation note for the SSL client endpoint,
    the TLS endpoint is supported from Twistd 16.0.
    TODO add certificate verification (also will require some conditionals
    on various versions, including PyOpenSSL, service_identity. The CA used
    to generate the testing cert is in ``PKI_DIR/ca``
    """
    def escape_colon(path):
        # on windows we can't have \ as it serves as the escape character for :
        return path.replace('\\', '/').replace(':', '\\:')
    # Swap the plain-TCP connection strings for SSL ones before addWorker.
    self.server_connection_string = (
        "ssl:port=0:certKey={pub}:privateKey={priv}:" +
        "interface=127.0.0.1").format(
            pub=escape_colon(os.path.join(PKI_DIR, '127.0.0.1.crt')),
            priv=escape_colon(os.path.join(PKI_DIR, '127.0.0.1.key')))
    self.client_connection_string_tpl = "ssl:host=127.0.0.1:port={port}"
    yield self.addWorker()
    # connect
    worker = yield self.connectWorker()
    # disconnect
    self.workerSideDisconnect(worker)
    # wait for the resulting detach
    yield worker.waitForDetach()
6,
2,
6,
1,
1285148030
] |
def test_worker_info(self):
    """Worker info (including UTF-8 keys/values) must reach the master."""
    yield self.addWorker()
    worker = yield self.connectWorker()
    props = self.buildworker.info
    # check worker info passing
    self.assertEqual(props.getProperty("info"),
                     "here")
    # check worker info passing with UTF-8
    self.assertEqual(props.getProperty("os_release"),
                     b'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode())
    self.assertEqual(props.getProperty(b'\xe3\x83\xaa\xe3\x83\xaa\xe3\x83\xbc\xe3\x82'
                                       b'\xb9\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode()),
                     b'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode())
    self.assertEqual(props.getProperty("none"), None)
    self.assertEqual(props.getProperty("numcpus"), 1)
    self.workerSideDisconnect(worker)
    yield worker.waitForDetach()
6,
2,
6,
1,
1285148030
] |
def _test_duplicate_worker(self):
    """A second connection for the same worker name must be rejected."""
    yield self.addWorker()
    # connect first worker
    worker1 = yield self.connectWorker()
    # connect second worker; this should fail
    try:
        yield self.connectWorker(waitForBuilderList=False)
        connect_failed = False
    except Exception:
        connect_failed = True
    self.assertTrue(connect_failed)
    # disconnect both and wait for that to percolate
    self.workerSideDisconnect(worker1)
    yield worker1.waitForDetach()
    # flush the exception logged for this on the master
    self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
6,
2,
6,
1,
1285148030
] |
def read_pyfile(filename):
    """Read and return the contents of a Python source file (as a
    string), taking into account the file encoding.

    Uses tokenize.open(), which detects the PEP 263 coding declaration
    (or BOM) and opens the file with that encoding — replacing the
    original's two separate open() calls with one.
    """
    with tokenize.open(filename) as pyfile:
        return pyfile.read()
16,
2,
16,
2,
1456065110
] |
def f():
    # Fixture: the loop breaks on the first iteration, so the for/else
    # `else` clause never runs and `y` is never bound.
    for x in range(10):
        break
    else:
        y = 2
    z = 3
16,
2,
16,
2,
1456065110
] |
def g():
    # Fixture: `break` exits immediately, so the while/else `else`
    # clause never runs and `y` is never bound.
    while True:
        break
    else:
        y = 2
    z = 3
16,
2,
16,
2,
1456065110
] |
def f():
    # Fixture exercising nested `nonlocal` declarations.
    x = 1
    def g():
        # Rebinds `x` in the enclosing f() scope.
        nonlocal x
        x = 2
        y = 7
        def h():
            # `y` resolves to g()'s local; `x` chains up to f()'s.
            nonlocal x, y
16,
2,
16,
2,
1456065110
] |
def assertASTEqual(self, ast1, ast2):
    """Assert that two ASTs are structurally identical via their dumps."""
    dump1 = ast.dump(ast1)
    dump2 = ast.dump(ast2)
    self.assertEqual(dump1, dump2)
16,
2,
16,
2,
1456065110
] |
def test_fstrings(self):
# See issue 25180
self.check_roundtrip(r"""f'{f"{0}"*3}'""")
self.check_roundtrip(r"""f'{f"{y}"*3}'""") | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_shifts(self):
self.check_roundtrip("45 << 2")
self.check_roundtrip("13 >> 7") | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_while_else(self):
    # Round-trip the module-level while/else source fixture.
    self.check_roundtrip(while_else)
16,
2,
16,
2,
1456065110
] |
def test_integer_parens(self):
self.check_roundtrip("3 .__abs__()") | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_min_int(self):
self.check_roundtrip(str(-2**31))
self.check_roundtrip(str(-2**63)) | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_lambda_parentheses(self):
self.check_roundtrip("(lambda: int)()") | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_function_arguments(self):
self.check_roundtrip("def f(): pass")
self.check_roundtrip("def f(a): pass")
self.check_roundtrip("def f(b = 2): pass")
self.check_roundtrip("def f(a, b): pass")
self.check_roundtrip("def f(a, b = 2): pass")
self.check_roundtrip("def f(a = 5, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b): pass")
self.check_roundtrip("def f(*, a, b = 2): pass")
self.check_roundtrip("def f(a, b = None, *, c, **kwds): pass")
self.check_roundtrip("def f(a=2, *args, c=5, d, **kwds): pass")
self.check_roundtrip("def f(*args, **kwargs): pass") | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_nonlocal(self):
    # Round-trip the module-level nonlocal source fixture.
    self.check_roundtrip(nonlocal_ex)
16,
2,
16,
2,
1456065110
] |
def test_bytes(self):
self.check_roundtrip("b'123'") | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_set_literal(self):
self.check_roundtrip("{'a', 'b', 'c'}") | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_dict_comprehension(self):
self.check_roundtrip("{x: x*x for x in range(10)}") | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_class_definition(self):
self.check_roundtrip("class A(metaclass=type, *[], **{}): pass") | FFMG/myoddweb.piger | [
16,
2,
16,
2,
1456065110
] |
def test_try_except_finally(self):
    # Round-trip the module-level try/except/finally source fixture.
    self.check_roundtrip(try_except_finally)
16,
2,
16,
2,
1456065110
] |
def test_with_simple(self):
    # Round-trip the module-level single-item `with` source fixture.
    self.check_roundtrip(with_simple)
16,
2,
16,
2,
1456065110
] |
def test_with_two_items(self):
    # Round-trip the module-level two-item `with` source fixture.
    self.check_roundtrip(with_two_items)
16,
2,
16,
2,
1456065110
] |
def get_names(cls):
    """Collect the corpus of .py files to test, caching the list on `cls`.

    Files named bad*.py are skipped. NOTE(review): random.sample raises
    ValueError if fewer than 10 files are found — confirm the test
    directories always contain enough.
    """
    if cls.NAMES is not None:
        return cls.NAMES
    names = []
    for d in cls.test_directories:
        test_dir = os.path.join(basepath, d)
        for n in os.listdir(test_dir):
            if n.endswith('.py') and not n.startswith('bad'):
                names.append(os.path.join(test_dir, n))
    # Test limited subset of files unless the 'cpu' resource is specified.
    if not test.support.is_resource_enabled("cpu"):
        names = random.sample(names, 10)
    # bpo-31174: Store the names sample to always test the same files.
    # It prevents false alarms when hunting reference leaks.
    cls.NAMES = names
    return names
16,
2,
16,
2,
1456065110
] |
def load(path=DEFAULT_PATH):
    """Parse the file at *path* with loadfile() and return the result.

    Fix: the original passed `open(path)` directly, leaking the file
    handle until garbage collection; the `with` block closes it
    deterministically.
    """
    with open(path) as f:
        return loadfile(f)
3,
2,
3,
4,
1476164728
] |
def __init__(self, name):
    # Name of the function being described.
    self.name = name
    # Collected argument entries (filled in after construction).
    self.args = []
    # Return-type text; empty string until known.
    self.result_type = ''
    # Reference info for the result, if any.
    self.result_refs = None
3,
2,
3,
4,
1476164728
] |
def main():
    # Load the default data file and dump it back out.
    d = load()
    dump(d)
3,
2,
3,
4,
1476164728
] |
def __init__(self):
TestBase.__init__(self, 'float-libcall', result=""" | namhyung/uftrace | [
2338,
351,
2338,
337,
1392115490
] |
def build(self, name, cflags='', ldflags=''):
    """Build the test binary, linking libm; skip under instrumentation."""
    # cygprof doesn't support arguments now
    if '-finstrument-functions' in cflags:
        return TestBase.TEST_SKIP
    ldflags += " -lm"
    return TestBase.build(self, name, cflags, ldflags)
2338,
351,
2338,
337,
1392115490
] |
def __init__(self, environment, filename, translation=None, transform=None):
    """Parse a VRML file into a scene.

    :param environment: owning environment object (stored for later use).
    :param filename: path of the VRML file to parse.
    :param translation: optional 3-component offset; defaults to (0,0,0).
    :param transform: optional initial transformation matrix.
    :raises ValueError: if `translation` has not exactly 3 components.
    """
    self.environment = environment
    self.filename = filename
    if translation is None:
        translation = (0.0, 0.0, 0.0)
    elif len(translation) != 3:
        raise ValueError("Translation must be a 3-component offset")
    self.translation = tuple(translation)
    self._transform = transform
    vrml_parser = parser.Parser(parser.grammar, "vrmlFile")
    processor = parseprocessor.ParseProcessor(baseURI=self.filename)
    with open(self.filename, 'r') as f:
        data = f.read()
    # The nested [1][1] element of the parse result is the scene node.
    self._scene = vrml_parser.parse(data, processor=processor)[1][1]
    # Object list is built lazily (see _parse_children, which extends it).
    self._objects = None
4,
1,
4,
4,
1442839964
] |
def _parse_children(self, group, transform=None):
    """Recursively walk `group`'s children, composing nested transforms
    and collecting geometry into self._objects."""
    for child in group.children:
        if isinstance(child, basenodes.Inline):
            # Include the objects from the referenced file into the scene.
            path = os.path.join(os.path.dirname(self.filename),
                                child.url[0])
            loader = VRML_Loader(self.environment, path,
                                 translation=self.translation,
                                 transform=transform)
            self._objects.extend(loader.get_objects())
        elif isinstance(child, basenodes.Transform):
            # Jumble up transformation matrices, in case they are nested.
            forward = child.localMatrices().data[0]
            if forward is not None:
                if transform is not None:
                    new_transform = np.dot(transform, forward)
                else:
                    new_transform = forward
            else:
                new_transform = transform
            self._parse_children(child, new_transform)
        elif isinstance(child, nodetypes.Grouping):
            # Retrieve children from grouped nodes.
            self._parse_children(child, transform)
        elif isinstance(child, basenodes.Shape):
            # Parse the coordinates from a shape's geometry.
            self._parse_geometry(child.geometry, transform)
4,
1,
4,
4,
1442839964
] |
def forwards(self, orm):
    """Write your forwards migration here.

    Intentionally empty: this migration performs no schema or data changes.
    """
9,
1,
9,
36,
1314472327
] |
def scipy_dist(name, *args, **kwargs):
    """
    Wraps calling a scipy.stats distribution to allow for pickling.
    See https://github.com/scipy/scipy/issues/3125.
    """
    dist_factory = getattr(st, name)
    return dist_factory(*args, **kwargs)
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """
    The number of random variables that this distribution is over.
    :type: `int`
    """
    # Abstract: concrete distributions must override this.
    pass
91,
32,
91,
22,
1344992565
] |
def sample(self, n=1):
    """
    Returns one or more samples from this probability distribution.
    :param int n: Number of samples to return.
    :rtype: numpy.ndarray
    :return: An array containing samples from the
        distribution of shape ``(n, d)``, where ``d`` is the number of
        random variables.
    """
    # Abstract: concrete distributions must override this.
    pass
91,
32,
91,
22,
1344992565
] |
def _sample(self):
    # Abstract hook for subclasses; intentionally a no-op here.
    pass
91,
32,
91,
22,
1344992565
] |
def __init__(self, weights, dist, dist_args=None, dist_kw_args=None, shuffle=True):
    """Mixture of distributions with the given `weights`.

    `dist` may be either a sequence of distribution objects (one per
    weight) or a single distribution class instantiated per-component
    from rows of `dist_args`/`dist_kw_args`.
    """
    super(MixtureDistribution, self).__init__()
    self._weights = weights
    self._n_dist = len(weights)
    self._shuffle = shuffle
    try:
        # If `dist` is indexable, treat it as a list of distributions.
        self._example_dist = dist[0]
        self._is_dist_list = True
        self._dist_list = dist
        assert(self._n_dist == len(self._dist_list))
    except:
        # NOTE(review): bare except — a failing length assert above also
        # lands here and silently reinterprets `dist` as a single
        # distribution class. Consider narrowing to (TypeError,
        # IndexError); not changed here to preserve behavior.
        self._is_dist_list = False
        self._dist = dist
        self._dist_args = dist_args
        self._dist_kw_args = dist_kw_args
        assert(self._n_dist == self._dist_args.shape[0])
        # Instantiate component 0 as the exemplar (e.g. for n_rvs).
        self._example_dist = self._dist(
            *self._dist_arg(0),
            **self._dist_kw_arg(0)
        )
91,
32,
91,
22,
1344992565
] |
def _dist_kw_arg(self, k):
    """
    Returns a dictionary of keyword arguments
    for the k'th distribution.
    :param int k: Index of the distribution in question.
    :rtype: ``dict``
    """
    kw_arrays = self._dist_kw_args
    if kw_arrays is None:
        return {}
    return {name: values[k, :] for name, values in kw_arrays.items()}
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """Number of random variables, delegated to the exemplar component."""
    exemplar = self._example_dist
    return exemplar.n_rvs
91,
32,
91,
22,
1344992565
] |
def n_dist(self):
    """
    The number of distributions in the mixture distribution.
    """
    count = self._n_dist
    return count
91,
32,
91,
22,
1344992565
] |
def __init__(self, n_mps=None, particle_locations=None, particle_weights=None):
    """Initialize either from a dimension (one particle at the origin)
    or from explicit particle locations and weights.

    :raises ValueError: if both `n_mps` and explicit particles are given.
    """
    super(ParticleDistribution, self).__init__()
    if particle_locations is None or particle_weights is None:
        # Initialize with single particle at origin.
        self.particle_locations = np.zeros((1, n_mps))
        self.particle_weights = np.ones((1,))
    elif n_mps is None:
        self.particle_locations = particle_locations
        # Weights are forced nonnegative, then normalized to sum to 1.
        self.particle_weights = np.abs(particle_weights)
        self.particle_weights = self.particle_weights / np.sum(self.particle_weights)
    else:
        raise ValueError('Either the dimension of parameter space, `n_mps`, or the particles, `particle_locations` and `particle_weights` must be specified.')
91,
32,
91,
22,
1344992565
] |
def n_particles(self):
    """
    Returns the number of particles in the distribution
    :type: `int`
    """
    locations = self.particle_locations
    return locations.shape[0]
91,
32,
91,
22,
1344992565
] |
def n_ess(self):
    r"""
    Returns the effective sample size (ESS) of the current particle
    distribution.
    :type: `float`
    :return: The effective sample size, given by :math:`1/\sum_i w_i^2`.
    """
    squared_weights = self.particle_weights ** 2
    return 1 / squared_weights.sum()
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """
    Returns the dimension of each particle.
    :type: `int`
    """
    locations = self.particle_locations
    return locations.shape[1]
91,
32,
91,
22,
1344992565
] |
def particle_mean(weights, locations):
    r"""
    Returns the arithmetic mean of the `locations` weighted by `weights`
    :param numpy.ndarray weights: Weights of each particle in array of
        shape ``(n_particles,)``.
    :param numpy.ndarray locations: Locations of each particle in array
        of shape ``(n_particles, n_modelparams)``
    :rtype: :class:`numpy.ndarray`, shape ``(n_modelparams,)``.
    :returns: An array containing the mean
    """
    # weights (n,) contracted against locations (n, d) -> (d,)
    return weights @ locations
91,
32,
91,
22,
1344992565
] |
def particle_covariance_mtx(cls, weights, locations):
    """
    Returns an estimate of the covariance of a distribution
    represented by a given set of SMC particle.
    :param weights: An array of shape ``(n_particles,)`` containing
        the weights of each particle.
    :param location: An array of shape ``(n_particles, n_modelparams)``
        containing the locations of each particle.
    :rtype: :class:`numpy.ndarray`, shape
        ``(n_modelparams, n_modelparams)``.
    :returns: An array containing the estimated covariance matrix.
    """
    # Find the mean model vector, shape (n_modelparams, ).
    mu = cls.particle_mean(weights, locations)
    # Transpose the particle locations to have shape
    # (n_modelparams, n_particles).
    xs = locations.transpose([1, 0])
    # Give a shorter name to the particle weights, shape (n_particles, ).
    ws = weights
    cov = (
        # This sum is a reduction over the particle index, chosen to be
        # axis=2. Thus, the sum represents an expectation value over the
        # outer product $x . x^T$.
        #
        # All three factors have the particle index as the rightmost
        # index, axis=2. Using the Einstein summation convention (ESC),
        # we can reduce over the particle index easily while leaving
        # the model parameter index to vary between the two factors
        # of xs.
        #
        # This corresponds to evaluating A_{m,n} = w_{i} x_{m,i} x_{n,i}
        # using the ESC, where A_{m,n} is the temporary array created.
        np.einsum('i,mi,ni', ws, xs, xs)
        # We finish by subracting from the above expectation value
        # the outer product $mu . mu^T$.
        - np.dot(mu[..., np.newaxis], mu[np.newaxis, ...])
    )
    # The SMC approximation is not guaranteed to produce a
    # positive-semidefinite covariance matrix. If a negative eigenvalue
    # is produced, we should warn the caller of this.
    # Note: a warning is issued but `cov` is still returned unchanged.
    assert np.all(np.isfinite(cov))
    if not np.all(la.eig(cov)[0] >= 0):
        warnings.warn('Numerical error in covariance estimation causing positive semidefinite violation.', ApproximationWarning)
    return cov
91,
32,
91,
22,
1344992565
] |
def est_meanfn(self, fn):
    """
    Returns an the expectation value of a given function
    :math:`f` over the current particle distribution.
    Here, :math:`f` is represented by a function ``fn`` that is vectorized
    over particles, such that ``f(modelparams)`` has shape
    ``(n_particles, k)``, where ``n_particles = modelparams.shape[0]``, and
    where ``k`` is a positive integer.
    :param callable fn: Function implementing :math:`f` in a vectorized
        manner. (See above.)
    :rtype: :class:`numpy.ndarray`, shape ``(k, )``.
    :returns: An array containing the an estimate of the mean of :math:`f`.
    """
    # Weighted sum over the particle axis via the Einstein convention.
    return np.einsum('i...,i...',
                     self.particle_weights, fn(self.particle_locations)
                     )
91,
32,
91,
22,
1344992565
] |
def est_entropy(self):
    r"""
    Estimates the entropy of the current particle distribution
    as :math:`-\sum_i w_i \log w_i` where :math:`\{w_i\}`
    is the set of particles with nonzero weight.
    """
    weights = self.particle_weights
    positive = weights[weights > 0]
    return -(positive * np.log(positive)).sum()
91,
32,
91,
22,
1344992565
] |
def est_kl_divergence(self, other, kernel=None, delta=1e-2):
    """
    Finds the KL divergence between this and another particle
    distribution by using a kernel density estimator to smooth over the
    other distribution's particles.
    :param SMCUpdater other: distribution to compare against.
    :param kernel: smoothing kernel, passed through to ``_kl_divergence``;
        ``None`` selects that method's default.
    :param float delta: smoothing parameter, passed through.
    """
    return self._kl_divergence(
        other.particle_locations,
        other.particle_weights,
        kernel, delta
    )
91,
32,
91,
22,
1344992565
] |
def est_cluster_moments(self, cluster_opts=None):
    # TODO: document
    # Generator yielding, per cluster: (label, total weight, mean,
    # covariance matrix). Clustering is delegated to particle_clusters().
    if cluster_opts is None:
        cluster_opts = {}
    for cluster_label, cluster_particles in particle_clusters(
            self.particle_locations, self.particle_weights,
            **cluster_opts
    ):
        w = self.particle_weights[cluster_particles]
        l = self.particle_locations[cluster_particles]
        yield (
            cluster_label,
            sum(w),  # The zeroth moment is very useful here!
            self.particle_mean(w, l),
            self.particle_covariance_mtx(w, l)
        )
91,
32,
91,
22,
1344992565
] |
def est_cluster_metric(self, cluster_opts=None):
    """
    Returns an estimate of how much of the variance in the current posterior
    can be explained by a separation between *clusters*.
    """
    # Ratio of between-cluster to total variance, per model parameter.
    # The within-cluster covariance (wcv) is unpacked but unused here.
    wcv, bcv, tv = self.est_cluster_covs(cluster_opts)
    return np.diag(bcv) / np.diag(tv)
91,
32,
91,
22,
1344992565
] |
def est_credible_region(self, level=0.95, return_outside=False, modelparam_slice=None):
    """
    Returns an array containing particles inside a credible region of a
    given level, such that the described region has probability mass
    no less than the desired level.
    Particles in the returned region are selected by including the highest-
    weight particles first until the desired credibility level is reached.
    :param float level: Crediblity level to report.
    :param bool return_outside: If `True`, the return value is a tuple
        of the those particles within the credible region, and the rest
        of the posterior particle cloud.
    :param slice modelparam_slice: Slice over which model parameters
        to consider.
    :rtype: :class:`numpy.ndarray`, shape ``(n_credible, n_mps)``,
        where ``n_credible`` is the number of particles in the credible
        region and ``n_mps`` corresponds to the size of ``modelparam_slice``.
        If ``return_outside`` is ``True``, this method instead
        returns tuple ``(inside, outside)`` where ``inside`` is as
        described above, and ``outside`` has shape ``(n_particles-n_credible, n_mps)``.
    :return: An array of particles inside the estimated credible region. Or,
        if ``return_outside`` is ``True``, both the particles inside and the
        particles outside, as a tuple.
    """
    # which slice of modelparams to take
    s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
    mps = self.particle_locations[:, s_]
    # Start by sorting the particles by weight.
    # We do so by obtaining an array of indices `id_sort` such that
    # `particle_weights[id_sort]` is in descending order.
    id_sort = np.argsort(self.particle_weights)[::-1]
    # Find the cummulative sum of the sorted weights.
    cumsum_weights = np.cumsum(self.particle_weights[id_sort])
    # Find all the indices where the sum is less than level.
    # We first find id_cred such that
    # `all(cumsum_weights[id_cred] <= level)`.
    id_cred = cumsum_weights <= level
    # By construction, by adding the next particle to id_cred, it must be
    # true that `cumsum_weights[id_cred] >= level`, as required.
    id_cred[np.sum(id_cred)] = True
    # We now return a slice onto the particle_locations by first permuting
    # the particles according to the sort order, then by selecting the
    # credible particles.
    if return_outside:
        return (
            mps[id_sort][id_cred],
            mps[id_sort][np.logical_not(id_cred)]
        )
    else:
        return mps[id_sort][id_cred]
91,
32,
91,
22,
1344992565
] |
def region_est_ellipsoid(self, level=0.95, tol=0.0001, modelparam_slice=None):
    r"""
    Estimate a credible region by fitting the minimum-volume enclosing
    ellipsoid (MVEE) around the convex hull of a credible subset of
    particles.

    :param float level: Desired credibility level (see
        :meth:`SMCUpdater.est_credible_region`).
    :param float tol: Allowed error tolerance in the MVEE optimization
        (see :meth:`~qinfer.utils.mvee`).
    :param slice modelparam_slice: Slice over which model parameters
        to consider.
    :return: A tuple ``(A, c)`` where ``A`` is the covariance matrix of
        the ellipsoid and ``c`` is its center. A point :math:`\vec{x}`
        is in the ellipsoid whenever
        :math:`(\vec{x}-\vec{c})^{T}A^{-1}(\vec{x}-\vec{c})\leq 1`.
    :rtype: ``A`` is ``np.ndarray`` of shape ``(n_mps, n_mps)`` and the
        center is ``np.ndarray`` of shape ``(n_mps,)``, where ``n_mps``
        corresponds to the size of ``modelparam_slice``.
    """
    # Vertices of the convex hull of the credible particle subset.
    _, hull_vertices = self.region_est_hull(
        level=level, modelparam_slice=modelparam_slice
    )
    # Fit the minimum-volume enclosing ellipsoid to those vertices.
    ellipsoid_matrix, center = u.mvee(hull_vertices, tol)
    return ellipsoid_matrix, center
91,
32,
91,
22,
1344992565
] |
def __init__(self, *factors):
    """
    Build a product distribution from factor distributions.

    Accepts either several factor distributions as separate positional
    arguments, or a single iterable containing the factors.

    :param factors: Factor distributions, given either individually or
        as one iterable argument.
    """
    if len(factors) == 1:
        try:
            # A single argument may itself be an iterable of factors.
            self._factors = list(factors[0])
        except TypeError:
            # Not iterable: treat the lone argument as the only factor.
            # (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit; list() on a non-iterable
            # raises exactly TypeError.)
            self._factors = factors
    else:
        self._factors = factors
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """Total number of random variables across all factor distributions."""
    return sum(factor.n_rvs for factor in self._factors)
91,
32,
91,
22,
1344992565
] |
def __init__(self, ranges=_DEFAULT_RANGES):
    """
    Uniform distribution over a box given by per-variable ranges.

    :param ranges: Array-like of shape ``(n_rvs, 2)`` giving
        ``(low, high)`` for each random variable; a single 1D pair is
        promoted to shape ``(1, 2)``.
    """
    # asarray is a no-op for an existing ndarray, mirroring the
    # original isinstance check.
    ranges = np.asarray(ranges)
    if ranges.ndim == 1:
        # Promote a single (low, high) pair to a one-row matrix.
        ranges = ranges[np.newaxis, :]
    self._ranges = ranges
    self._n_rvs = ranges.shape[0]
    # Width of each variable's interval.
    self._delta = ranges[:, 1] - ranges[:, 0]
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """Number of random variables, one per row of the ranges array."""
    return self._n_rvs
91,
32,
91,
22,
1344992565
] |
def grad_log_pdf(self, var):
        """
        Return the gradient of the log of the uniform prior pdf at ``var``.

        :param var: Locations at which to evaluate; assumed shape
            ``(n_points, n_rvs)`` — TODO confirm against callers.
        :return: An array of zeros in the generic case (flat prior).
        """
        # THIS IS NOT TECHNICALLY LEGIT; BCRB doesn't technically work with a
        # prior that doesn't go to 0 at its end points. But we do it anyway.
        # NOTE(review): the single-row branch returns the constant
        # 12 / delta**2, the reciprocal of the uniform variance delta**2/12;
        # presumably a Fisher-information surrogate for the BCRB — confirm.
        if var.shape[0] == 1:
            return 12/(self._delta)**2
        else:
            return np.zeros(var.shape)
91,
32,
91,
22,
1344992565
] |
def __init__(self, values):
    """
    Store the given values as a single row vector.

    :param values: 1D sequence of values; kept internally with shape
        ``(1, len(values))``.
    """
    fixed = np.array(values)
    self._values = fixed[np.newaxis, :]
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """Number of random variables, i.e. the number of stored values."""
    return self._values.shape[1]
91,
32,
91,
22,
1344992565
] |
def __init__(self, mean, var, trunc=None):
    """
    Normal distribution with optional truncation.

    :param float mean: Mean of the distribution.
    :param float var: Variance of the distribution.
    :param trunc: Optional ``(low, high)`` pair; when given, the
        distribution is truncated to that interval.
    """
    self.mean = mean
    self.var = var
    # Standard deviation, computed once and reused below.
    sigma = np.sqrt(var)
    if trunc is None:
        self.dist = partial(scipy_dist, 'norm', mean, sigma)
    else:
        low, high = trunc
        # scipy's truncnorm takes the bounds in standardized units.
        a = (low - mean) / sigma
        b = (high - mean) / sigma
        self.dist = partial(scipy_dist, 'truncnorm', a, b, loc=mean, scale=sigma)
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """This distribution is univariate: exactly one random variable."""
    return 1
91,
32,
91,
22,
1344992565
] |
def grad_log_pdf(self, x):
    """Gradient of the normal log-pdf: ``(mean - x) / var``."""
    return (self.mean - x) / self.var
91,
32,
91,
22,
1344992565
] |
def __init__(self, mean, cov):
    """
    Multivariate normal distribution.

    :param mean: Array-like mean; flattened so its shape is guaranteed
        to be 1D.
    :param cov: Covariance matrix.
    """
    self.cov = cov
    # Cache the inverse covariance for use in grad_log_pdf.
    self.invcov = la.inv(cov)
    # Flatten so downstream code can rely on a 1D mean.
    self.mean = np.array(mean).flatten()
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """Dimension of the distribution, read off the flattened mean."""
    return self.mean.shape[0]
91,
32,
91,
22,
1344992565
] |
def grad_log_pdf(self, x):
    """
    Gradient of the multivariate normal log-pdf at ``x``:
    ``-invcov @ (x - mean)``, vectorized over rows of ``x``.
    """
    displacement = x - self.mean
    # Equivalent to -dot(invcov, displacement.T).T, without the
    # double transpose.
    return -np.dot(displacement, self.invcov.T)
91,
32,
91,
22,
1344992565
] |
def __init__(self, ranges=_DEFAULT_RANGES, weight=0.01):
    """
    Distribution over a box of per-variable ranges with an associated
    weight parameter.

    :param ranges: Array-like of shape ``(n_rvs, 2)`` giving
        ``(low, high)`` for each random variable; a single 1D pair is
        promoted to shape ``(1, 2)``.
    :param float weight: Weight parameter stored for later use —
        semantics depend on the enclosing class (TODO confirm).
    """
    # asarray is a no-op for an existing ndarray, mirroring the
    # original isinstance check.
    ranges = np.asarray(ranges)
    if ranges.ndim == 1:
        # Promote a single (low, high) pair to a one-row matrix.
        ranges = ranges[np.newaxis, :]
    self._ranges = ranges
    self._n_rvs = ranges.shape[0]
    # Width of each variable's interval.
    self._delta = ranges[:, 1] - ranges[:, 0]
    self._weight = weight
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """Number of random variables (rows of the ranges array)."""
    return self._n_rvs
91,
32,
91,
22,
1344992565
] |
def __init__(self, mu=0, sigma=1):
        """
        Lognormal prior distribution.

        :param float mu: Lognormal location parameter.
        :param float sigma: Lognormal scale parameter.
        """
        self.mu = mu # lognormal location parameter
        self.sigma = sigma # lognormal scale parameter
        # NOTE(review): these positional args map to scipy lognorm's
        # (s=1, loc=mu, scale=sigma) — i.e. a location-shifted lognormal
        # with shape fixed at 1. That contradicts the "location = 0"
        # comment and the usual log-space parameterization
        # (s=sigma, loc=0, scale=exp(mu)); verify the intended behavior.
        self.dist = partial(scipy_dist, 'lognorm', 1, mu, sigma) # scipy distribution location = 0
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """The lognormal distribution is univariate."""
    return 1
91,
32,
91,
22,
1344992565
] |
def __init__(self, alpha=None, beta=None, mean=None, var=None):
    """
    Beta distribution, parameterized either directly by its shape
    parameters ``alpha`` and ``beta``, or indirectly by its ``mean``
    and variance ``var``. When both pairs are supplied, the shape
    parameters take precedence.

    :param float alpha: First shape parameter.
    :param float beta: Second shape parameter.
    :param float mean: Mean of the distribution.
    :param float var: Variance of the distribution.
    :raises ValueError: If neither complete pair is supplied.
    """
    have_shapes = alpha is not None and beta is not None
    have_moments = mean is not None and var is not None
    if not (have_shapes or have_moments):
        raise ValueError(
            "BetaDistribution requires either (alpha and beta) "
            "or (mean and var)."
        )
    if have_shapes:
        self.alpha = alpha
        self.beta = beta
        # Standard moment formulas for Beta(alpha, beta).
        self.mean = alpha / (alpha + beta)
        self.var = alpha * beta / ((alpha + beta) ** 2 * (alpha + beta + 1))
    else:
        self.mean = mean
        self.var = var
        # Method of moments: invert the mean/variance formulas.
        self.alpha = mean ** 2 * (1 - mean) / var - mean
        self.beta = (1 - mean) ** 2 * mean / var - (1 - mean)
    self.dist = st.beta(a=self.alpha, b=self.beta)
91,
32,
91,
22,
1344992565
] |
def n_rvs(self):
    """The beta distribution is univariate."""
    return 1
91,
32,
91,
22,
1344992565
] |
def __init__(self, alpha):
    """
    Dirichlet distribution with the given concentration parameters.

    :param alpha: 1D array-like of concentration parameters.
    :raises ValueError: If ``alpha`` is not one-dimensional.
    """
    self._alpha = np.array(alpha)
    # Validate directly on the stored array (the public ``alpha``
    # property just returns it).
    if self._alpha.ndim != 1:
        raise ValueError('The input alpha must be a 1D list of concentration parameters.')
    self._dist = st.dirichlet(alpha=self._alpha)
91,
32,
91,
22,
1344992565
] |
def alpha(self):
    """Concentration parameters of the Dirichlet distribution."""
    return self._alpha
91,
32,
91,
22,
1344992565
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.