repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
pschmitt/pyteleloisirs | pyteleloisirs/pyteleloisirs.py | resize_program_image | def resize_program_image(img_url, img_size=300):
'''
Resize a program's thumbnail to the desired dimension
'''
match = re.match(r'.+/(\d+)x(\d+)/.+', img_url)
if not match:
_LOGGER.warning('Could not compute current image resolution of %s',
img_url)
return img_url
res_x = int(match.group(1))
res_y = int(match.group(2))
# aspect_ratio = res_x / res_y
target_res_y = int(img_size * res_y / res_x)
return re.sub(
r'{}x{}'.format(res_x, res_y),
r'{}x{}'.format(img_size, target_res_y),
img_url) | python | def resize_program_image(img_url, img_size=300):
'''
Resize a program's thumbnail to the desired dimension
'''
match = re.match(r'.+/(\d+)x(\d+)/.+', img_url)
if not match:
_LOGGER.warning('Could not compute current image resolution of %s',
img_url)
return img_url
res_x = int(match.group(1))
res_y = int(match.group(2))
# aspect_ratio = res_x / res_y
target_res_y = int(img_size * res_y / res_x)
return re.sub(
r'{}x{}'.format(res_x, res_y),
r'{}x{}'.format(img_size, target_res_y),
img_url) | [
"def",
"resize_program_image",
"(",
"img_url",
",",
"img_size",
"=",
"300",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'.+/(\\d+)x(\\d+)/.+'",
",",
"img_url",
")",
"if",
"not",
"match",
":",
"_LOGGER",
".",
"warning",
"(",
"'Could not compute current i... | Resize a program's thumbnail to the desired dimension | [
"Resize",
"a",
"program",
"s",
"thumbnail",
"to",
"the",
"desired",
"dimension"
] | d63610fd3729862455ac42afca440469f8063fba | https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L83-L99 | train |
pschmitt/pyteleloisirs | pyteleloisirs/pyteleloisirs.py | get_current_program_progress | def get_current_program_progress(program):
'''
Get the current progress of the program in %
'''
now = datetime.datetime.now()
program_duration = get_program_duration(program)
if not program_duration:
return
progress = now - program.get('start_time')
return progress.seconds * 100 / program_duration | python | def get_current_program_progress(program):
'''
Get the current progress of the program in %
'''
now = datetime.datetime.now()
program_duration = get_program_duration(program)
if not program_duration:
return
progress = now - program.get('start_time')
return progress.seconds * 100 / program_duration | [
"def",
"get_current_program_progress",
"(",
"program",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"program_duration",
"=",
"get_program_duration",
"(",
"program",
")",
"if",
"not",
"program_duration",
":",
"return",
"progress",
"=",... | Get the current progress of the program in % | [
"Get",
"the",
"current",
"progress",
"of",
"the",
"program",
"in",
"%"
] | d63610fd3729862455ac42afca440469f8063fba | https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L102-L111 | train |
pschmitt/pyteleloisirs | pyteleloisirs/pyteleloisirs.py | get_program_duration | def get_program_duration(program):
'''
Get a program's duration in seconds
'''
program_start = program.get('start_time')
program_end = program.get('end_time')
if not program_start or not program_end:
_LOGGER.error('Could not determine program start and/or end times.')
_LOGGER.debug('Program data: %s', program)
return
program_duration = program_end - program_start
return program_duration.seconds | python | def get_program_duration(program):
'''
Get a program's duration in seconds
'''
program_start = program.get('start_time')
program_end = program.get('end_time')
if not program_start or not program_end:
_LOGGER.error('Could not determine program start and/or end times.')
_LOGGER.debug('Program data: %s', program)
return
program_duration = program_end - program_start
return program_duration.seconds | [
"def",
"get_program_duration",
"(",
"program",
")",
":",
"program_start",
"=",
"program",
".",
"get",
"(",
"'start_time'",
")",
"program_end",
"=",
"program",
".",
"get",
"(",
"'end_time'",
")",
"if",
"not",
"program_start",
"or",
"not",
"program_end",
":",
... | Get a program's duration in seconds | [
"Get",
"a",
"program",
"s",
"duration",
"in",
"seconds"
] | d63610fd3729862455ac42afca440469f8063fba | https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L114-L125 | train |
pschmitt/pyteleloisirs | pyteleloisirs/pyteleloisirs.py | get_remaining_time | def get_remaining_time(program):
'''
Get the remaining time in seconds of a program that is currently on.
'''
now = datetime.datetime.now()
program_start = program.get('start_time')
program_end = program.get('end_time')
if not program_start or not program_end:
_LOGGER.error('Could not determine program start and/or end times.')
_LOGGER.debug('Program data: %s', program)
return
if now > program_end:
_LOGGER.error('The provided program has already ended.')
_LOGGER.debug('Program data: %s', program)
return 0
progress = now - program_start
return progress.seconds | python | def get_remaining_time(program):
'''
Get the remaining time in seconds of a program that is currently on.
'''
now = datetime.datetime.now()
program_start = program.get('start_time')
program_end = program.get('end_time')
if not program_start or not program_end:
_LOGGER.error('Could not determine program start and/or end times.')
_LOGGER.debug('Program data: %s', program)
return
if now > program_end:
_LOGGER.error('The provided program has already ended.')
_LOGGER.debug('Program data: %s', program)
return 0
progress = now - program_start
return progress.seconds | [
"def",
"get_remaining_time",
"(",
"program",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"program_start",
"=",
"program",
".",
"get",
"(",
"'start_time'",
")",
"program_end",
"=",
"program",
".",
"get",
"(",
"'end_time'",
")",
... | Get the remaining time in seconds of a program that is currently on. | [
"Get",
"the",
"remaining",
"time",
"in",
"seconds",
"of",
"a",
"program",
"that",
"is",
"currently",
"on",
"."
] | d63610fd3729862455ac42afca440469f8063fba | https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L128-L144 | train |
pschmitt/pyteleloisirs | pyteleloisirs/pyteleloisirs.py | extract_program_summary | def extract_program_summary(data):
'''
Extract the summary data from a program's detail page
'''
from bs4 import BeautifulSoup
soup = BeautifulSoup(data, 'html.parser')
try:
return soup.find(
'div', {'class': 'episode-synopsis'}
).find_all('div')[-1].text.strip()
except Exception:
_LOGGER.info('No summary found for program: %s',
soup.find('a', {'class': 'prog_name'}))
return "No summary" | python | def extract_program_summary(data):
'''
Extract the summary data from a program's detail page
'''
from bs4 import BeautifulSoup
soup = BeautifulSoup(data, 'html.parser')
try:
return soup.find(
'div', {'class': 'episode-synopsis'}
).find_all('div')[-1].text.strip()
except Exception:
_LOGGER.info('No summary found for program: %s',
soup.find('a', {'class': 'prog_name'}))
return "No summary" | [
"def",
"extract_program_summary",
"(",
"data",
")",
":",
"from",
"bs4",
"import",
"BeautifulSoup",
"soup",
"=",
"BeautifulSoup",
"(",
"data",
",",
"'html.parser'",
")",
"try",
":",
"return",
"soup",
".",
"find",
"(",
"'div'",
",",
"{",
"'class'",
":",
"'ep... | Extract the summary data from a program's detail page | [
"Extract",
"the",
"summary",
"data",
"from",
"a",
"program",
"s",
"detail",
"page"
] | d63610fd3729862455ac42afca440469f8063fba | https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L147-L160 | train |
pschmitt/pyteleloisirs | pyteleloisirs/pyteleloisirs.py | async_set_summary | async def async_set_summary(program):
'''
Set a program's summary
'''
import aiohttp
async with aiohttp.ClientSession() as session:
resp = await session.get(program.get('url'))
text = await resp.text()
summary = extract_program_summary(text)
program['summary'] = summary
return program | python | async def async_set_summary(program):
'''
Set a program's summary
'''
import aiohttp
async with aiohttp.ClientSession() as session:
resp = await session.get(program.get('url'))
text = await resp.text()
summary = extract_program_summary(text)
program['summary'] = summary
return program | [
"async",
"def",
"async_set_summary",
"(",
"program",
")",
":",
"import",
"aiohttp",
"async",
"with",
"aiohttp",
".",
"ClientSession",
"(",
")",
"as",
"session",
":",
"resp",
"=",
"await",
"session",
".",
"get",
"(",
"program",
".",
"get",
"(",
"'url'",
"... | Set a program's summary | [
"Set",
"a",
"program",
"s",
"summary"
] | d63610fd3729862455ac42afca440469f8063fba | https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L163-L173 | train |
pschmitt/pyteleloisirs | pyteleloisirs/pyteleloisirs.py | async_get_program_guide | async def async_get_program_guide(channel, no_cache=False, refresh_interval=4):
'''
Get the program data for a channel
'''
chan = await async_determine_channel(channel)
now = datetime.datetime.now()
max_cache_age = datetime.timedelta(hours=refresh_interval)
if not no_cache and 'guide' in _CACHE and _CACHE.get('guide').get(chan):
cache = _CACHE.get('guide').get(chan)
cache_age = cache.get('last_updated')
if now - cache_age < max_cache_age:
_LOGGER.debug('Found program guide in cache.')
return cache.get('data')
else:
_LOGGER.debug('Found outdated program guide in cache. Update it.')
_CACHE['guide'].pop(chan)
chans = await async_get_channels()
url = chans.get('data', {}).get(chan)
if not url:
_LOGGER.error('Could not determine URL for %s', chan)
return
soup = await _async_request_soup(url)
programs = []
for prg_item in soup.find_all('div', {'class': 'program-infos'}):
try:
prog_info = prg_item.find('a', {'class': 'prog_name'})
prog_name = prog_info.text.strip()
prog_url = prog_info.get('href')
if not prog_url:
_LOGGER.warning('Failed to retrive the detail URL for program %s. '
'The summary will be empty', prog_name)
prog_type = prg_item.find('span', {'class': 'prog_type'}).text.strip()
prog_times = prg_item.find('div', {'class': 'prog_progress'})
prog_start = datetime.datetime.fromtimestamp(
int(prog_times.get('data-start')))
prog_end = datetime.datetime.fromtimestamp(
int(prog_times.get('data-end')))
img = prg_item.find_previous_sibling().find(
'img', {'class': 'prime_broadcast_image'})
prog_img = img.get('data-src') if img else None
programs.append(
{'name': prog_name, 'type': prog_type, 'img': prog_img,
'url': prog_url, 'summary': None, 'start_time': prog_start,
'end_time': prog_end})
except Exception as exc:
_LOGGER.error('Exception occured while fetching the program '
'guide for channel %s: %s', chan, exc)
import traceback
traceback.print_exc()
# Set the program summaries asynchronously
tasks = [async_set_summary(prog) for prog in programs]
programs = await asyncio.gather(*tasks)
if programs:
if 'guide' not in _CACHE:
_CACHE['guide'] = {}
_CACHE['guide'][chan] = {'last_updated': now, 'data': programs}
return programs | python | async def async_get_program_guide(channel, no_cache=False, refresh_interval=4):
'''
Get the program data for a channel
'''
chan = await async_determine_channel(channel)
now = datetime.datetime.now()
max_cache_age = datetime.timedelta(hours=refresh_interval)
if not no_cache and 'guide' in _CACHE and _CACHE.get('guide').get(chan):
cache = _CACHE.get('guide').get(chan)
cache_age = cache.get('last_updated')
if now - cache_age < max_cache_age:
_LOGGER.debug('Found program guide in cache.')
return cache.get('data')
else:
_LOGGER.debug('Found outdated program guide in cache. Update it.')
_CACHE['guide'].pop(chan)
chans = await async_get_channels()
url = chans.get('data', {}).get(chan)
if not url:
_LOGGER.error('Could not determine URL for %s', chan)
return
soup = await _async_request_soup(url)
programs = []
for prg_item in soup.find_all('div', {'class': 'program-infos'}):
try:
prog_info = prg_item.find('a', {'class': 'prog_name'})
prog_name = prog_info.text.strip()
prog_url = prog_info.get('href')
if not prog_url:
_LOGGER.warning('Failed to retrive the detail URL for program %s. '
'The summary will be empty', prog_name)
prog_type = prg_item.find('span', {'class': 'prog_type'}).text.strip()
prog_times = prg_item.find('div', {'class': 'prog_progress'})
prog_start = datetime.datetime.fromtimestamp(
int(prog_times.get('data-start')))
prog_end = datetime.datetime.fromtimestamp(
int(prog_times.get('data-end')))
img = prg_item.find_previous_sibling().find(
'img', {'class': 'prime_broadcast_image'})
prog_img = img.get('data-src') if img else None
programs.append(
{'name': prog_name, 'type': prog_type, 'img': prog_img,
'url': prog_url, 'summary': None, 'start_time': prog_start,
'end_time': prog_end})
except Exception as exc:
_LOGGER.error('Exception occured while fetching the program '
'guide for channel %s: %s', chan, exc)
import traceback
traceback.print_exc()
# Set the program summaries asynchronously
tasks = [async_set_summary(prog) for prog in programs]
programs = await asyncio.gather(*tasks)
if programs:
if 'guide' not in _CACHE:
_CACHE['guide'] = {}
_CACHE['guide'][chan] = {'last_updated': now, 'data': programs}
return programs | [
"async",
"def",
"async_get_program_guide",
"(",
"channel",
",",
"no_cache",
"=",
"False",
",",
"refresh_interval",
"=",
"4",
")",
":",
"chan",
"=",
"await",
"async_determine_channel",
"(",
"channel",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
... | Get the program data for a channel | [
"Get",
"the",
"program",
"data",
"for",
"a",
"channel"
] | d63610fd3729862455ac42afca440469f8063fba | https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L176-L232 | train |
pschmitt/pyteleloisirs | pyteleloisirs/pyteleloisirs.py | async_get_current_program | async def async_get_current_program(channel, no_cache=False):
'''
Get the current program info
'''
chan = await async_determine_channel(channel)
guide = await async_get_program_guide(chan, no_cache)
if not guide:
_LOGGER.warning('Could not retrieve TV program for %s', channel)
return
now = datetime.datetime.now()
for prog in guide:
start = prog.get('start_time')
end = prog.get('end_time')
if now > start and now < end:
return prog | python | async def async_get_current_program(channel, no_cache=False):
'''
Get the current program info
'''
chan = await async_determine_channel(channel)
guide = await async_get_program_guide(chan, no_cache)
if not guide:
_LOGGER.warning('Could not retrieve TV program for %s', channel)
return
now = datetime.datetime.now()
for prog in guide:
start = prog.get('start_time')
end = prog.get('end_time')
if now > start and now < end:
return prog | [
"async",
"def",
"async_get_current_program",
"(",
"channel",
",",
"no_cache",
"=",
"False",
")",
":",
"chan",
"=",
"await",
"async_determine_channel",
"(",
"channel",
")",
"guide",
"=",
"await",
"async_get_program_guide",
"(",
"chan",
",",
"no_cache",
")",
"if",... | Get the current program info | [
"Get",
"the",
"current",
"program",
"info"
] | d63610fd3729862455ac42afca440469f8063fba | https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L235-L249 | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | PublishManager.publish | def publish(self, distribution, storage=""):
"""
Get or create publish
"""
try:
return self._publishes[distribution]
except KeyError:
self._publishes[distribution] = Publish(self.client, distribution, timestamp=self.timestamp, storage=(storage or self.storage))
return self._publishes[distribution] | python | def publish(self, distribution, storage=""):
"""
Get or create publish
"""
try:
return self._publishes[distribution]
except KeyError:
self._publishes[distribution] = Publish(self.client, distribution, timestamp=self.timestamp, storage=(storage or self.storage))
return self._publishes[distribution] | [
"def",
"publish",
"(",
"self",
",",
"distribution",
",",
"storage",
"=",
"\"\"",
")",
":",
"try",
":",
"return",
"self",
".",
"_publishes",
"[",
"distribution",
"]",
"except",
"KeyError",
":",
"self",
".",
"_publishes",
"[",
"distribution",
"]",
"=",
"Pu... | Get or create publish | [
"Get",
"or",
"create",
"publish"
] | 7eb4ce1c508666bad0e6a0d4c5c561b1485ed558 | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L29-L37 | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | PublishManager.add | def add(self, snapshot, distributions, component='main', storage=""):
""" Add mirror or repo to publish """
for dist in distributions:
self.publish(dist, storage=storage).add(snapshot, component) | python | def add(self, snapshot, distributions, component='main', storage=""):
""" Add mirror or repo to publish """
for dist in distributions:
self.publish(dist, storage=storage).add(snapshot, component) | [
"def",
"add",
"(",
"self",
",",
"snapshot",
",",
"distributions",
",",
"component",
"=",
"'main'",
",",
"storage",
"=",
"\"\"",
")",
":",
"for",
"dist",
"in",
"distributions",
":",
"self",
".",
"publish",
"(",
"dist",
",",
"storage",
"=",
"storage",
")... | Add mirror or repo to publish | [
"Add",
"mirror",
"or",
"repo",
"to",
"publish"
] | 7eb4ce1c508666bad0e6a0d4c5c561b1485ed558 | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L39-L42 | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | PublishManager._publish_match | def _publish_match(self, publish, names=False, name_only=False):
"""
Check if publish name matches list of names or regex patterns
"""
if names:
for name in names:
if not name_only and isinstance(name, re._pattern_type):
if re.match(name, publish.name):
return True
else:
operand = name if name_only else [name, './%s' % name]
if publish in operand:
return True
return False
else:
return True | python | def _publish_match(self, publish, names=False, name_only=False):
"""
Check if publish name matches list of names or regex patterns
"""
if names:
for name in names:
if not name_only and isinstance(name, re._pattern_type):
if re.match(name, publish.name):
return True
else:
operand = name if name_only else [name, './%s' % name]
if publish in operand:
return True
return False
else:
return True | [
"def",
"_publish_match",
"(",
"self",
",",
"publish",
",",
"names",
"=",
"False",
",",
"name_only",
"=",
"False",
")",
":",
"if",
"names",
":",
"for",
"name",
"in",
"names",
":",
"if",
"not",
"name_only",
"and",
"isinstance",
"(",
"name",
",",
"re",
... | Check if publish name matches list of names or regex patterns | [
"Check",
"if",
"publish",
"name",
"matches",
"list",
"of",
"names",
"or",
"regex",
"patterns"
] | 7eb4ce1c508666bad0e6a0d4c5c561b1485ed558 | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L87-L102 | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | PublishManager.get_repo_information | def get_repo_information(config, client, fill_repo=False, components=[]):
""" fill two dictionnaries : one containing all the packages for every repository
and the second one associating to every component of every publish its repository"""
repo_dict = {}
publish_dict = {}
for origin in ['repo', 'mirror']:
for name, repo in config.get(origin, {}).items():
if components and repo.get('component') not in components:
continue
if fill_repo and origin == 'repo':
packages = Publish._get_packages("repos", name)
repo_dict[name] = packages
for distribution in repo.get('distributions'):
publish_name = str.join('/', distribution.split('/')[:-1])
publish_dict[(publish_name, repo.get('component'))] = name
return (repo_dict, publish_dict) | python | def get_repo_information(config, client, fill_repo=False, components=[]):
""" fill two dictionnaries : one containing all the packages for every repository
and the second one associating to every component of every publish its repository"""
repo_dict = {}
publish_dict = {}
for origin in ['repo', 'mirror']:
for name, repo in config.get(origin, {}).items():
if components and repo.get('component') not in components:
continue
if fill_repo and origin == 'repo':
packages = Publish._get_packages("repos", name)
repo_dict[name] = packages
for distribution in repo.get('distributions'):
publish_name = str.join('/', distribution.split('/')[:-1])
publish_dict[(publish_name, repo.get('component'))] = name
return (repo_dict, publish_dict) | [
"def",
"get_repo_information",
"(",
"config",
",",
"client",
",",
"fill_repo",
"=",
"False",
",",
"components",
"=",
"[",
"]",
")",
":",
"repo_dict",
"=",
"{",
"}",
"publish_dict",
"=",
"{",
"}",
"for",
"origin",
"in",
"[",
"'repo'",
",",
"'mirror'",
"... | fill two dictionnaries : one containing all the packages for every repository
and the second one associating to every component of every publish its repository | [
"fill",
"two",
"dictionnaries",
":",
"one",
"containing",
"all",
"the",
"packages",
"for",
"every",
"repository",
"and",
"the",
"second",
"one",
"associating",
"to",
"every",
"component",
"of",
"every",
"publish",
"its",
"repository"
] | 7eb4ce1c508666bad0e6a0d4c5c561b1485ed558 | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L144-L161 | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | Publish.compare | def compare(self, other, components=[]):
"""
Compare two publishes
It expects that other publish is same or older than this one
Return tuple (diff, equal) of dict {'component': ['snapshot']}
"""
lg.debug("Comparing publish %s (%s) and %s (%s)" % (self.name, self.storage or "local", other.name, other.storage or "local"))
diff, equal = ({}, {})
for component, snapshots in self.components.items():
if component not in list(other.components.keys()):
# Component is missing in other
diff[component] = snapshots
continue
equal_snapshots = list(set(snapshots).intersection(other.components[component]))
if equal_snapshots:
lg.debug("Equal snapshots for %s: %s" % (component, equal_snapshots))
equal[component] = equal_snapshots
diff_snapshots = list(set(snapshots).difference(other.components[component]))
if diff_snapshots:
lg.debug("Different snapshots for %s: %s" % (component, diff_snapshots))
diff[component] = diff_snapshots
return (diff, equal) | python | def compare(self, other, components=[]):
"""
Compare two publishes
It expects that other publish is same or older than this one
Return tuple (diff, equal) of dict {'component': ['snapshot']}
"""
lg.debug("Comparing publish %s (%s) and %s (%s)" % (self.name, self.storage or "local", other.name, other.storage or "local"))
diff, equal = ({}, {})
for component, snapshots in self.components.items():
if component not in list(other.components.keys()):
# Component is missing in other
diff[component] = snapshots
continue
equal_snapshots = list(set(snapshots).intersection(other.components[component]))
if equal_snapshots:
lg.debug("Equal snapshots for %s: %s" % (component, equal_snapshots))
equal[component] = equal_snapshots
diff_snapshots = list(set(snapshots).difference(other.components[component]))
if diff_snapshots:
lg.debug("Different snapshots for %s: %s" % (component, diff_snapshots))
diff[component] = diff_snapshots
return (diff, equal) | [
"def",
"compare",
"(",
"self",
",",
"other",
",",
"components",
"=",
"[",
"]",
")",
":",
"lg",
".",
"debug",
"(",
"\"Comparing publish %s (%s) and %s (%s)\"",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"storage",
"or",
"\"local\"",
",",
"other",
".... | Compare two publishes
It expects that other publish is same or older than this one
Return tuple (diff, equal) of dict {'component': ['snapshot']} | [
"Compare",
"two",
"publishes",
"It",
"expects",
"that",
"other",
"publish",
"is",
"same",
"or",
"older",
"than",
"this",
"one"
] | 7eb4ce1c508666bad0e6a0d4c5c561b1485ed558 | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L275-L302 | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | Publish._get_publish | def _get_publish(self):
"""
Find this publish on remote
"""
publishes = self._get_publishes(self.client)
for publish in publishes:
if publish['Distribution'] == self.distribution and \
publish['Prefix'].replace("/", "_") == (self.prefix or '.') and \
publish['Storage'] == self.storage:
return publish
raise NoSuchPublish("Publish %s (%s) does not exist" % (self.name, self.storage or "local")) | python | def _get_publish(self):
"""
Find this publish on remote
"""
publishes = self._get_publishes(self.client)
for publish in publishes:
if publish['Distribution'] == self.distribution and \
publish['Prefix'].replace("/", "_") == (self.prefix or '.') and \
publish['Storage'] == self.storage:
return publish
raise NoSuchPublish("Publish %s (%s) does not exist" % (self.name, self.storage or "local")) | [
"def",
"_get_publish",
"(",
"self",
")",
":",
"publishes",
"=",
"self",
".",
"_get_publishes",
"(",
"self",
".",
"client",
")",
"for",
"publish",
"in",
"publishes",
":",
"if",
"publish",
"[",
"'Distribution'",
"]",
"==",
"self",
".",
"distribution",
"and",... | Find this publish on remote | [
"Find",
"this",
"publish",
"on",
"remote"
] | 7eb4ce1c508666bad0e6a0d4c5c561b1485ed558 | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L351-L361 | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | Publish.save_publish | def save_publish(self, save_path):
"""
Serialize publish in YAML
"""
timestamp = time.strftime("%Y%m%d%H%M%S")
yaml_dict = {}
yaml_dict["publish"] = self.name
yaml_dict["name"] = timestamp
yaml_dict["components"] = []
yaml_dict["storage"] = self.storage
for component, snapshots in self.components.items():
packages = self.get_packages(component)
package_dict = []
for package in packages:
(arch, name, version, ref) = self.parse_package_ref(package)
package_dict.append({'package': name, 'version': version, 'arch': arch, 'ref': ref})
snapshot = self._find_snapshot(snapshots[0])
yaml_dict["components"].append({'component': component, 'snapshot': snapshot['Name'],
'description': snapshot['Description'], 'packages': package_dict})
name = self.name.replace('/', '-')
lg.info("Saving publish %s in %s" % (name, save_path))
with open(save_path, 'w') as save_file:
yaml.dump(yaml_dict, save_file, default_flow_style=False) | python | def save_publish(self, save_path):
"""
Serialize publish in YAML
"""
timestamp = time.strftime("%Y%m%d%H%M%S")
yaml_dict = {}
yaml_dict["publish"] = self.name
yaml_dict["name"] = timestamp
yaml_dict["components"] = []
yaml_dict["storage"] = self.storage
for component, snapshots in self.components.items():
packages = self.get_packages(component)
package_dict = []
for package in packages:
(arch, name, version, ref) = self.parse_package_ref(package)
package_dict.append({'package': name, 'version': version, 'arch': arch, 'ref': ref})
snapshot = self._find_snapshot(snapshots[0])
yaml_dict["components"].append({'component': component, 'snapshot': snapshot['Name'],
'description': snapshot['Description'], 'packages': package_dict})
name = self.name.replace('/', '-')
lg.info("Saving publish %s in %s" % (name, save_path))
with open(save_path, 'w') as save_file:
yaml.dump(yaml_dict, save_file, default_flow_style=False) | [
"def",
"save_publish",
"(",
"self",
",",
"save_path",
")",
":",
"timestamp",
"=",
"time",
".",
"strftime",
"(",
"\"%Y%m%d%H%M%S\"",
")",
"yaml_dict",
"=",
"{",
"}",
"yaml_dict",
"[",
"\"publish\"",
"]",
"=",
"self",
".",
"name",
"yaml_dict",
"[",
"\"name\"... | Serialize publish in YAML | [
"Serialize",
"publish",
"in",
"YAML"
] | 7eb4ce1c508666bad0e6a0d4c5c561b1485ed558 | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L367-L392 | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | Publish.restore_publish | def restore_publish(self, config, components, recreate=False):
"""
Restore publish from config file
"""
if "all" in components:
components = []
try:
self.load()
publish = True
except NoSuchPublish:
publish = False
new_publish_snapshots = []
to_publish = []
created_snapshots = []
for saved_component in config.get('components', []):
component_name = saved_component.get('component')
if not component_name:
raise Exception("Corrupted file")
if components and component_name not in components:
continue
saved_packages = []
if not saved_component.get('packages'):
raise Exception("Component %s is empty" % component_name)
for package in saved_component.get('packages'):
package_ref = '{} {} {} {}'.format(package.get('arch'), package.get('package'), package.get('version'), package.get('ref'))
saved_packages.append(package_ref)
to_publish.append(component_name)
timestamp = time.strftime("%Y%m%d%H%M%S")
snapshot_name = '{}-{}-{}'.format("restored", timestamp, saved_component.get('snapshot'))
lg.debug("Creating snapshot %s for component %s of packages: %s"
% (snapshot_name, component_name, saved_packages))
try:
self.client.do_post(
'/snapshots',
data={
'Name': snapshot_name,
'SourceSnapshots': [],
'Description': saved_component.get('description'),
'PackageRefs': saved_packages,
}
)
created_snapshots.append(snapshot_name)
except AptlyException as e:
if e.res.status_code == 404:
# delete all the previously created
# snapshots because the file is corrupted
self._remove_snapshots(created_snapshots)
def restore_publish(self, config, components, recreate=False):
    """Restore a publish from a previously saved config dictionary.

    For every requested component in the backup, a new snapshot named
    ``restored-<timestamp>-<original snapshot>`` is created from the saved
    package references, and the publish is updated to use it.

    Parameters
    ----------
    config: dict
        Parsed backup file; expects a ``components`` list whose entries
        carry ``component``, ``snapshot``, ``description`` and ``packages``.
    components: list of str
        Component names to restore; the literal value ``"all"`` restores
        every component found in the backup.
    recreate: bool, optional
        Passed through to ``do_publish`` to drop and re-create the publish.

    Raises
    ------
    Exception
        On a corrupted/empty backup entry, on missing source packages, or
        when not all requested components are present in the backup.
    """
    if "all" in components:
        components = []

    # Sync current state from the API; a missing publish is acceptable
    # because we may be restoring it from scratch.
    try:
        self.load()
    except NoSuchPublish:
        pass

    restored_snapshots = []   # {'Component', 'Name'} entries to publish
    restored_components = []  # component names taken from the backup
    created = []              # snapshot names created so far (for rollback)

    for entry in config.get('components', []):
        name = entry.get('component')
        if not name:
            raise Exception("Corrupted file")
        if components and name not in components:
            continue

        saved_packages = entry.get('packages')
        if not saved_packages:
            raise Exception("Component %s is empty" % name)
        refs = [
            '{} {} {} {}'.format(pkg.get('arch'), pkg.get('package'),
                                 pkg.get('version'), pkg.get('ref'))
            for pkg in saved_packages
        ]

        restored_components.append(name)
        snapshot_name = '{}-{}-{}'.format(
            "restored", time.strftime("%Y%m%d%H%M%S"), entry.get('snapshot'))
        lg.debug("Creating snapshot %s for component %s of packages: %s"
                 % (snapshot_name, name, refs))
        try:
            self.client.do_post(
                '/snapshots',
                data={
                    'Name': snapshot_name,
                    'SourceSnapshots': [],
                    'Description': entry.get('description'),
                    'PackageRefs': refs,
                }
            )
            created.append(snapshot_name)
        except AptlyException as err:
            if err.res.status_code != 404:
                raise
            # The backup references packages no longer on the server:
            # roll back everything created so far.
            self._remove_snapshots(created)
            raise Exception("Source snapshot or packages don't exist")
        restored_snapshots.append({
            'Component': name,
            'Name': snapshot_name
        })

    if components:
        # Drop any currently published snapshot for the components we are
        # restoring, then verify the backup actually covered all of them.
        self.publish_snapshots = [
            snap for snap in self.publish_snapshots
            if snap['Component'] not in components
            and snap['Component'] not in restored_components
        ]
        found = [snap for snap in restored_snapshots
                 if snap['Component'] in components]
        if len(found) != len(components):
            self._remove_snapshots(created)
            raise Exception("Not possible to find all the components required in the backup file")

    self.publish_snapshots += restored_snapshots
    self.do_publish(recreate=recreate, merge_snapshots=False)
def load(self):
    """Populate this object from the remote publish definition.

    Fetches the publish from the Aptly API, records its architectures and
    per-component snapshots, and registers each snapshot's source
    snapshots via ``add()`` (falling back to the snapshot itself when it
    has no recorded sources).
    """
    publish = self._get_publish()
    self.architectures = publish['Architectures']
    for entry in publish['Sources']:
        component = entry['Component']
        snapshot_name = entry['Name']
        self.publish_snapshots.append({
            'Component': component,
            'Name': snapshot_name
        })
        remote = self._find_snapshot(snapshot_name)
        for source in self._get_source_snapshots(remote, fallback_self=True):
            self.add(source, component)
def get_packages(self, component=None, components=[], packages=None):
    """Return package refs for the given component(s).

    Parameters
    ----------
    component: str, optional
        Single component name; shorthand for ``components=[component]``.
    components: list of str, optional
        Component names to collect refs for; empty means all components.
        (Default list is never mutated.)
    packages: list of str, optional
        If given, keep only refs whose package name is in this list.

    Returns
    -------
    list of str
        Aptly package references ("arch name version id").
    """
    if component:
        components = [component]
    package_refs = []
    for snapshot in self.publish_snapshots:
        # Bug fix: filter on the effective component list. Previously the
        # guard was `if component and ...`, so calling with only
        # `components=[...]` silently ignored the filter and returned
        # refs for every component.
        if components and snapshot['Component'] not in components:
            # We don't want packages for this component
            continue
        component_refs = self._get_packages(self.client, "snapshots", snapshot['Name'])
        if packages:
            # Filter package names
            for ref in component_refs:
                if self.parse_package_ref(ref)[1] in packages:
                    package_refs.append(ref)
        else:
            package_refs.extend(component_refs)
    return package_refs
def parse_package_ref(self, ref):
    """Split an Aptly package reference into its four fields.

    Parameters
    ----------
    ref: str or None
        Reference of the form "architecture package_name version id".

    Returns
    -------
    tuple of str, or None
        (architecture, package_name, version, id); None when `ref` is falsy.
    """
    if not ref:
        return None
    # Bug fix: the pattern was a plain string containing "\ ", an invalid
    # escape sequence (DeprecationWarning on Python 3.6+, SyntaxError in
    # future versions). A raw string keeps the pattern byte-identical.
    parsed = re.match(r'(.*)\ (.*)\ (.*)\ (.*)', ref)
    return parsed.groups()
def add(self, snapshot, component='main'):
    """Register a snapshot under the given component.

    Creates the component's list on first use; later calls append to it.
    """
    self.components.setdefault(component, []).append(snapshot)
tcpcloud/python-aptly | aptly/publisher/__init__.py | Publish._find_snapshot | def _find_snapshot(self, name):
"""
Find snapshot on remote by name or regular expression
"""
remote_snapshots = self._get_snapshots(self.client)
for remote in reversed(remote_snapshots):
if remote["Name"] == name or \
re.match(name, remote["Name"]):
return remote
return None | python | def _find_snapshot(self, name):
"""
Find snapshot on remote by name or regular expression
"""
remote_snapshots = self._get_snapshots(self.client)
for remote in reversed(remote_snapshots):
if remote["Name"] == name or \
re.match(name, remote["Name"]):
return remote
return None | [
"def",
"_find_snapshot",
"(",
"self",
",",
"name",
")",
":",
"remote_snapshots",
"=",
"self",
".",
"_get_snapshots",
"(",
"self",
".",
"client",
")",
"for",
"remote",
"in",
"reversed",
"(",
"remote_snapshots",
")",
":",
"if",
"remote",
"[",
"\"Name\"",
"]"... | Find snapshot on remote by name or regular expression | [
"Find",
"snapshot",
"on",
"remote",
"by",
"name",
"or",
"regular",
"expression"
] | 7eb4ce1c508666bad0e6a0d4c5c561b1485ed558 | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L603-L612 | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | Publish._get_source_snapshots | def _get_source_snapshots(self, snapshot, fallback_self=False):
"""
Get list of source snapshot names of given snapshot
TODO: we have to decide by description at the moment
"""
if not snapshot:
return []
source_snapshots = re.findall(r"'([\w\d\.-]+)'", snapshot['Description'])
if not source_snapshots and fallback_self:
source_snapshots = [snapshot['Name']]
source_snapshots.sort()
return source_snapshots | python | def _get_source_snapshots(self, snapshot, fallback_self=False):
"""
Get list of source snapshot names of given snapshot
TODO: we have to decide by description at the moment
"""
if not snapshot:
return []
source_snapshots = re.findall(r"'([\w\d\.-]+)'", snapshot['Description'])
if not source_snapshots and fallback_self:
source_snapshots = [snapshot['Name']]
source_snapshots.sort()
return source_snapshots | [
"def",
"_get_source_snapshots",
"(",
"self",
",",
"snapshot",
",",
"fallback_self",
"=",
"False",
")",
":",
"if",
"not",
"snapshot",
":",
"return",
"[",
"]",
"source_snapshots",
"=",
"re",
".",
"findall",
"(",
"r\"'([\\w\\d\\.-]+)'\"",
",",
"snapshot",
"[",
... | Get list of source snapshot names of given snapshot
TODO: we have to decide by description at the moment | [
"Get",
"list",
"of",
"source",
"snapshot",
"names",
"of",
"given",
"snapshot"
] | 7eb4ce1c508666bad0e6a0d4c5c561b1485ed558 | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L614-L628 | train |
def merge_snapshots(self):
    """Build one snapshot per component by merging that component's snapshots.

    Components backed by a single snapshot are published as-is. For
    multi-snapshot components, an existing merge snapshot with exactly the
    same sources is reused; otherwise a new merge snapshot is created from
    the union of the source snapshots' package refs. Results are collected
    into ``self.publish_snapshots``.
    """
    self.publish_snapshots = []
    # Publish name with path separators flattened, reused in snapshot names.
    flat_name = self.name.replace('./', '').replace('/', '-')
    for component, snapshots in self.components.items():
        if len(snapshots) <= 1:
            # Nothing to merge for a single-snapshot component.
            lg.debug("Component %s has only one snapshot %s, not creating merge snapshot" % (component, snapshots))
            self.publish_snapshots.append({
                'Component': component,
                'Name': snapshots[0]
            })
            continue

        # Reuse the latest existing merge snapshot when its sources match
        # exactly what we want to merge now.
        existing = self._find_snapshot(r'^%s%s-%s-\d+' % (self.merge_prefix, flat_name, component))
        if existing:
            existing_sources = self._get_source_snapshots(existing)
            wanted_sources = sorted(snapshots)
            lg.debug("Comparing snapshots: snapshot_name=%s, snapshot_sources=%s, wanted_sources=%s" % (existing['Name'], existing_sources, wanted_sources))
            if wanted_sources == existing_sources:
                lg.info("Remote merge snapshot already exists: %s (%s)" % (existing['Name'], existing_sources))
                self.publish_snapshots.append({
                    'Component': component,
                    'Name': existing['Name']
                })
                continue

        merge_name = '%s%s-%s-%s' % (self.merge_prefix, flat_name, component, self.timestamp)
        lg.info("Creating merge snapshot %s for component %s of snapshots %s" % (merge_name, component, snapshots))
        # Collect package refs from every source snapshot.
        refs = []
        for snapshot in snapshots:
            refs.extend(self._get_packages(self.client, "snapshots", snapshot))
        try:
            self.client.do_post(
                '/snapshots',
                data={
                    'Name': merge_name,
                    'SourceSnapshots': snapshots,
                    'Description': "Merged from sources: %s" % ', '.join("'%s'" % snap for snap in snapshots),
                    'PackageRefs': refs,
                }
            )
        except AptlyException as e:
            if e.res.status_code == 400:
                # Name collision: assume an identical snapshot already exists.
                lg.warning("Error creating snapshot %s, assuming it already exists" % merge_name)
            else:
                raise
        self.publish_snapshots.append({
            'Component': component,
            'Name': merge_name
        })
def timing_decorator(func):
    """Decorator that optionally reports func's wall-clock run time.

    The wrapped function accepts an extra keyword argument ``print_time``
    (default False, popped before the call); when true, the elapsed time
    is printed after the call completes.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Call func, timing it when print_time=True is passed."""
        show_time = kwargs.pop('print_time', False)
        if not show_time:
            return func(*args, **kwargs)
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print(func.__name__ + ' took %.3f seconds' % (end - start))
        return result
    return wrapper
def save_load_result(func):
    """Decorator that saves and/or loads func's output with pickle.

    The wrapped function gains extra keyword arguments (all popped before
    the call reaches func). Default behavior is no saving and loading;
    specify ``save_name`` to enable both.

    Parameters (of the wrapper)
    ---------------------------
    save_name: str, optional
        File name including directory and excluding extension.
    save: bool, optional
        Whether or not to save; defaults to ``save_name is not None``.
    load: bool, optional
        Whether or not to load; defaults to ``save_name is not None``.
    overwrite_existing: bool, optional
        When the save path already contains a file: if True the file is
        overwritten, if False the data is saved with the system time
        appended to the file name.
    warn_if_error: bool, optional
        Whether to issue a UserWarning if load=True and save_name is not
        None but loading fails.

    Returns
    -------
    The (possibly cached) output of func.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Load a cached result if available, else call func and maybe save."""
        save_name = kwargs.pop('save_name', None)
        save = kwargs.pop('save', save_name is not None)
        load = kwargs.pop('load', save_name is not None)
        overwrite_existing = kwargs.pop('overwrite_existing', True)
        warn_if_error = kwargs.pop('warn_if_error', False)
        if load:
            if save_name is None:
                warnings.warn(
                    ('{} has load=True but cannot load because '
                     'save_name=None'.format(func.__name__)),
                    UserWarning)
            else:
                try:
                    return pickle_load(save_name)
                except (OSError, IOError) as err:
                    if warn_if_error:
                        msg = ('{} had {} loading file {}.'.format(
                            func.__name__, type(err).__name__, save_name))
                        # Bug fix: this used to be `msg = ...`, which threw
                        # away the message built just above.
                        msg += ' Continuing without loading.'
                        warnings.warn(msg, UserWarning)
        result = func(*args, **kwargs)
        if save:
            if save_name is None:
                warnings.warn((func.__name__ + ' has save=True but cannot ' +
                               'save because save_name=None'), UserWarning)
            else:
                pickle_save(result, save_name,
                            overwrite_existing=overwrite_existing)
        return result
    return wrapper
def pickle_save(data, name, **kwargs):
    """Saves object with pickle.

    Parameters
    ----------
    data: anything picklable
        Object to save.
    name: str
        Path to save to (includes dir, excludes extension).
    extension: str, optional
        File extension (default '.pkl').
    overwrite_existing: bool, optional
        When the save path already contains a file: if True, the file is
        overwritten; if False, the data is saved with the system time
        appended to the file name.

    Raises
    ------
    TypeError
        If any unexpected keyword argument is supplied.
    """
    extension = kwargs.pop('extension', '.pkl')
    overwrite_existing = kwargs.pop('overwrite_existing', True)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    filename = name + extension
    # Check if the target directory exists and if not make it
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname) and dirname != '':
        os.makedirs(dirname)
    if os.path.isfile(filename) and not overwrite_existing:
        print(filename + ' already exists! Saving with time appended')
        filename = name + '_' + time.asctime().replace(' ', '_')
        filename += extension
    # Bug fix: the old Python 2 shim
    # (``try: PermissionError / except NameError: PermissionError = IOError``)
    # made PermissionError a local name, so on Python 3 the bare reference
    # always raised UnboundLocalError (a NameError subclass) and silently
    # rebound PermissionError to IOError (== OSError). Catching OSError
    # directly covers PermissionError and IOError and preserves the
    # effective Python 3 behavior without the confusing shim.
    try:
        with open(filename, 'wb') as outfile:
            pickle.dump(data, outfile)
    except (MemoryError, OSError) as err:
        warnings.warn((type(err).__name__ + ' in pickle_save: continue without'
                       ' saving.'), UserWarning)
def pickle_load(name, extension='.pkl'):
    """Load a pickled object from disk.

    Parameters
    ----------
    name: str
        Path to load from (includes dir, excludes extension).
    extension: str, optional
        File extension (default '.pkl').

    Returns
    -------
    Contents of the pickle file.
    """
    with open(name + extension, 'rb') as infile:
        return pickle.load(infile)
def bootstrap_resample_run(ns_run, threads=None, ninit_sep=False,
                           random_seed=False):
    """Bootstrap resample the threads of a nested sampling run.

    Threads are drawn with replacement and recombined into a new
    (resampled) run dictionary. See 'Sampling errors in nested sampling
    parameter estimation' (Higson et al. 2018) for background.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dictionary.
    threads: None or list of numpy arrays, optional
        Precomputed threads; extracted from ns_run when None.
    ninit_sep: bool
        For dynamic runs: resample the ninit initial threads and the
        dynamically added threads separately. Useful when only a few
        threads start by sampling the whole prior, as errors occur if
        none of these make it into the bootstrap resample.
    random_seed: None, bool or int, optional
        Set numpy random seed. Default is False (leave the seed alone);
        None seeds from the system state, which keeps results reliable
        when multiprocessing. The previous global random state is
        restored afterwards whenever a seed is applied.

    Returns
    -------
    dict
        Resampled nested sampling run dictionary.
    """
    if random_seed is not False:
        # Preserve the global numpy random state so other code is unaffected.
        saved_state = np.random.get_state()
        np.random.seed(random_seed)
    if threads is None:
        threads = nestcheck.ns_run_utils.get_run_threads(ns_run)
    n_threads = len(threads)
    if ninit_sep:
        try:
            ninit = ns_run['settings']['ninit']
            assert np.all(ns_run['thread_min_max'][:ninit, 0] == -np.inf), (
                'ninit_sep assumes the initial threads are labeled '
                '(0,...,ninit-1), so these should start by sampling the whole '
                'prior.')
            # Resample initial and dynamically added threads separately.
            inds = np.random.randint(0, ninit, ninit)
            inds = np.append(inds, np.random.randint(ninit, n_threads,
                                                     n_threads - ninit))
        except KeyError:
            warnings.warn((
                'bootstrap_resample_run has kwarg ninit_sep=True but '
                'ns_run["settings"]["ninit"] does not exist. Doing bootstrap '
                'with ninit_sep=False'), UserWarning)
            ninit_sep = False
    if not ninit_sep:
        inds = np.random.randint(0, n_threads, n_threads)
    resampled_run = nestcheck.ns_run_utils.combine_threads(
        [threads[ind] for ind in inds])
    try:
        resampled_run['settings'] = ns_run['settings']
    except KeyError:
        pass
    if random_seed is not False:
        # Restore the random state captured before seeding.
        np.random.set_state(saved_state)
    return resampled_run
def run_std_bootstrap(ns_run, estimator_list, **kwargs):
    """Uses bootstrap resampling to calculate an estimate of the
    standard deviation of the distribution of sampling errors (the
    uncertainty on the calculation) for a single nested sampling run.

    For more details about bootstrap resampling for estimating sampling
    errors see 'Sampling errors in nested sampling parameter estimation'
    (Higson et al. 2018).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dictionary.
    estimator_list: list of functions for estimating quantities (such as the
        Bayesian evidence or mean of parameters) from nested sampling runs.
        Example functions can be found in estimators.py. Each should have
        arguments: func(ns_run, logw=None)
    kwargs: dict
        kwargs for run_bootstrap_values

    Returns
    -------
    output: 1d numpy array
        Sampling error on calculation result for each estimator in
        estimator_list.
    """
    bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)
    # Sample standard deviation (ddof=1) over the bootstrap replications,
    # taken for all estimators at once. Equivalent to looping over the rows
    # of bs_values and calling np.std on each, but vectorized.
    return np.std(bs_values, axis=1, ddof=1)
def run_bootstrap_values(ns_run, estimator_list, **kwargs):
    r"""Uses bootstrap resampling to calculate an estimate of the
    standard deviation of the distribution of sampling errors (the
    uncertainty on the calculation) for a single nested sampling run.

    For more details about bootstrap resampling for estimating sampling
    errors see 'Sampling errors in nested sampling parameter estimation'
    (Higson et al. 2018).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dictionary.
    estimator_list: list of functions for estimating quantities (such as the
        Bayesian evidence or mean of parameters) from nested sampling runs.
        Example functions can be found in estimators.py. Each should have
        arguments: func(ns_run, logw=None)
    n_simulate: int
        Number of bootstrap replications.
    ninit_sep: bool, optional
        For dynamic runs: resample initial threads and dynamically added
        threads separately. Useful when there are only a few threads which
        start by sampling the whole prior, as errors occur if none of these are
        included in the bootstrap resample.
    flip_skew: bool, optional
        Determine if distribution of bootstrap values should be flipped about
        its mean to better represent our probability distribution on the true
        value - see "Bayesian astrostatistics: a backward look to the future"
        (Loredo, 2012 Figure 2) for an explanation.
        If true, the samples :math:`X` are mapped to :math:`2 \mu - X`, where
        :math:`\mu` is the mean sample value.
        This leaves the mean and standard deviation unchanged.
    random_seeds: list, optional
        list of random_seed arguments for bootstrap_resample_run.
        Defaults to range(n_simulate) in order to give reproducible results.

    Returns
    -------
    bs_values: 2d numpy array
        Bootstrap values for each estimator in estimator_list. Has shape
        (len(estimator_list), n_simulate).
    """
    # NB: the docstring is a raw string because it contains LaTeX (\mu);
    # in a normal string "\m" is an invalid escape sequence.
    ninit_sep = kwargs.pop('ninit_sep', False)
    flip_skew = kwargs.pop('flip_skew', True)
    n_simulate = kwargs.pop('n_simulate')  # No default, must specify
    random_seeds = kwargs.pop('random_seeds', range(n_simulate))
    assert len(random_seeds) == n_simulate
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    threads = nestcheck.ns_run_utils.get_run_threads(ns_run)
    bs_values = np.zeros((len(estimator_list), n_simulate))
    for i, random_seed in enumerate(random_seeds):
        ns_run_temp = bootstrap_resample_run(
            ns_run, threads=threads, ninit_sep=ninit_sep,
            random_seed=random_seed)
        bs_values[:, i] = nestcheck.ns_run_utils.run_estimators(
            ns_run_temp, estimator_list)
        del ns_run_temp  # free the resampled run before the next iteration
    if flip_skew:
        # Reflect each estimator's bootstrap distribution about its mean:
        # X -> 2 * mu - X. This leaves mean and std unchanged (vectorized
        # equivalent of flipping each row in a loop).
        estimator_means = np.mean(bs_values, axis=1)
        bs_values = 2 * estimator_means[:, np.newaxis] - bs_values
    return bs_values
def run_ci_bootstrap(ns_run, estimator_list, **kwargs):
    """Uses bootstrap resampling to calculate credible intervals on the
    distribution of sampling errors (the uncertainty on the calculation)
    for a single nested sampling run.

    For more details about bootstrap resampling for estimating sampling
    errors see 'Sampling errors in nested sampling parameter estimation'
    (Higson et al. 2018).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dictionary.
    estimator_list: list of functions for estimating quantities (such as the
        Bayesian evidence or mean of parameters) from nested sampling runs.
        Example functions can be found in estimators.py. Each should have
        arguments: func(ns_run, logw=None)
    cred_int: float
        Credible interval to compute.
    n_simulate: int
    ninit_sep: bool, optional

    Returns
    -------
    ci_output: 1d numpy array
        Credible interval on sampling error on calculation result for each
        estimator in estimator_list.
    """
    cred_int = kwargs.pop('cred_int')  # required - no default
    bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)
    # Confidence-interval formula for estimator T at level alpha:
    #     2 T(x) - G^{-1}(T(x*))
    # where G is the CDF of the bootstrap resampled values.
    expected = nestcheck.ns_run_utils.run_estimators(ns_run, estimator_list)
    n_bs = bs_values.shape[1]
    # Empirical CDF positions for the sorted bootstrap values.
    cdf = (np.asarray(range(n_bs)) + 0.5) / n_bs
    ci_output = expected * 2
    for est_ind in range(ci_output.shape[0]):
        sorted_bs = np.sort(bs_values[est_ind, :])
        ci_output[est_ind] -= np.interp(1. - cred_int, cdf, sorted_bs)
    return ci_output
def run_std_simulate(ns_run, estimator_list, n_simulate=None):
    """Uses the 'simulated weights' method to calculate an estimate of the
    standard deviation of the distribution of sampling errors (the
    uncertainty on the calculation) for a single nested sampling run.

    Note that the simulated weights method is not accurate for parameter
    estimation calculations.

    For more details about the simulated weights method for estimating
    sampling errors see 'Sampling errors in nested sampling parameter
    estimation' (Higson et al. 2018).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dictionary.
    estimator_list: list of functions for estimating quantities (such as the
        bayesian evidence or mean of parameters) from nested sampling runs.
        Example functions can be found in estimators.py. Each should have
        arguments: func(ns_run, logw=None)
    n_simulate: int
        Number of weight simulations.

    Returns
    -------
    output: 1d numpy array
        Sampling error on calculation result for each estimator in
        estimator_list.
    """
    assert n_simulate is not None, 'run_std_simulate: must specify n_simulate'
    all_values = np.zeros((len(estimator_list), n_simulate))
    for i in range(n_simulate):
        all_values[:, i] = nestcheck.ns_run_utils.run_estimators(
            ns_run, estimator_list, simulate=True)
    # Sample standard deviation (ddof=1) over the simulations for every
    # estimator at once - vectorized replacement for the per-row loop.
    return np.std(all_values, axis=1, ddof=1)
def implementation_std(vals_std, vals_std_u, bs_std, bs_std_u, **kwargs):
    r"""Estimates variation of results due to implementation-specific
    effects. See 'nestcheck: diagnostic tests for nested sampling
    calculations' (Higson et al. 2019) for more details.

    Uncertainties on the output are calculated numerically using the fact
    that (from central limit theorem) our uncertainties on vals_std and
    bs_std are (approximately) normally distributed. This is needed as
    results from standard error propagation techniques are not valid when
    the uncertainties are not small compared to the result.

    Parameters
    ----------
    vals_std: numpy array
        Standard deviations of results from repeated calculations.
    vals_std_u: numpy array
        :math:`1\sigma` uncertainties on vals_std_u.
    bs_std: numpy array
        Bootstrap error estimates. Each element should correspond to the same
        element in vals_std.
    bs_std_u: numpy array
        :math:`1\sigma` uncertainties on vals_std_u.
    nsim: int, optional
        Number of simulations to use to numerically calculate the
        uncertainties on the estimated implementation-specific effects.
    random_seed: int or None, optional
        Numpy random seed. Use to get reproducible uncertainties on the
        output.

    Returns
    -------
    imp_std: numpy array
        Estimated standard deviation of results due to
        implementation-specific effects.
    imp_std_u: numpy array
        :math:`1\sigma` uncertainties on imp_std.
    imp_frac: numpy array
        imp_std as a fraction of vals_std.
    imp_frac_u:
        :math:`1\sigma` uncertainties on imp_frac.
    """
    nsim = kwargs.pop('nsim', 1000000)
    random_seed = kwargs.pop('random_seed', 0)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # If implementation errors are uncorrelated with sampling errors then
    #     var(results) = var(implementation) + var(sampling)
    # so std(implementation) = sqrt(var(results) - var(sampling)).
    # Keep the sign of the variance difference so negative estimates can be
    # identified and zeroed out below.
    signed_var = (vals_std ** 2) - (bs_std ** 2)
    imp_std = np.sign(signed_var) * np.sqrt(np.abs(signed_var))
    negative_inds = np.where(imp_std <= 0)[0]
    imp_std[negative_inds] = 0
    imp_frac = imp_std / vals_std
    imp_std_u = np.zeros(imp_std.shape)
    imp_frac_u = np.zeros(imp_frac.shape)
    # Numerically propagate the uncertainties by simulating the error
    # distributions of vals_std and bs_std for each estimator. The global
    # numpy random state is saved and restored around each seeded draw so
    # other code's randomness is not affected.
    for est_ind in range(imp_std_u.shape[0]):
        saved_state = np.random.get_state()
        np.random.seed(random_seed)
        sim_results_std = np.random.normal(
            vals_std[est_ind], vals_std_u[est_ind], size=nsim)
        sim_sampling_std = np.random.normal(
            bs_std[est_ind], bs_std_u[est_ind], size=nsim)
        sim_signed_var = (sim_results_std ** 2) - (sim_sampling_std ** 2)
        sim_imp_std = np.sign(sim_signed_var) * np.sqrt(np.abs(sim_signed_var))
        imp_std_u[est_ind] = np.std(sim_imp_std, ddof=1)
        imp_frac_u[est_ind] = np.std(sim_imp_std / sim_results_std, ddof=1)
        np.random.set_state(saved_state)
    return imp_std, imp_std_u, imp_frac, imp_frac_u
def run_thread_values(run, estimator_list):
    """Helper function for parallelising thread_values_df.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dictionary.
    estimator_list: list of functions

    Returns
    -------
    vals_array: numpy array
        Array of estimator values for each thread.
        Has shape (len(estimator_list), len(theads)).
    """
    threads = nestcheck.ns_run_utils.get_run_threads(run)
    # One column of estimator values per single-thread run.
    vals_array = np.stack(
        [nestcheck.ns_run_utils.run_estimators(thread, estimator_list)
         for thread in threads], axis=1)
    assert vals_array.shape == (len(estimator_list), len(threads))
    return vals_array
def pairwise_distances(dist_list, earth_mover_dist=True, energy_dist=True):
    """Applies statistical_distances to each unique pair of distribution
    samples in dist_list.

    Parameters
    ----------
    dist_list: list of 1d arrays
    earth_mover_dist: bool, optional
        Passed to statistical_distances.
    energy_dist: bool, optional
        Passed to statistical_distances.

    Returns
    -------
    ser: pandas Series object
        Values are statistical distances. Index levels are:
        calculation type: name of statistical distance.
        run: tuple containing the index in dist_list of the pair of samples
        arrays from which the statistical distance was computed.
    """
    rows = []
    pair_labels = []
    # Each unordered pair (i, j) with j < i is visited exactly once.
    for i in range(len(dist_list)):
        for j in range(i):
            pair_labels.append(str((i, j)))
            rows.append(statistical_distances(
                dist_list[i], dist_list[j],
                earth_mover_dist=earth_mover_dist,
                energy_dist=energy_dist))
    columns = ['ks pvalue', 'ks distance']
    if earth_mover_dist:
        columns.append('earth mover distance')
    if energy_dist:
        columns.append('energy distance')
    # Unstack so the distance name and the pair label form a MultiIndex.
    ser = pd.DataFrame(rows, index=pair_labels, columns=columns).unstack()
    ser.index.names = ['calculation type', 'run']
    return ser
ejhigson/nestcheck | nestcheck/error_analysis.py | statistical_distances | def statistical_distances(samples1, samples2, earth_mover_dist=True,
energy_dist=True):
"""Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array
"""
out = []
temp = scipy.stats.ks_2samp(samples1, samples2)
out.append(temp.pvalue)
out.append(temp.statistic)
if earth_mover_dist:
out.append(scipy.stats.wasserstein_distance(samples1, samples2))
if energy_dist:
out.append(scipy.stats.energy_distance(samples1, samples2))
return np.asarray(out) | python | def statistical_distances(samples1, samples2, earth_mover_dist=True,
energy_dist=True):
"""Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array
"""
out = []
temp = scipy.stats.ks_2samp(samples1, samples2)
out.append(temp.pvalue)
out.append(temp.statistic)
if earth_mover_dist:
out.append(scipy.stats.wasserstein_distance(samples1, samples2))
if energy_dist:
out.append(scipy.stats.energy_distance(samples1, samples2))
return np.asarray(out) | [
"def",
"statistical_distances",
"(",
"samples1",
",",
"samples2",
",",
"earth_mover_dist",
"=",
"True",
",",
"energy_dist",
"=",
"True",
")",
":",
"out",
"=",
"[",
"]",
"temp",
"=",
"scipy",
".",
"stats",
".",
"ks_2samp",
"(",
"samples1",
",",
"samples2",
... | Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not to compute the energy distance between the samples.
Returns
-------
1d array | [
"Compute",
"measures",
"of",
"the",
"statistical",
"distance",
"between",
"samples",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/error_analysis.py#L397-L423 | train |
ejhigson/nestcheck | nestcheck/dummy_data.py | get_dummy_thread | def get_dummy_thread(nsamples, **kwargs):
"""Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if seed is not False:
np.random.seed(seed)
thread = {'logl': np.sort(np.random.random(nsamples)) * logl_range,
'nlive_array': np.full(nsamples, 1.),
'theta': np.random.random((nsamples, ndim)),
'thread_labels': np.zeros(nsamples).astype(int)}
if logl_start != -np.inf:
thread['logl'] += logl_start
thread['thread_min_max'] = np.asarray([[logl_start, thread['logl'][-1]]])
return thread | python | def get_dummy_thread(nsamples, **kwargs):
"""Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if seed is not False:
np.random.seed(seed)
thread = {'logl': np.sort(np.random.random(nsamples)) * logl_range,
'nlive_array': np.full(nsamples, 1.),
'theta': np.random.random((nsamples, ndim)),
'thread_labels': np.zeros(nsamples).astype(int)}
if logl_start != -np.inf:
thread['logl'] += logl_start
thread['thread_min_max'] = np.asarray([[logl_start, thread['logl'][-1]]])
return thread | [
"def",
"get_dummy_thread",
"(",
"nsamples",
",",
"*",
"*",
"kwargs",
")",
":",
"seed",
"=",
"kwargs",
".",
"pop",
"(",
"'seed'",
",",
"False",
")",
"ndim",
"=",
"kwargs",
".",
"pop",
"(",
"'ndim'",
",",
"2",
")",
"logl_start",
"=",
"kwargs",
".",
"... | Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. | [
"Generate",
"dummy",
"data",
"for",
"a",
"single",
"nested",
"sampling",
"thread",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/dummy_data.py#L11-L47 | train |
ejhigson/nestcheck | nestcheck/dummy_data.py | get_dummy_run | def get_dummy_run(nthread, nsamples, **kwargs):
"""Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthreads: int
Number of threads in the run.
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
threads = []
# set seed before generating any threads and do not reset for each thread
if seed is not False:
np.random.seed(seed)
threads = []
for _ in range(nthread):
threads.append(get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=logl_start,
logl_range=logl_range))
# Sort threads in order of starting logl so labels match labels that would
# have been given processing a dead points array. N.B. this only works when
# all threads have same start_logl
threads = sorted(threads, key=lambda th: th['logl'][0])
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
# Use combine_ns_runs rather than combine threads as this relabels the
# threads according to their order
return nestcheck.ns_run_utils.combine_threads(threads) | python | def get_dummy_run(nthread, nsamples, **kwargs):
"""Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthreads: int
Number of threads in the run.
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
threads = []
# set seed before generating any threads and do not reset for each thread
if seed is not False:
np.random.seed(seed)
threads = []
for _ in range(nthread):
threads.append(get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=logl_start,
logl_range=logl_range))
# Sort threads in order of starting logl so labels match labels that would
# have been given processing a dead points array. N.B. this only works when
# all threads have same start_logl
threads = sorted(threads, key=lambda th: th['logl'][0])
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
# Use combine_ns_runs rather than combine threads as this relabels the
# threads according to their order
return nestcheck.ns_run_utils.combine_threads(threads) | [
"def",
"get_dummy_run",
"(",
"nthread",
",",
"nsamples",
",",
"*",
"*",
"kwargs",
")",
":",
"seed",
"=",
"kwargs",
".",
"pop",
"(",
"'seed'",
",",
"False",
")",
"ndim",
"=",
"kwargs",
".",
"pop",
"(",
"'ndim'",
",",
"2",
")",
"logl_start",
"=",
"kw... | Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthreads: int
Number of threads in the run.
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. | [
"Generate",
"dummy",
"data",
"for",
"a",
"nested",
"sampling",
"run",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/dummy_data.py#L50-L96 | train |
ejhigson/nestcheck | nestcheck/dummy_data.py | get_dummy_dynamic_run | def get_dummy_dynamic_run(nsamples, **kwargs):
"""Generate dummy data for a dynamic nested sampling run.
Loglikelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
nthread_init: int
Number of threads in the inital run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the inital run (starting at randomly chosen points
in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
nthread_init = kwargs.pop('nthread_init', 2)
nthread_dyn = kwargs.pop('nthread_dyn', 3)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,
logl_start=-np.inf, logl_range=logl_range)
dyn_starts = list(np.random.choice(
init['logl'], nthread_dyn, replace=True))
threads = nestcheck.ns_run_utils.get_run_threads(init)
# Seed must be False here so it is not set again for each thread
threads += [get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=start,
logl_range=logl_range) for start in dyn_starts]
# make sure the threads have unique labels and combine them
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
run = nestcheck.ns_run_utils.combine_threads(threads)
# To make sure the thread labelling is same way it would when
# processing a dead points file, tranform into dead points
samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
return nestcheck.data_processing.process_samples_array(samples) | python | def get_dummy_dynamic_run(nsamples, **kwargs):
"""Generate dummy data for a dynamic nested sampling run.
Loglikelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
nthread_init: int
Number of threads in the inital run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the inital run (starting at randomly chosen points
in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
nthread_init = kwargs.pop('nthread_init', 2)
nthread_dyn = kwargs.pop('nthread_dyn', 3)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,
logl_start=-np.inf, logl_range=logl_range)
dyn_starts = list(np.random.choice(
init['logl'], nthread_dyn, replace=True))
threads = nestcheck.ns_run_utils.get_run_threads(init)
# Seed must be False here so it is not set again for each thread
threads += [get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=start,
logl_range=logl_range) for start in dyn_starts]
# make sure the threads have unique labels and combine them
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
run = nestcheck.ns_run_utils.combine_threads(threads)
# To make sure the thread labelling is same way it would when
# processing a dead points file, tranform into dead points
samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
return nestcheck.data_processing.process_samples_array(samples) | [
"def",
"get_dummy_dynamic_run",
"(",
"nsamples",
",",
"*",
"*",
"kwargs",
")",
":",
"seed",
"=",
"kwargs",
".",
"pop",
"(",
"'seed'",
",",
"False",
")",
"ndim",
"=",
"kwargs",
".",
"pop",
"(",
"'ndim'",
",",
"2",
")",
"nthread_init",
"=",
"kwargs",
"... | Generate dummy data for a dynamic nested sampling run.
Loglikelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
nthread_init: int
Number of threads in the inital run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the inital run (starting at randomly chosen points
in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. | [
"Generate",
"dummy",
"data",
"for",
"a",
"dynamic",
"nested",
"sampling",
"run",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/dummy_data.py#L99-L148 | train |
ejhigson/nestcheck | setup.py | get_long_description | def get_long_description():
"""Get PyPI long description from the .rst file."""
pkg_dir = get_package_dir()
with open(os.path.join(pkg_dir, '.pypi_long_desc.rst')) as readme_file:
long_description = readme_file.read()
return long_description | python | def get_long_description():
"""Get PyPI long description from the .rst file."""
pkg_dir = get_package_dir()
with open(os.path.join(pkg_dir, '.pypi_long_desc.rst')) as readme_file:
long_description = readme_file.read()
return long_description | [
"def",
"get_long_description",
"(",
")",
":",
"pkg_dir",
"=",
"get_package_dir",
"(",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pkg_dir",
",",
"'.pypi_long_desc.rst'",
")",
")",
"as",
"readme_file",
":",
"long_description",
"=",
"readme... | Get PyPI long description from the .rst file. | [
"Get",
"PyPI",
"long",
"description",
"from",
"the",
".",
"rst",
"file",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/setup.py#L16-L21 | train |
ejhigson/nestcheck | setup.py | get_version | def get_version():
"""Get single-source __version__."""
pkg_dir = get_package_dir()
with open(os.path.join(pkg_dir, 'nestcheck/_version.py')) as ver_file:
string = ver_file.read()
return string.strip().replace('__version__ = ', '').replace('\'', '') | python | def get_version():
"""Get single-source __version__."""
pkg_dir = get_package_dir()
with open(os.path.join(pkg_dir, 'nestcheck/_version.py')) as ver_file:
string = ver_file.read()
return string.strip().replace('__version__ = ', '').replace('\'', '') | [
"def",
"get_version",
"(",
")",
":",
"pkg_dir",
"=",
"get_package_dir",
"(",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pkg_dir",
",",
"'nestcheck/_version.py'",
")",
")",
"as",
"ver_file",
":",
"string",
"=",
"ver_file",
".",
"read"... | Get single-source __version__. | [
"Get",
"single",
"-",
"source",
"__version__",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/setup.py#L24-L29 | train |
ejhigson/nestcheck | nestcheck/plots.py | plot_run_nlive | def plot_run_nlive(method_names, run_dict, **kwargs):
"""Plot the allocations of live points as a function of logX for the input
sets of nested sampling runs of the type used in the dynamic nested
sampling paper (Higson et al. 2019).
Plots also include analytically calculated distributions of relative
posterior mass and relative posterior mass remaining.
Parameters
----------
method_names: list of strs
run_dict: dict of lists of nested sampling runs.
Keys of run_dict must be method_names.
logx_given_logl: function, optional
For mapping points' logl values to logx values.
If not specified the logx coordinates for each run are estimated using
its numbers of live points.
logl_given_logx: function, optional
For calculating the relative posterior mass and posterior mass
remaining at each logx coordinate.
logx_min: float, optional
Lower limit of logx axis. If not specified this is set to the lowest
logx reached by any of the runs.
ymax: bool, optional
Maximum value for plot's nlive axis (yaxis).
npoints: int, optional
Number of points to have in the fgivenx plot grids.
figsize: tuple, optional
Size of figure in inches.
post_mass_norm: str or None, optional
Specify method_name for runs use form normalising the analytic
posterior mass curve. If None, all runs are used.
cum_post_mass_norm: str or None, optional
Specify method_name for runs use form normalising the analytic
cumulative posterior mass remaining curve. If None, all runs are used.
Returns
-------
fig: matplotlib figure
"""
logx_given_logl = kwargs.pop('logx_given_logl', None)
logl_given_logx = kwargs.pop('logl_given_logx', None)
logx_min = kwargs.pop('logx_min', None)
ymax = kwargs.pop('ymax', None)
npoints = kwargs.pop('npoints', 100)
figsize = kwargs.pop('figsize', (6.4, 2))
post_mass_norm = kwargs.pop('post_mass_norm', None)
cum_post_mass_norm = kwargs.pop('cum_post_mass_norm', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert set(method_names) == set(run_dict.keys()), (
'input method names=' + str(method_names) + ' do not match run_dict '
'keys=' + str(run_dict.keys()))
# Plotting
# --------
fig = plt.figure(figsize=figsize)
ax = plt.gca()
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Reserve colors for certain common method_names so they are always the
# same regardless of method_name order for consistency in the paper.
linecolor_dict = {'standard': colors[2],
'dynamic $G=0$': colors[8],
'dynamic $G=1$': colors[9]}
ax.set_prop_cycle('color', [colors[i] for i in [4, 1, 6, 0, 3, 5, 7]])
integrals_dict = {}
logx_min_list = []
for method_name in method_names:
integrals = np.zeros(len(run_dict[method_name]))
for nr, run in enumerate(run_dict[method_name]):
if 'logx' in run:
logx = run['logx']
elif logx_given_logl is not None:
logx = logx_given_logl(run['logl'])
else:
logx = nestcheck.ns_run_utils.get_logx(
run['nlive_array'], simulate=False)
logx_min_list.append(logx[-1])
logx[0] = 0 # to make lines extend all the way to the end
if nr == 0:
# Label the first line and store it so we can access its color
try:
line, = ax.plot(logx, run['nlive_array'], linewidth=1,
label=method_name,
color=linecolor_dict[method_name])
except KeyError:
line, = ax.plot(logx, run['nlive_array'], linewidth=1,
label=method_name)
else:
# Set other lines to same color and don't add labels
ax.plot(logx, run['nlive_array'], linewidth=1,
color=line.get_color())
# for normalising analytic weight lines
integrals[nr] = -np.trapz(run['nlive_array'], x=logx)
integrals_dict[method_name] = integrals[np.isfinite(integrals)]
# if not specified, set logx min to the lowest logx reached by a run
if logx_min is None:
logx_min = np.asarray(logx_min_list).min()
if logl_given_logx is not None:
# Plot analytic posterior mass and cumulative posterior mass
logx_plot = np.linspace(logx_min, 0, npoints)
logl = logl_given_logx(logx_plot)
# Remove any NaNs
logx_plot = logx_plot[np.where(~np.isnan(logl))[0]]
logl = logl[np.where(~np.isnan(logl))[0]]
w_an = rel_posterior_mass(logx_plot, logl)
# Try normalising the analytic distribution of posterior mass to have
# the same area under the curve as the runs with dynamic_goal=1 (the
# ones which we want to compare to it). If they are not available just
# normalise it to the average area under all the runs (which should be
# about the same if they have the same number of samples).
w_an *= average_by_key(integrals_dict, post_mass_norm)
ax.plot(logx_plot, w_an,
linewidth=2, label='relative posterior mass',
linestyle=':', color='k')
# plot cumulative posterior mass
w_an_c = np.cumsum(w_an)
w_an_c /= np.trapz(w_an_c, x=logx_plot)
# Try normalising the cumulative distribution of posterior mass to have
# the same area under the curve as the runs with dynamic_goal=0 (the
# ones which we want to compare to it). If they are not available just
# normalise it to the average area under all the runs (which should be
# about the same if they have the same number of samples).
w_an_c *= average_by_key(integrals_dict, cum_post_mass_norm)
ax.plot(logx_plot, w_an_c, linewidth=2, linestyle='--', dashes=(2, 3),
label='posterior mass remaining', color='darkblue')
ax.set_ylabel('number of live points')
ax.set_xlabel(r'$\log X $')
# set limits
if ymax is not None:
ax.set_ylim([0, ymax])
else:
ax.set_ylim(bottom=0)
ax.set_xlim([logx_min, 0])
ax.legend()
return fig | python | def plot_run_nlive(method_names, run_dict, **kwargs):
"""Plot the allocations of live points as a function of logX for the input
sets of nested sampling runs of the type used in the dynamic nested
sampling paper (Higson et al. 2019).
Plots also include analytically calculated distributions of relative
posterior mass and relative posterior mass remaining.
Parameters
----------
method_names: list of strs
run_dict: dict of lists of nested sampling runs.
Keys of run_dict must be method_names.
logx_given_logl: function, optional
For mapping points' logl values to logx values.
If not specified the logx coordinates for each run are estimated using
its numbers of live points.
logl_given_logx: function, optional
For calculating the relative posterior mass and posterior mass
remaining at each logx coordinate.
logx_min: float, optional
Lower limit of logx axis. If not specified this is set to the lowest
logx reached by any of the runs.
ymax: bool, optional
Maximum value for plot's nlive axis (yaxis).
npoints: int, optional
Number of points to have in the fgivenx plot grids.
figsize: tuple, optional
Size of figure in inches.
post_mass_norm: str or None, optional
Specify method_name for runs use form normalising the analytic
posterior mass curve. If None, all runs are used.
cum_post_mass_norm: str or None, optional
Specify method_name for runs use form normalising the analytic
cumulative posterior mass remaining curve. If None, all runs are used.
Returns
-------
fig: matplotlib figure
"""
logx_given_logl = kwargs.pop('logx_given_logl', None)
logl_given_logx = kwargs.pop('logl_given_logx', None)
logx_min = kwargs.pop('logx_min', None)
ymax = kwargs.pop('ymax', None)
npoints = kwargs.pop('npoints', 100)
figsize = kwargs.pop('figsize', (6.4, 2))
post_mass_norm = kwargs.pop('post_mass_norm', None)
cum_post_mass_norm = kwargs.pop('cum_post_mass_norm', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert set(method_names) == set(run_dict.keys()), (
'input method names=' + str(method_names) + ' do not match run_dict '
'keys=' + str(run_dict.keys()))
# Plotting
# --------
fig = plt.figure(figsize=figsize)
ax = plt.gca()
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Reserve colors for certain common method_names so they are always the
# same regardless of method_name order for consistency in the paper.
linecolor_dict = {'standard': colors[2],
'dynamic $G=0$': colors[8],
'dynamic $G=1$': colors[9]}
ax.set_prop_cycle('color', [colors[i] for i in [4, 1, 6, 0, 3, 5, 7]])
integrals_dict = {}
logx_min_list = []
for method_name in method_names:
integrals = np.zeros(len(run_dict[method_name]))
for nr, run in enumerate(run_dict[method_name]):
if 'logx' in run:
logx = run['logx']
elif logx_given_logl is not None:
logx = logx_given_logl(run['logl'])
else:
logx = nestcheck.ns_run_utils.get_logx(
run['nlive_array'], simulate=False)
logx_min_list.append(logx[-1])
logx[0] = 0 # to make lines extend all the way to the end
if nr == 0:
# Label the first line and store it so we can access its color
try:
line, = ax.plot(logx, run['nlive_array'], linewidth=1,
label=method_name,
color=linecolor_dict[method_name])
except KeyError:
line, = ax.plot(logx, run['nlive_array'], linewidth=1,
label=method_name)
else:
# Set other lines to same color and don't add labels
ax.plot(logx, run['nlive_array'], linewidth=1,
color=line.get_color())
# for normalising analytic weight lines
integrals[nr] = -np.trapz(run['nlive_array'], x=logx)
integrals_dict[method_name] = integrals[np.isfinite(integrals)]
# if not specified, set logx min to the lowest logx reached by a run
if logx_min is None:
logx_min = np.asarray(logx_min_list).min()
if logl_given_logx is not None:
# Plot analytic posterior mass and cumulative posterior mass
logx_plot = np.linspace(logx_min, 0, npoints)
logl = logl_given_logx(logx_plot)
# Remove any NaNs
logx_plot = logx_plot[np.where(~np.isnan(logl))[0]]
logl = logl[np.where(~np.isnan(logl))[0]]
w_an = rel_posterior_mass(logx_plot, logl)
# Try normalising the analytic distribution of posterior mass to have
# the same area under the curve as the runs with dynamic_goal=1 (the
# ones which we want to compare to it). If they are not available just
# normalise it to the average area under all the runs (which should be
# about the same if they have the same number of samples).
w_an *= average_by_key(integrals_dict, post_mass_norm)
ax.plot(logx_plot, w_an,
linewidth=2, label='relative posterior mass',
linestyle=':', color='k')
# plot cumulative posterior mass
w_an_c = np.cumsum(w_an)
w_an_c /= np.trapz(w_an_c, x=logx_plot)
# Try normalising the cumulative distribution of posterior mass to have
# the same area under the curve as the runs with dynamic_goal=0 (the
# ones which we want to compare to it). If they are not available just
# normalise it to the average area under all the runs (which should be
# about the same if they have the same number of samples).
w_an_c *= average_by_key(integrals_dict, cum_post_mass_norm)
ax.plot(logx_plot, w_an_c, linewidth=2, linestyle='--', dashes=(2, 3),
label='posterior mass remaining', color='darkblue')
ax.set_ylabel('number of live points')
ax.set_xlabel(r'$\log X $')
# set limits
if ymax is not None:
ax.set_ylim([0, ymax])
else:
ax.set_ylim(bottom=0)
ax.set_xlim([logx_min, 0])
ax.legend()
return fig | [
"def",
"plot_run_nlive",
"(",
"method_names",
",",
"run_dict",
",",
"*",
"*",
"kwargs",
")",
":",
"logx_given_logl",
"=",
"kwargs",
".",
"pop",
"(",
"'logx_given_logl'",
",",
"None",
")",
"logl_given_logx",
"=",
"kwargs",
".",
"pop",
"(",
"'logl_given_logx'",
... | Plot the allocations of live points as a function of logX for the input
sets of nested sampling runs of the type used in the dynamic nested
sampling paper (Higson et al. 2019).
Plots also include analytically calculated distributions of relative
posterior mass and relative posterior mass remaining.
Parameters
----------
method_names: list of strs
run_dict: dict of lists of nested sampling runs.
Keys of run_dict must be method_names.
logx_given_logl: function, optional
For mapping points' logl values to logx values.
If not specified the logx coordinates for each run are estimated using
its numbers of live points.
logl_given_logx: function, optional
For calculating the relative posterior mass and posterior mass
remaining at each logx coordinate.
logx_min: float, optional
Lower limit of logx axis. If not specified this is set to the lowest
logx reached by any of the runs.
ymax: bool, optional
Maximum value for plot's nlive axis (yaxis).
npoints: int, optional
Number of points to have in the fgivenx plot grids.
figsize: tuple, optional
Size of figure in inches.
post_mass_norm: str or None, optional
Specify method_name for runs use form normalising the analytic
posterior mass curve. If None, all runs are used.
cum_post_mass_norm: str or None, optional
Specify method_name for runs use form normalising the analytic
cumulative posterior mass remaining curve. If None, all runs are used.
Returns
-------
fig: matplotlib figure | [
"Plot",
"the",
"allocations",
"of",
"live",
"points",
"as",
"a",
"function",
"of",
"logX",
"for",
"the",
"input",
"sets",
"of",
"nested",
"sampling",
"runs",
"of",
"the",
"type",
"used",
"in",
"the",
"dynamic",
"nested",
"sampling",
"paper",
"(",
"Higson",... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/plots.py#L21-L154 | train |
ejhigson/nestcheck | nestcheck/plots.py | kde_plot_df | def kde_plot_df(df, xlims=None, **kwargs):
"""Plots kde estimates of distributions of samples in each cell of the
input pandas DataFrame.
There is one subplot for each dataframe column, and on each subplot there
is one kde line.
Parameters
----------
df: pandas data frame
Each cell must contain a 1d numpy array of samples.
xlims: dict, optional
Dictionary of xlimits - keys are column names and values are lists of
length 2.
num_xticks: int, optional
Number of xticks on each subplot.
figsize: tuple, optional
Size of figure in inches.
nrows: int, optional
Number of rows of subplots.
ncols: int, optional
Number of columns of subplots.
normalize: bool, optional
If true, kde plots are normalized to have the same area under their
curves. If False, their max value is set to 1.
legend: bool, optional
Should a legend be added?
legend_kwargs: dict, optional
Additional kwargs for legend.
Returns
-------
fig: matplotlib figure
"""
assert xlims is None or isinstance(xlims, dict)
figsize = kwargs.pop('figsize', (6.4, 1.5))
num_xticks = kwargs.pop('num_xticks', None)
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', int(np.ceil(len(df.columns) / nrows)))
normalize = kwargs.pop('normalize', True)
legend = kwargs.pop('legend', False)
legend_kwargs = kwargs.pop('legend_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
for nax, col in enumerate(df):
if nrows == 1:
ax = axes[nax]
else:
ax = axes[nax // ncols, nax % ncols]
supmin = df[col].apply(np.min).min()
supmax = df[col].apply(np.max).max()
support = np.linspace(supmin - 0.1 * (supmax - supmin),
supmax + 0.1 * (supmax - supmin), 200)
handles = []
labels = []
for name, samps in df[col].iteritems():
pdf = scipy.stats.gaussian_kde(samps)(support)
if not normalize:
pdf /= pdf.max()
handles.append(ax.plot(support, pdf, label=name)[0])
labels.append(name)
ax.set_ylim(bottom=0)
ax.set_yticks([])
if xlims is not None:
try:
ax.set_xlim(xlims[col])
except KeyError:
pass
ax.set_xlabel(col)
if num_xticks is not None:
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(
nbins=num_xticks))
if legend:
fig.legend(handles, labels, **legend_kwargs)
return fig | python | def kde_plot_df(df, xlims=None, **kwargs):
"""Plots kde estimates of distributions of samples in each cell of the
input pandas DataFrame.
There is one subplot for each dataframe column, and on each subplot there
is one kde line.
Parameters
----------
df: pandas data frame
Each cell must contain a 1d numpy array of samples.
xlims: dict, optional
Dictionary of xlimits - keys are column names and values are lists of
length 2.
num_xticks: int, optional
Number of xticks on each subplot.
figsize: tuple, optional
Size of figure in inches.
nrows: int, optional
Number of rows of subplots.
ncols: int, optional
Number of columns of subplots.
normalize: bool, optional
If true, kde plots are normalized to have the same area under their
curves. If False, their max value is set to 1.
legend: bool, optional
Should a legend be added?
legend_kwargs: dict, optional
Additional kwargs for legend.
Returns
-------
fig: matplotlib figure
"""
assert xlims is None or isinstance(xlims, dict)
figsize = kwargs.pop('figsize', (6.4, 1.5))
num_xticks = kwargs.pop('num_xticks', None)
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', int(np.ceil(len(df.columns) / nrows)))
normalize = kwargs.pop('normalize', True)
legend = kwargs.pop('legend', False)
legend_kwargs = kwargs.pop('legend_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
for nax, col in enumerate(df):
if nrows == 1:
ax = axes[nax]
else:
ax = axes[nax // ncols, nax % ncols]
supmin = df[col].apply(np.min).min()
supmax = df[col].apply(np.max).max()
support = np.linspace(supmin - 0.1 * (supmax - supmin),
supmax + 0.1 * (supmax - supmin), 200)
handles = []
labels = []
for name, samps in df[col].iteritems():
pdf = scipy.stats.gaussian_kde(samps)(support)
if not normalize:
pdf /= pdf.max()
handles.append(ax.plot(support, pdf, label=name)[0])
labels.append(name)
ax.set_ylim(bottom=0)
ax.set_yticks([])
if xlims is not None:
try:
ax.set_xlim(xlims[col])
except KeyError:
pass
ax.set_xlabel(col)
if num_xticks is not None:
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(
nbins=num_xticks))
if legend:
fig.legend(handles, labels, **legend_kwargs)
return fig | [
"def",
"kde_plot_df",
"(",
"df",
",",
"xlims",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"xlims",
"is",
"None",
"or",
"isinstance",
"(",
"xlims",
",",
"dict",
")",
"figsize",
"=",
"kwargs",
".",
"pop",
"(",
"'figsize'",
",",
"(",
"6... | Plots kde estimates of distributions of samples in each cell of the
input pandas DataFrame.
There is one subplot for each dataframe column, and on each subplot there
is one kde line.
Parameters
----------
df: pandas data frame
Each cell must contain a 1d numpy array of samples.
xlims: dict, optional
Dictionary of xlimits - keys are column names and values are lists of
length 2.
num_xticks: int, optional
Number of xticks on each subplot.
figsize: tuple, optional
Size of figure in inches.
nrows: int, optional
Number of rows of subplots.
ncols: int, optional
Number of columns of subplots.
normalize: bool, optional
If true, kde plots are normalized to have the same area under their
curves. If False, their max value is set to 1.
legend: bool, optional
Should a legend be added?
legend_kwargs: dict, optional
Additional kwargs for legend.
Returns
-------
fig: matplotlib figure | [
"Plots",
"kde",
"estimates",
"of",
"distributions",
"of",
"samples",
"in",
"each",
"cell",
"of",
"the",
"input",
"pandas",
"DataFrame",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/plots.py#L157-L232 | train |
ejhigson/nestcheck | nestcheck/plots.py | bs_param_dists | def bs_param_dists(run_list, **kwargs):
"""Creates posterior distributions and their bootstrap error functions for
input runs and estimators.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to be used for the fgivenx
distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
figsize: tuple, optional
Matplotlib figsize in (inches).
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure
"""
fthetas = kwargs.pop('fthetas', [lambda theta: theta[:, 0],
lambda theta: theta[:, 1]])
labels = kwargs.pop('labels', [r'$\theta_' + str(i + 1) + '$' for i in
range(len(fthetas))])
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
n_simulate = kwargs.pop('n_simulate', 100)
random_seed = kwargs.pop('random_seed', 0)
figsize = kwargs.pop('figsize', (6.4, 2))
nx = kwargs.pop('nx', 100)
ny = kwargs.pop('ny', nx)
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'disable': True})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
# Use random seed to make samples consistent and allow caching.
# To avoid fixing seed use random_seed=None
state = np.random.get_state() # save initial random state
np.random.seed(random_seed)
if not isinstance(run_list, list):
run_list = [run_list]
assert len(labels) == len(fthetas), (
'There should be the same number of axes and labels')
width_ratios = [40] * len(fthetas) + [1] * len(run_list)
fig, axes = plt.subplots(nrows=1, ncols=len(run_list) + len(fthetas),
gridspec_kw={'wspace': 0.1,
'width_ratios': width_ratios},
figsize=figsize)
colormaps = ['Reds_r', 'Blues_r', 'Greys_r', 'Greens_r', 'Oranges_r']
mean_colors = ['darkred', 'darkblue', 'darkgrey', 'darkgreen',
'darkorange']
# plot in reverse order so reds are final plot and always on top
for nrun, run in reversed(list(enumerate(run_list))):
try:
cache = cache_in + '_' + str(nrun)
except TypeError:
cache = None
# add bs distribution plots
cbar = plot_bs_dists(run, fthetas, axes[:len(fthetas)],
parallel=parallel,
ftheta_lims=ftheta_lims, cache=cache,
n_simulate=n_simulate, nx=nx, ny=ny,
rasterize_contours=rasterize_contours,
mean_color=mean_colors[nrun],
colormap=colormaps[nrun],
tqdm_kwargs=tqdm_kwargs)
# add colorbar
colorbar_plot = plt.colorbar(cbar, cax=axes[len(fthetas) + nrun],
ticks=[1, 2, 3])
colorbar_plot.solids.set_edgecolor('face')
colorbar_plot.ax.set_yticklabels([])
if nrun == len(run_list) - 1:
colorbar_plot.ax.set_yticklabels(
[r'$1\sigma$', r'$2\sigma$', r'$3\sigma$'])
# Format axis ticks and labels
for nax, ax in enumerate(axes[:len(fthetas)]):
ax.set_yticks([])
ax.set_xlabel(labels[nax])
if ax.is_first_col():
ax.set_ylabel('probability')
# Prune final xtick label so it doesn't overlap with next plot
prune = 'upper' if nax != len(fthetas) - 1 else None
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(
nbins=5, prune=prune))
np.random.set_state(state) # return to original random state
return fig | python | def bs_param_dists(run_list, **kwargs):
"""Creates posterior distributions and their bootstrap error functions for
input runs and estimators.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to be used for the fgivenx
distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
figsize: tuple, optional
Matplotlib figsize in (inches).
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure
"""
fthetas = kwargs.pop('fthetas', [lambda theta: theta[:, 0],
lambda theta: theta[:, 1]])
labels = kwargs.pop('labels', [r'$\theta_' + str(i + 1) + '$' for i in
range(len(fthetas))])
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
n_simulate = kwargs.pop('n_simulate', 100)
random_seed = kwargs.pop('random_seed', 0)
figsize = kwargs.pop('figsize', (6.4, 2))
nx = kwargs.pop('nx', 100)
ny = kwargs.pop('ny', nx)
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'disable': True})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
# Use random seed to make samples consistent and allow caching.
# To avoid fixing seed use random_seed=None
state = np.random.get_state() # save initial random state
np.random.seed(random_seed)
if not isinstance(run_list, list):
run_list = [run_list]
assert len(labels) == len(fthetas), (
'There should be the same number of axes and labels')
width_ratios = [40] * len(fthetas) + [1] * len(run_list)
fig, axes = plt.subplots(nrows=1, ncols=len(run_list) + len(fthetas),
gridspec_kw={'wspace': 0.1,
'width_ratios': width_ratios},
figsize=figsize)
colormaps = ['Reds_r', 'Blues_r', 'Greys_r', 'Greens_r', 'Oranges_r']
mean_colors = ['darkred', 'darkblue', 'darkgrey', 'darkgreen',
'darkorange']
# plot in reverse order so reds are final plot and always on top
for nrun, run in reversed(list(enumerate(run_list))):
try:
cache = cache_in + '_' + str(nrun)
except TypeError:
cache = None
# add bs distribution plots
cbar = plot_bs_dists(run, fthetas, axes[:len(fthetas)],
parallel=parallel,
ftheta_lims=ftheta_lims, cache=cache,
n_simulate=n_simulate, nx=nx, ny=ny,
rasterize_contours=rasterize_contours,
mean_color=mean_colors[nrun],
colormap=colormaps[nrun],
tqdm_kwargs=tqdm_kwargs)
# add colorbar
colorbar_plot = plt.colorbar(cbar, cax=axes[len(fthetas) + nrun],
ticks=[1, 2, 3])
colorbar_plot.solids.set_edgecolor('face')
colorbar_plot.ax.set_yticklabels([])
if nrun == len(run_list) - 1:
colorbar_plot.ax.set_yticklabels(
[r'$1\sigma$', r'$2\sigma$', r'$3\sigma$'])
# Format axis ticks and labels
for nax, ax in enumerate(axes[:len(fthetas)]):
ax.set_yticks([])
ax.set_xlabel(labels[nax])
if ax.is_first_col():
ax.set_ylabel('probability')
# Prune final xtick label so it doesn't overlap with next plot
prune = 'upper' if nax != len(fthetas) - 1 else None
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(
nbins=5, prune=prune))
np.random.set_state(state) # return to original random state
return fig | [
"def",
"bs_param_dists",
"(",
"run_list",
",",
"*",
"*",
"kwargs",
")",
":",
"fthetas",
"=",
"kwargs",
".",
"pop",
"(",
"'fthetas'",
",",
"[",
"lambda",
"theta",
":",
"theta",
"[",
":",
",",
"0",
"]",
",",
"lambda",
"theta",
":",
"theta",
"[",
":",... | Creates posterior distributions and their bootstrap error functions for
input runs and estimators.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to be used for the fgivenx
distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
figsize: tuple, optional
Matplotlib figsize in (inches).
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure | [
"Creates",
"posterior",
"distributions",
"and",
"their",
"bootstrap",
"error",
"functions",
"for",
"input",
"runs",
"and",
"estimators",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/plots.py#L235-L346 | train |
ejhigson/nestcheck | nestcheck/plots.py | param_logx_diagram | def param_logx_diagram(run_list, **kwargs):
"""Creates diagrams of a nested sampling run's evolution as it iterates
towards higher likelihoods, expressed as a function of log X, where X(L) is
the fraction of the prior volume with likelihood greater than some value L.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations" (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: dict, optional
Plot limits for each ftheta.
plot_means: bool, optional
Should the mean value of each ftheta be plotted?
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
logx_min: float, optional
Lower limit of logx axis.
figsize: tuple, optional
Matplotlib figure size (in inches).
colors: list of strs, optional
Colors to plot run scatter plots with.
colormaps: list of strs, optional
Colormaps to plot run fgivenx plots with.
npoints: int, optional
How many points to have in the logx array used to calculate and plot
analytical weights.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel optional
point_size: float, optional
size of markers on scatter plot (in pts)
thin: float, optional
factor by which to reduce the number of samples before plotting the
scatter plot. Must be in half-closed interval (0, 1].
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure
"""
fthetas = kwargs.pop('fthetas', [lambda theta: theta[:, 0],
lambda theta: theta[:, 1]])
labels = kwargs.pop('labels', [r'$\theta_' + str(i + 1) + '$' for i in
range(len(fthetas))])
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
threads_to_plot = kwargs.pop('threads_to_plot', [0])
plot_means = kwargs.pop('plot_means', True)
n_simulate = kwargs.pop('n_simulate', 100)
random_seed = kwargs.pop('random_seed', 0)
logx_min = kwargs.pop('logx_min', None)
figsize = kwargs.pop('figsize', (6.4, 2 * (1 + len(fthetas))))
colors = kwargs.pop('colors', ['red', 'blue', 'grey', 'green', 'orange'])
colormaps = kwargs.pop('colormaps', ['Reds_r', 'Blues_r', 'Greys_r',
'Greens_r', 'Oranges_r'])
# Options for fgivenx
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
point_size = kwargs.pop('point_size', 0.2)
thin = kwargs.pop('thin', 1)
npoints = kwargs.pop('npoints', 100)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'disable': True})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if not isinstance(run_list, list):
run_list = [run_list]
# Use random seed to make samples consistent and allow caching.
# To avoid fixing seed use random_seed=None
state = np.random.get_state() # save initial random state
np.random.seed(random_seed)
if not plot_means:
mean_colors = [None] * len(colors)
else:
mean_colors = ['dark' + col for col in colors]
nlogx = npoints
ny_posterior = npoints
assert len(fthetas) == len(labels)
assert len(fthetas) == len(ftheta_lims)
thread_linestyles = ['-', '-.', ':']
# make figure
# -----------
fig, axes = plt.subplots(nrows=1 + len(fthetas), ncols=2, figsize=figsize,
gridspec_kw={'wspace': 0,
'hspace': 0,
'width_ratios': [15, 40]})
# make colorbar axes in top left corner
axes[0, 0].set_visible(False)
divider = mpl_toolkits.axes_grid1.make_axes_locatable(axes[0, 0])
colorbar_ax_list = []
for i in range(len(run_list)):
colorbar_ax_list.append(divider.append_axes("left", size=0.05,
pad=0.05))
# Reverse color bar axis order so when an extra run is added the other
# colorbars stay in the same place
colorbar_ax_list = list(reversed(colorbar_ax_list))
# plot runs in reverse order to put the first run on top
for nrun, run in reversed(list(enumerate(run_list))):
# Weight Plot
# -----------
ax_weight = axes[0, 1]
ax_weight.set_ylabel('posterior\nmass')
samples = np.zeros((n_simulate, run['nlive_array'].shape[0] * 2))
for i in range(n_simulate):
logx_temp = nestcheck.ns_run_utils.get_logx(
run['nlive_array'], simulate=True)[::-1]
logw_rel = logx_temp + run['logl'][::-1]
w_rel = np.exp(logw_rel - logw_rel.max())
w_rel /= np.trapz(w_rel, x=logx_temp)
samples[i, ::2] = logx_temp
samples[i, 1::2] = w_rel
if logx_min is None:
logx_min = samples[:, 0].min()
logx_sup = np.linspace(logx_min, 0, nlogx)
try:
cache = cache_in + '_' + str(nrun) + '_weights'
except TypeError:
cache = None
interp_alt = functools.partial(alternate_helper, func=np.interp)
y, pmf = fgivenx.drivers.compute_pmf(
interp_alt, logx_sup, samples, cache=cache, ny=npoints,
parallel=parallel, tqdm_kwargs=tqdm_kwargs)
cbar = fgivenx.plot.plot(
logx_sup, y, pmf, ax_weight, rasterize_contours=rasterize_contours,
colors=plt.get_cmap(colormaps[nrun]))
ax_weight.set_xlim([logx_min, 0])
ax_weight.set_ylim(bottom=0)
ax_weight.set_yticks([])
ax_weight.set_xticklabels([])
# color bar plot
# --------------
colorbar_plot = plt.colorbar(cbar, cax=colorbar_ax_list[nrun],
ticks=[1, 2, 3])
colorbar_ax_list[nrun].yaxis.set_ticks_position('left')
colorbar_plot.solids.set_edgecolor('face')
colorbar_plot.ax.set_yticklabels([])
if nrun == 0:
colorbar_plot.ax.set_yticklabels(
[r'$1\sigma$', r'$2\sigma$', r'$3\sigma$'])
# samples plot
# ------------
logx = nestcheck.ns_run_utils.get_logx(run['nlive_array'],
simulate=False)
scatter_x = logx
scatter_theta = run['theta']
if thin != 1:
assert 0 < thin <= 1, (
'thin={} should be in the half-closed interval(0, 1]'
.format(thin))
state = np.random.get_state() # save initial random state
np.random.seed(random_seed)
inds = np.where(np.random.random(logx.shape) <= thin)[0]
np.random.set_state(state) # return to original random state
scatter_x = logx[inds]
scatter_theta = run['theta'][inds, :]
for nf, ftheta in enumerate(fthetas):
ax_samples = axes[1 + nf, 1]
ax_samples.scatter(scatter_x, ftheta(scatter_theta),
s=point_size, color=colors[nrun])
if threads_to_plot is not None:
for i in threads_to_plot:
thread_inds = np.where(run['thread_labels'] == i)[0]
ax_samples.plot(logx[thread_inds],
ftheta(run['theta'][thread_inds]),
linestyle=thread_linestyles[nrun],
color='black', lw=1)
ax_samples.set_xlim([logx_min, 0])
ax_samples.set_ylim(ftheta_lims[nf])
# Plot posteriors
# ---------------
posterior_axes = [axes[i + 1, 0] for i in range(len(fthetas))]
_ = plot_bs_dists(run, fthetas, posterior_axes,
ftheta_lims=ftheta_lims,
flip_axes=True, n_simulate=n_simulate,
rasterize_contours=rasterize_contours,
cache=cache_in, nx=npoints, ny=ny_posterior,
colormap=colormaps[nrun],
mean_color=mean_colors[nrun],
parallel=parallel, tqdm_kwargs=tqdm_kwargs)
# Plot means onto scatter plot
# ----------------------------
if plot_means:
w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)
w_rel /= np.sum(w_rel)
means = [np.sum(w_rel * f(run['theta'])) for f in fthetas]
for nf, mean in enumerate(means):
axes[nf + 1, 1].axhline(y=mean, lw=1, linestyle='--',
color=mean_colors[nrun])
# Format axes
for nf, ax in enumerate(posterior_axes):
ax.set_ylim(ftheta_lims[nf])
ax.invert_xaxis() # only invert each axis once, not for every run!
axes[-1, 1].set_xlabel(r'$\log X$')
# Add labels
for i, label in enumerate(labels):
axes[i + 1, 0].set_ylabel(label)
# Prune final ytick label so it doesn't overlap with next plot
prune = 'upper' if i != 0 else None
axes[i + 1, 0].yaxis.set_major_locator(
matplotlib.ticker.MaxNLocator(nbins=3, prune=prune))
for _, ax in np.ndenumerate(axes):
if not ax.is_first_col():
ax.set_yticklabels([])
if not (ax.is_last_row() and ax.is_last_col()):
ax.set_xticks([])
np.random.set_state(state) # return to original random state
return fig | python | def param_logx_diagram(run_list, **kwargs):
"""Creates diagrams of a nested sampling run's evolution as it iterates
towards higher likelihoods, expressed as a function of log X, where X(L) is
the fraction of the prior volume with likelihood greater than some value L.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations" (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: dict, optional
Plot limits for each ftheta.
plot_means: bool, optional
Should the mean value of each ftheta be plotted?
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
logx_min: float, optional
Lower limit of logx axis.
figsize: tuple, optional
Matplotlib figure size (in inches).
colors: list of strs, optional
Colors to plot run scatter plots with.
colormaps: list of strs, optional
Colormaps to plot run fgivenx plots with.
npoints: int, optional
How many points to have in the logx array used to calculate and plot
analytical weights.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel optional
point_size: float, optional
size of markers on scatter plot (in pts)
thin: float, optional
factor by which to reduce the number of samples before plotting the
scatter plot. Must be in half-closed interval (0, 1].
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure
"""
fthetas = kwargs.pop('fthetas', [lambda theta: theta[:, 0],
lambda theta: theta[:, 1]])
labels = kwargs.pop('labels', [r'$\theta_' + str(i + 1) + '$' for i in
range(len(fthetas))])
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
threads_to_plot = kwargs.pop('threads_to_plot', [0])
plot_means = kwargs.pop('plot_means', True)
n_simulate = kwargs.pop('n_simulate', 100)
random_seed = kwargs.pop('random_seed', 0)
logx_min = kwargs.pop('logx_min', None)
figsize = kwargs.pop('figsize', (6.4, 2 * (1 + len(fthetas))))
colors = kwargs.pop('colors', ['red', 'blue', 'grey', 'green', 'orange'])
colormaps = kwargs.pop('colormaps', ['Reds_r', 'Blues_r', 'Greys_r',
'Greens_r', 'Oranges_r'])
# Options for fgivenx
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
point_size = kwargs.pop('point_size', 0.2)
thin = kwargs.pop('thin', 1)
npoints = kwargs.pop('npoints', 100)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'disable': True})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if not isinstance(run_list, list):
run_list = [run_list]
# Use random seed to make samples consistent and allow caching.
# To avoid fixing seed use random_seed=None
state = np.random.get_state() # save initial random state
np.random.seed(random_seed)
if not plot_means:
mean_colors = [None] * len(colors)
else:
mean_colors = ['dark' + col for col in colors]
nlogx = npoints
ny_posterior = npoints
assert len(fthetas) == len(labels)
assert len(fthetas) == len(ftheta_lims)
thread_linestyles = ['-', '-.', ':']
# make figure
# -----------
fig, axes = plt.subplots(nrows=1 + len(fthetas), ncols=2, figsize=figsize,
gridspec_kw={'wspace': 0,
'hspace': 0,
'width_ratios': [15, 40]})
# make colorbar axes in top left corner
axes[0, 0].set_visible(False)
divider = mpl_toolkits.axes_grid1.make_axes_locatable(axes[0, 0])
colorbar_ax_list = []
for i in range(len(run_list)):
colorbar_ax_list.append(divider.append_axes("left", size=0.05,
pad=0.05))
# Reverse color bar axis order so when an extra run is added the other
# colorbars stay in the same place
colorbar_ax_list = list(reversed(colorbar_ax_list))
# plot runs in reverse order to put the first run on top
for nrun, run in reversed(list(enumerate(run_list))):
# Weight Plot
# -----------
ax_weight = axes[0, 1]
ax_weight.set_ylabel('posterior\nmass')
samples = np.zeros((n_simulate, run['nlive_array'].shape[0] * 2))
for i in range(n_simulate):
logx_temp = nestcheck.ns_run_utils.get_logx(
run['nlive_array'], simulate=True)[::-1]
logw_rel = logx_temp + run['logl'][::-1]
w_rel = np.exp(logw_rel - logw_rel.max())
w_rel /= np.trapz(w_rel, x=logx_temp)
samples[i, ::2] = logx_temp
samples[i, 1::2] = w_rel
if logx_min is None:
logx_min = samples[:, 0].min()
logx_sup = np.linspace(logx_min, 0, nlogx)
try:
cache = cache_in + '_' + str(nrun) + '_weights'
except TypeError:
cache = None
interp_alt = functools.partial(alternate_helper, func=np.interp)
y, pmf = fgivenx.drivers.compute_pmf(
interp_alt, logx_sup, samples, cache=cache, ny=npoints,
parallel=parallel, tqdm_kwargs=tqdm_kwargs)
cbar = fgivenx.plot.plot(
logx_sup, y, pmf, ax_weight, rasterize_contours=rasterize_contours,
colors=plt.get_cmap(colormaps[nrun]))
ax_weight.set_xlim([logx_min, 0])
ax_weight.set_ylim(bottom=0)
ax_weight.set_yticks([])
ax_weight.set_xticklabels([])
# color bar plot
# --------------
colorbar_plot = plt.colorbar(cbar, cax=colorbar_ax_list[nrun],
ticks=[1, 2, 3])
colorbar_ax_list[nrun].yaxis.set_ticks_position('left')
colorbar_plot.solids.set_edgecolor('face')
colorbar_plot.ax.set_yticklabels([])
if nrun == 0:
colorbar_plot.ax.set_yticklabels(
[r'$1\sigma$', r'$2\sigma$', r'$3\sigma$'])
# samples plot
# ------------
logx = nestcheck.ns_run_utils.get_logx(run['nlive_array'],
simulate=False)
scatter_x = logx
scatter_theta = run['theta']
if thin != 1:
assert 0 < thin <= 1, (
'thin={} should be in the half-closed interval(0, 1]'
.format(thin))
state = np.random.get_state() # save initial random state
np.random.seed(random_seed)
inds = np.where(np.random.random(logx.shape) <= thin)[0]
np.random.set_state(state) # return to original random state
scatter_x = logx[inds]
scatter_theta = run['theta'][inds, :]
for nf, ftheta in enumerate(fthetas):
ax_samples = axes[1 + nf, 1]
ax_samples.scatter(scatter_x, ftheta(scatter_theta),
s=point_size, color=colors[nrun])
if threads_to_plot is not None:
for i in threads_to_plot:
thread_inds = np.where(run['thread_labels'] == i)[0]
ax_samples.plot(logx[thread_inds],
ftheta(run['theta'][thread_inds]),
linestyle=thread_linestyles[nrun],
color='black', lw=1)
ax_samples.set_xlim([logx_min, 0])
ax_samples.set_ylim(ftheta_lims[nf])
# Plot posteriors
# ---------------
posterior_axes = [axes[i + 1, 0] for i in range(len(fthetas))]
_ = plot_bs_dists(run, fthetas, posterior_axes,
ftheta_lims=ftheta_lims,
flip_axes=True, n_simulate=n_simulate,
rasterize_contours=rasterize_contours,
cache=cache_in, nx=npoints, ny=ny_posterior,
colormap=colormaps[nrun],
mean_color=mean_colors[nrun],
parallel=parallel, tqdm_kwargs=tqdm_kwargs)
# Plot means onto scatter plot
# ----------------------------
if plot_means:
w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)
w_rel /= np.sum(w_rel)
means = [np.sum(w_rel * f(run['theta'])) for f in fthetas]
for nf, mean in enumerate(means):
axes[nf + 1, 1].axhline(y=mean, lw=1, linestyle='--',
color=mean_colors[nrun])
# Format axes
for nf, ax in enumerate(posterior_axes):
ax.set_ylim(ftheta_lims[nf])
ax.invert_xaxis() # only invert each axis once, not for every run!
axes[-1, 1].set_xlabel(r'$\log X$')
# Add labels
for i, label in enumerate(labels):
axes[i + 1, 0].set_ylabel(label)
# Prune final ytick label so it doesn't overlap with next plot
prune = 'upper' if i != 0 else None
axes[i + 1, 0].yaxis.set_major_locator(
matplotlib.ticker.MaxNLocator(nbins=3, prune=prune))
for _, ax in np.ndenumerate(axes):
if not ax.is_first_col():
ax.set_yticklabels([])
if not (ax.is_last_row() and ax.is_last_col()):
ax.set_xticks([])
np.random.set_state(state) # return to original random state
return fig | [
"def",
"param_logx_diagram",
"(",
"run_list",
",",
"*",
"*",
"kwargs",
")",
":",
"fthetas",
"=",
"kwargs",
".",
"pop",
"(",
"'fthetas'",
",",
"[",
"lambda",
"theta",
":",
"theta",
"[",
":",
",",
"0",
"]",
",",
"lambda",
"theta",
":",
"theta",
"[",
... | Creates diagrams of a nested sampling run's evolution as it iterates
towards higher likelihoods, expressed as a function of log X, where X(L) is
the fraction of the prior volume with likelihood greater than some value L.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations" (Higson et al. 2019).
Parameters
----------
run_list: dict or list of dicts
Nested sampling run(s) to plot.
fthetas: list of functions, optional
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
labels: list of strs, optional
Labels for each ftheta.
ftheta_lims: dict, optional
Plot limits for each ftheta.
plot_means: bool, optional
Should the mean value of each ftheta be plotted?
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx distributions.
random_seed: int, optional
Seed to make sure results are consistent and fgivenx caching can be
used.
logx_min: float, optional
Lower limit of logx axis.
figsize: tuple, optional
Matplotlib figure size (in inches).
colors: list of strs, optional
Colors to plot run scatter plots with.
colormaps: list of strs, optional
Colormaps to plot run fgivenx plots with.
npoints: int, optional
How many points to have in the logx array used to calculate and plot
analytical weights.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel optional
point_size: float, optional
size of markers on scatter plot (in pts)
thin: float, optional
factor by which to reduce the number of samples before plotting the
scatter plot. Must be in half-closed interval (0, 1].
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
fig: matplotlib figure | [
"Creates",
"diagrams",
"of",
"a",
"nested",
"sampling",
"run",
"s",
"evolution",
"as",
"it",
"iterates",
"towards",
"higher",
"likelihoods",
"expressed",
"as",
"a",
"function",
"of",
"log",
"X",
"where",
"X",
"(",
"L",
")",
"is",
"the",
"fraction",
"of",
... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/plots.py#L349-L571 | train |
ejhigson/nestcheck | nestcheck/plots.py | plot_bs_dists | def plot_bs_dists(run, fthetas, axes, **kwargs):
"""Helper function for plotting uncertainties on posterior distributions
using bootstrap resamples and the fgivenx module. Used by bs_param_dists
and param_logx_diagram.
Parameters
----------
run: dict
Nested sampling run to plot.
fthetas: list of functions
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
axes: list of matplotlib axis objects
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx
distributions.
colormap: matplotlib colormap
Colors to plot fgivenx distribution.
mean_color: matplotlib color as str
Color to plot mean of each parameter. If None (default) means are not
plotted.
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
smooth: bool, optional
fgivenx smooth option.
flip_axes: bool, optional
Whether or not plot should be rotated 90 degrees anticlockwise onto its
side.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
cbar: matplotlib colorbar
For use in higher order functions.
"""
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
n_simulate = kwargs.pop('n_simulate', 100)
colormap = kwargs.pop('colormap', plt.get_cmap('Reds_r'))
mean_color = kwargs.pop('mean_color', None)
nx = kwargs.pop('nx', 100)
ny = kwargs.pop('ny', nx)
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
smooth = kwargs.pop('smooth', False)
flip_axes = kwargs.pop('flip_axes', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'leave': False})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(fthetas) == len(axes), \
'There should be the same number of axes and functions to plot'
assert len(fthetas) == len(ftheta_lims), \
'There should be the same number of axes and functions to plot'
threads = nestcheck.ns_run_utils.get_run_threads(run)
# get a list of evenly weighted theta samples from bootstrap resampling
bs_samps = []
for i in range(n_simulate):
run_temp = nestcheck.error_analysis.bootstrap_resample_run(
run, threads=threads)
w_temp = nestcheck.ns_run_utils.get_w_rel(run_temp, simulate=False)
bs_samps.append((run_temp['theta'], w_temp))
for nf, ftheta in enumerate(fthetas):
# Make an array where each row contains one bootstrap replication's
# samples
max_samps = 2 * max([bs_samp[0].shape[0] for bs_samp in bs_samps])
samples_array = np.full((n_simulate, max_samps), np.nan)
for i, (theta, weights) in enumerate(bs_samps):
nsamp = 2 * theta.shape[0]
samples_array[i, :nsamp:2] = ftheta(theta)
samples_array[i, 1:nsamp:2] = weights
ftheta_vals = np.linspace(ftheta_lims[nf][0], ftheta_lims[nf][1], nx)
try:
cache = cache_in + '_' + str(nf)
except TypeError:
cache = None
samp_kde = functools.partial(alternate_helper,
func=weighted_1d_gaussian_kde)
y, pmf = fgivenx.drivers.compute_pmf(
samp_kde, ftheta_vals, samples_array, ny=ny, cache=cache,
parallel=parallel, tqdm_kwargs=tqdm_kwargs)
if flip_axes:
cbar = fgivenx.plot.plot(
y, ftheta_vals, np.swapaxes(pmf, 0, 1), axes[nf],
colors=colormap, rasterize_contours=rasterize_contours,
smooth=smooth)
else:
cbar = fgivenx.plot.plot(
ftheta_vals, y, pmf, axes[nf], colors=colormap,
rasterize_contours=rasterize_contours, smooth=smooth)
# Plot means
# ----------
if mean_color is not None:
w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)
w_rel /= np.sum(w_rel)
means = [np.sum(w_rel * f(run['theta'])) for f in fthetas]
for nf, mean in enumerate(means):
if flip_axes:
axes[nf].axhline(y=mean, lw=1, linestyle='--',
color=mean_color)
else:
axes[nf].axvline(x=mean, lw=1, linestyle='--',
color=mean_color)
return cbar | python | def plot_bs_dists(run, fthetas, axes, **kwargs):
"""Helper function for plotting uncertainties on posterior distributions
using bootstrap resamples and the fgivenx module. Used by bs_param_dists
and param_logx_diagram.
Parameters
----------
run: dict
Nested sampling run to plot.
fthetas: list of functions
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
axes: list of matplotlib axis objects
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx
distributions.
colormap: matplotlib colormap
Colors to plot fgivenx distribution.
mean_color: matplotlib color as str
Color to plot mean of each parameter. If None (default) means are not
plotted.
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
smooth: bool, optional
fgivenx smooth option.
flip_axes: bool, optional
Whether or not plot should be rotated 90 degrees anticlockwise onto its
side.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
cbar: matplotlib colorbar
For use in higher order functions.
"""
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
n_simulate = kwargs.pop('n_simulate', 100)
colormap = kwargs.pop('colormap', plt.get_cmap('Reds_r'))
mean_color = kwargs.pop('mean_color', None)
nx = kwargs.pop('nx', 100)
ny = kwargs.pop('ny', nx)
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
smooth = kwargs.pop('smooth', False)
flip_axes = kwargs.pop('flip_axes', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'leave': False})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(fthetas) == len(axes), \
'There should be the same number of axes and functions to plot'
assert len(fthetas) == len(ftheta_lims), \
'There should be the same number of axes and functions to plot'
threads = nestcheck.ns_run_utils.get_run_threads(run)
# get a list of evenly weighted theta samples from bootstrap resampling
bs_samps = []
for i in range(n_simulate):
run_temp = nestcheck.error_analysis.bootstrap_resample_run(
run, threads=threads)
w_temp = nestcheck.ns_run_utils.get_w_rel(run_temp, simulate=False)
bs_samps.append((run_temp['theta'], w_temp))
for nf, ftheta in enumerate(fthetas):
# Make an array where each row contains one bootstrap replication's
# samples
max_samps = 2 * max([bs_samp[0].shape[0] for bs_samp in bs_samps])
samples_array = np.full((n_simulate, max_samps), np.nan)
for i, (theta, weights) in enumerate(bs_samps):
nsamp = 2 * theta.shape[0]
samples_array[i, :nsamp:2] = ftheta(theta)
samples_array[i, 1:nsamp:2] = weights
ftheta_vals = np.linspace(ftheta_lims[nf][0], ftheta_lims[nf][1], nx)
try:
cache = cache_in + '_' + str(nf)
except TypeError:
cache = None
samp_kde = functools.partial(alternate_helper,
func=weighted_1d_gaussian_kde)
y, pmf = fgivenx.drivers.compute_pmf(
samp_kde, ftheta_vals, samples_array, ny=ny, cache=cache,
parallel=parallel, tqdm_kwargs=tqdm_kwargs)
if flip_axes:
cbar = fgivenx.plot.plot(
y, ftheta_vals, np.swapaxes(pmf, 0, 1), axes[nf],
colors=colormap, rasterize_contours=rasterize_contours,
smooth=smooth)
else:
cbar = fgivenx.plot.plot(
ftheta_vals, y, pmf, axes[nf], colors=colormap,
rasterize_contours=rasterize_contours, smooth=smooth)
# Plot means
# ----------
if mean_color is not None:
w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)
w_rel /= np.sum(w_rel)
means = [np.sum(w_rel * f(run['theta'])) for f in fthetas]
for nf, mean in enumerate(means):
if flip_axes:
axes[nf].axhline(y=mean, lw=1, linestyle='--',
color=mean_color)
else:
axes[nf].axvline(x=mean, lw=1, linestyle='--',
color=mean_color)
return cbar | [
"def",
"plot_bs_dists",
"(",
"run",
",",
"fthetas",
",",
"axes",
",",
"*",
"*",
"kwargs",
")",
":",
"ftheta_lims",
"=",
"kwargs",
".",
"pop",
"(",
"'ftheta_lims'",
",",
"[",
"[",
"-",
"1",
",",
"1",
"]",
"]",
"*",
"len",
"(",
"fthetas",
")",
")",... | Helper function for plotting uncertainties on posterior distributions
using bootstrap resamples and the fgivenx module. Used by bs_param_dists
and param_logx_diagram.
Parameters
----------
run: dict
Nested sampling run to plot.
fthetas: list of functions
Quantities to plot. Each must map a 2d theta array to 1d ftheta array -
i.e. map every sample's theta vector (every row) to a scalar quantity.
E.g. use lambda x: x[:, 0] to plot the first parameter.
axes: list of matplotlib axis objects
ftheta_lims: list, optional
Plot limits for each ftheta.
n_simulate: int, optional
Number of bootstrap replications to use for the fgivenx
distributions.
colormap: matplotlib colormap
Colors to plot fgivenx distribution.
mean_color: matplotlib color as str
Color to plot mean of each parameter. If None (default) means are not
plotted.
nx: int, optional
Size of x-axis grid for fgivenx plots.
ny: int, optional
Size of y-axis grid for fgivenx plots.
cache: str or None
Root for fgivenx caching (no caching if None).
parallel: bool, optional
fgivenx parallel option.
rasterize_contours: bool, optional
fgivenx rasterize_contours option.
smooth: bool, optional
fgivenx smooth option.
flip_axes: bool, optional
Whether or not plot should be rotated 90 degrees anticlockwise onto its
side.
tqdm_kwargs: dict, optional
Keyword arguments to pass to the tqdm progress bar when it is used in
fgivenx while plotting contours.
Returns
-------
cbar: matplotlib colorbar
For use in higher order functions. | [
"Helper",
"function",
"for",
"plotting",
"uncertainties",
"on",
"posterior",
"distributions",
"using",
"bootstrap",
"resamples",
"and",
"the",
"fgivenx",
"module",
".",
"Used",
"by",
"bs_param_dists",
"and",
"param_logx_diagram",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/plots.py#L578-L693 | train |
ejhigson/nestcheck | nestcheck/plots.py | alternate_helper | def alternate_helper(x, alt_samps, func=None):
"""Helper function for making fgivenx plots of functions with 2 array
arguments of variable lengths."""
alt_samps = alt_samps[~np.isnan(alt_samps)]
arg1 = alt_samps[::2]
arg2 = alt_samps[1::2]
return func(x, arg1, arg2) | python | def alternate_helper(x, alt_samps, func=None):
"""Helper function for making fgivenx plots of functions with 2 array
arguments of variable lengths."""
alt_samps = alt_samps[~np.isnan(alt_samps)]
arg1 = alt_samps[::2]
arg2 = alt_samps[1::2]
return func(x, arg1, arg2) | [
"def",
"alternate_helper",
"(",
"x",
",",
"alt_samps",
",",
"func",
"=",
"None",
")",
":",
"alt_samps",
"=",
"alt_samps",
"[",
"~",
"np",
".",
"isnan",
"(",
"alt_samps",
")",
"]",
"arg1",
"=",
"alt_samps",
"[",
":",
":",
"2",
"]",
"arg2",
"=",
"alt... | Helper function for making fgivenx plots of functions with 2 array
arguments of variable lengths. | [
"Helper",
"function",
"for",
"making",
"fgivenx",
"plots",
"of",
"functions",
"with",
"2",
"array",
"arguments",
"of",
"variable",
"lengths",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/plots.py#L696-L702 | train |
ejhigson/nestcheck | nestcheck/plots.py | weighted_1d_gaussian_kde | def weighted_1d_gaussian_kde(x, samples, weights):
"""Gaussian kde with weighted samples (1d only). Uses Scott bandwidth
factor.
When all the sample weights are equal, this is equivalent to
kde = scipy.stats.gaussian_kde(theta)
return kde(x)
When the weights are not all equal, we compute the effective number
of samples as the information content (Shannon entropy)
nsamp_eff = exp(- sum_i (w_i log(w_i)))
Alternative ways to estimate nsamp_eff include Kish's formula
nsamp_eff = (sum_i w_i) ** 2 / (sum_i w_i ** 2)
See https://en.wikipedia.org/wiki/Effective_sample_size and "Effective
sample size for importance sampling based on discrepancy measures"
(Martino et al. 2017) for more information.
Parameters
----------
x: 1d numpy array
Coordinates at which to evaluate the kde.
samples: 1d numpy array
Samples from which to calculate kde.
weights: 1d numpy array of same shape as samples
Weights of each point. Need not be normalised as this is done inside
the function.
Returns
-------
result: 1d numpy array of same shape as x
Kde evaluated at x values.
"""
assert x.ndim == 1
assert samples.ndim == 1
assert samples.shape == weights.shape
# normalise weights and find effective number of samples
weights /= np.sum(weights)
nz_weights = weights[np.nonzero(weights)]
nsamp_eff = np.exp(-1. * np.sum(nz_weights * np.log(nz_weights)))
# Calculate the weighted sample variance
mu = np.sum(weights * samples)
var = np.sum(weights * ((samples - mu) ** 2))
var *= nsamp_eff / (nsamp_eff - 1) # correct for bias using nsamp_eff
# Calculate bandwidth
scott_factor = np.power(nsamp_eff, -1. / (5)) # 1d Scott factor
sig = np.sqrt(var) * scott_factor
# Calculate and weight residuals
xx, ss = np.meshgrid(x, samples)
chisquared = ((xx - ss) / sig) ** 2
energy = np.exp(-0.5 * chisquared) / np.sqrt(2 * np.pi * (sig ** 2))
result = np.sum(energy * weights[:, np.newaxis], axis=0)
return result | python | def weighted_1d_gaussian_kde(x, samples, weights):
"""Gaussian kde with weighted samples (1d only). Uses Scott bandwidth
factor.
When all the sample weights are equal, this is equivalent to
kde = scipy.stats.gaussian_kde(theta)
return kde(x)
When the weights are not all equal, we compute the effective number
of samples as the information content (Shannon entropy)
nsamp_eff = exp(- sum_i (w_i log(w_i)))
Alternative ways to estimate nsamp_eff include Kish's formula
nsamp_eff = (sum_i w_i) ** 2 / (sum_i w_i ** 2)
See https://en.wikipedia.org/wiki/Effective_sample_size and "Effective
sample size for importance sampling based on discrepancy measures"
(Martino et al. 2017) for more information.
Parameters
----------
x: 1d numpy array
Coordinates at which to evaluate the kde.
samples: 1d numpy array
Samples from which to calculate kde.
weights: 1d numpy array of same shape as samples
Weights of each point. Need not be normalised as this is done inside
the function.
Returns
-------
result: 1d numpy array of same shape as x
Kde evaluated at x values.
"""
assert x.ndim == 1
assert samples.ndim == 1
assert samples.shape == weights.shape
# normalise weights and find effective number of samples
weights /= np.sum(weights)
nz_weights = weights[np.nonzero(weights)]
nsamp_eff = np.exp(-1. * np.sum(nz_weights * np.log(nz_weights)))
# Calculate the weighted sample variance
mu = np.sum(weights * samples)
var = np.sum(weights * ((samples - mu) ** 2))
var *= nsamp_eff / (nsamp_eff - 1) # correct for bias using nsamp_eff
# Calculate bandwidth
scott_factor = np.power(nsamp_eff, -1. / (5)) # 1d Scott factor
sig = np.sqrt(var) * scott_factor
# Calculate and weight residuals
xx, ss = np.meshgrid(x, samples)
chisquared = ((xx - ss) / sig) ** 2
energy = np.exp(-0.5 * chisquared) / np.sqrt(2 * np.pi * (sig ** 2))
result = np.sum(energy * weights[:, np.newaxis], axis=0)
return result | [
"def",
"weighted_1d_gaussian_kde",
"(",
"x",
",",
"samples",
",",
"weights",
")",
":",
"assert",
"x",
".",
"ndim",
"==",
"1",
"assert",
"samples",
".",
"ndim",
"==",
"1",
"assert",
"samples",
".",
"shape",
"==",
"weights",
".",
"shape",
"# normalise weight... | Gaussian kde with weighted samples (1d only). Uses Scott bandwidth
factor.
When all the sample weights are equal, this is equivalent to
kde = scipy.stats.gaussian_kde(theta)
return kde(x)
When the weights are not all equal, we compute the effective number
of samples as the information content (Shannon entropy)
nsamp_eff = exp(- sum_i (w_i log(w_i)))
Alternative ways to estimate nsamp_eff include Kish's formula
nsamp_eff = (sum_i w_i) ** 2 / (sum_i w_i ** 2)
See https://en.wikipedia.org/wiki/Effective_sample_size and "Effective
sample size for importance sampling based on discrepancy measures"
(Martino et al. 2017) for more information.
Parameters
----------
x: 1d numpy array
Coordinates at which to evaluate the kde.
samples: 1d numpy array
Samples from which to calculate kde.
weights: 1d numpy array of same shape as samples
Weights of each point. Need not be normalised as this is done inside
the function.
Returns
-------
result: 1d numpy array of same shape as x
Kde evaluated at x values. | [
"Gaussian",
"kde",
"with",
"weighted",
"samples",
"(",
"1d",
"only",
")",
".",
"Uses",
"Scott",
"bandwidth",
"factor",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/plots.py#L705-L761 | train |
ejhigson/nestcheck | nestcheck/plots.py | rel_posterior_mass | def rel_posterior_mass(logx, logl):
"""Calculate the relative posterior mass for some array of logx values
given the likelihood, prior and number of dimensions.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value.
"""
logw = logx + logl
w_rel = np.exp(logw - logw.max())
w_rel /= np.abs(np.trapz(w_rel, x=logx))
return w_rel | python | def rel_posterior_mass(logx, logl):
"""Calculate the relative posterior mass for some array of logx values
given the likelihood, prior and number of dimensions.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value.
"""
logw = logx + logl
w_rel = np.exp(logw - logw.max())
w_rel /= np.abs(np.trapz(w_rel, x=logx))
return w_rel | [
"def",
"rel_posterior_mass",
"(",
"logx",
",",
"logl",
")",
":",
"logw",
"=",
"logx",
"+",
"logl",
"w_rel",
"=",
"np",
".",
"exp",
"(",
"logw",
"-",
"logw",
".",
"max",
"(",
")",
")",
"w_rel",
"/=",
"np",
".",
"abs",
"(",
"np",
".",
"trapz",
"(... | Calculate the relative posterior mass for some array of logx values
given the likelihood, prior and number of dimensions.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value. | [
"Calculate",
"the",
"relative",
"posterior",
"mass",
"for",
"some",
"array",
"of",
"logx",
"values",
"given",
"the",
"likelihood",
"prior",
"and",
"number",
"of",
"dimensions",
".",
"The",
"posterior",
"mass",
"at",
"each",
"logX",
"value",
"is",
"proportional... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/plots.py#L764-L787 | train |
ejhigson/nestcheck | nestcheck/plots.py | average_by_key | def average_by_key(dict_in, key):
"""Helper function for plot_run_nlive.
Try returning the average of dict_in[key] and, if this does not work or if
key is None, return average of whole dict.
Parameters
----------
dict_in: dict
Values should be arrays.
key: str
Returns
-------
average: float
"""
if key is None:
return np.mean(np.concatenate(list(dict_in.values())))
else:
try:
return np.mean(dict_in[key])
except KeyError:
print('method name "' + key + '" not found, so ' +
'normalise area under the analytic relative posterior ' +
'mass curve using the mean of all methods.')
return np.mean(np.concatenate(list(dict_in.values()))) | python | def average_by_key(dict_in, key):
"""Helper function for plot_run_nlive.
Try returning the average of dict_in[key] and, if this does not work or if
key is None, return average of whole dict.
Parameters
----------
dict_in: dict
Values should be arrays.
key: str
Returns
-------
average: float
"""
if key is None:
return np.mean(np.concatenate(list(dict_in.values())))
else:
try:
return np.mean(dict_in[key])
except KeyError:
print('method name "' + key + '" not found, so ' +
'normalise area under the analytic relative posterior ' +
'mass curve using the mean of all methods.')
return np.mean(np.concatenate(list(dict_in.values()))) | [
"def",
"average_by_key",
"(",
"dict_in",
",",
"key",
")",
":",
"if",
"key",
"is",
"None",
":",
"return",
"np",
".",
"mean",
"(",
"np",
".",
"concatenate",
"(",
"list",
"(",
"dict_in",
".",
"values",
"(",
")",
")",
")",
")",
"else",
":",
"try",
":... | Helper function for plot_run_nlive.
Try returning the average of dict_in[key] and, if this does not work or if
key is None, return average of whole dict.
Parameters
----------
dict_in: dict
Values should be arrays.
key: str
Returns
-------
average: float | [
"Helper",
"function",
"for",
"plot_run_nlive",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/plots.py#L790-L815 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | batch_process_data | def batch_process_data(file_roots, **kwargs):
"""Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
file_roots for the runs to load.
base_dir: str, optional
path to directory containing files.
process_func: function, optional
function to use to process the data.
func_kwargs: dict, optional
additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details).
"""
base_dir = kwargs.pop('base_dir', 'chains')
process_func = kwargs.pop('process_func', process_polychord_run)
func_kwargs = kwargs.pop('func_kwargs', {})
func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ())
data = nestcheck.parallel_utils.parallel_apply(
process_error_helper, file_roots, func_args=(base_dir, process_func),
func_kwargs=func_kwargs, **kwargs)
# Sort processed runs into the same order as file_roots (as parallel_apply
# does not preserve order)
data = sorted(data,
key=lambda x: file_roots.index(x['output']['file_root']))
# Extract error information and print
errors = {}
for i, run in enumerate(data):
if 'error' in run:
try:
errors[run['error']].append(i)
except KeyError:
errors[run['error']] = [i]
for error_name, index_list in errors.items():
message = (error_name + ' processing ' + str(len(index_list)) + ' / '
+ str(len(file_roots)) + ' files')
if len(index_list) != len(file_roots):
message += ('. Roots with errors have (zero based) indexes: '
+ str(index_list))
print(message)
# Return runs which did not have errors
return [run for run in data if 'error' not in run] | python | def batch_process_data(file_roots, **kwargs):
"""Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
file_roots for the runs to load.
base_dir: str, optional
path to directory containing files.
process_func: function, optional
function to use to process the data.
func_kwargs: dict, optional
additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details).
"""
base_dir = kwargs.pop('base_dir', 'chains')
process_func = kwargs.pop('process_func', process_polychord_run)
func_kwargs = kwargs.pop('func_kwargs', {})
func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ())
data = nestcheck.parallel_utils.parallel_apply(
process_error_helper, file_roots, func_args=(base_dir, process_func),
func_kwargs=func_kwargs, **kwargs)
# Sort processed runs into the same order as file_roots (as parallel_apply
# does not preserve order)
data = sorted(data,
key=lambda x: file_roots.index(x['output']['file_root']))
# Extract error information and print
errors = {}
for i, run in enumerate(data):
if 'error' in run:
try:
errors[run['error']].append(i)
except KeyError:
errors[run['error']] = [i]
for error_name, index_list in errors.items():
message = (error_name + ' processing ' + str(len(index_list)) + ' / '
+ str(len(file_roots)) + ' files')
if len(index_list) != len(file_roots):
message += ('. Roots with errors have (zero based) indexes: '
+ str(index_list))
print(message)
# Return runs which did not have errors
return [run for run in data if 'error' not in run] | [
"def",
"batch_process_data",
"(",
"file_roots",
",",
"*",
"*",
"kwargs",
")",
":",
"base_dir",
"=",
"kwargs",
".",
"pop",
"(",
"'base_dir'",
",",
"'chains'",
")",
"process_func",
"=",
"kwargs",
".",
"pop",
"(",
"'process_func'",
",",
"process_polychord_run",
... | Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
file_roots for the runs to load.
base_dir: str, optional
path to directory containing files.
process_func: function, optional
function to use to process the data.
func_kwargs: dict, optional
additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details). | [
"Process",
"output",
"from",
"many",
"nested",
"sampling",
"runs",
"in",
"parallel",
"with",
"optional",
"error",
"handling",
"and",
"caching",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L103-L169 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | process_error_helper | def process_error_helper(root, base_dir, process_func, errors_to_handle=(),
**func_kwargs):
"""Wrapper which applies process_func and handles some common errors so one
bad run does not spoil the whole batch.
Useful errors to handle include:
OSError: if you are not sure if all the files exist
AssertionError: if some of the many assertions fail for known reasons;
for example is there are occasional problems decomposing runs into threads
due to limited numerical precision in logls.
Parameters
----------
root: str
File root.
base_dir: str
Directory containing file.
process_func: func
Function for processing file.
errors_to_handle: error type or tuple of error types
Errors to catch without throwing an exception.
func_kwargs: dict
Kwargs to pass to process_func.
Returns
-------
run: dict
Nested sampling run dict (see the module docstring for more
    details) or, if an error occurred, a dict containing its type
and the file root.
"""
try:
return process_func(root, base_dir, **func_kwargs)
except errors_to_handle as err:
run = {'error': type(err).__name__,
'output': {'file_root': root}}
return run | python | def process_error_helper(root, base_dir, process_func, errors_to_handle=(),
**func_kwargs):
"""Wrapper which applies process_func and handles some common errors so one
bad run does not spoil the whole batch.
Useful errors to handle include:
OSError: if you are not sure if all the files exist
AssertionError: if some of the many assertions fail for known reasons;
    for example if there are occasional problems decomposing runs into threads
due to limited numerical precision in logls.
Parameters
----------
root: str
File root.
base_dir: str
Directory containing file.
process_func: func
Function for processing file.
errors_to_handle: error type or tuple of error types
Errors to catch without throwing an exception.
func_kwargs: dict
Kwargs to pass to process_func.
Returns
-------
run: dict
Nested sampling run dict (see the module docstring for more
details) or, if an error occured, a dict containing its type
and the file root.
"""
try:
return process_func(root, base_dir, **func_kwargs)
except errors_to_handle as err:
run = {'error': type(err).__name__,
'output': {'file_root': root}}
return run | [
"def",
"process_error_helper",
"(",
"root",
",",
"base_dir",
",",
"process_func",
",",
"errors_to_handle",
"=",
"(",
")",
",",
"*",
"*",
"func_kwargs",
")",
":",
"try",
":",
"return",
"process_func",
"(",
"root",
",",
"base_dir",
",",
"*",
"*",
"func_kwarg... | Wrapper which applies process_func and handles some common errors so one
bad run does not spoil the whole batch.
Useful errors to handle include:
OSError: if you are not sure if all the files exist
AssertionError: if some of the many assertions fail for known reasons;
for example is there are occasional problems decomposing runs into threads
due to limited numerical precision in logls.
Parameters
----------
root: str
File root.
base_dir: str
Directory containing file.
process_func: func
Function for processing file.
errors_to_handle: error type or tuple of error types
Errors to catch without throwing an exception.
func_kwargs: dict
Kwargs to pass to process_func.
Returns
-------
run: dict
Nested sampling run dict (see the module docstring for more
    details) or, if an error occurred, a dict containing its type
and the file root. | [
"Wrapper",
"which",
"applies",
"process_func",
"and",
"handles",
"some",
"common",
"errors",
"so",
"one",
"bad",
"run",
"does",
"not",
"spoil",
"the",
"whole",
"batch",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L172-L209 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | process_polychord_run | def process_polychord_run(file_root, base_dir, process_stats_file=True,
**kwargs):
"""Loads data from a PolyChord run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requies PolyChord version v1.13 or later and the setting
write_dead=True.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
process_stats_file: bool, optional
Should PolyChord's <root>.stats file be processed? Set to False if you
don't have the <root>.stats file (such as if PolyChord was run with
write_stats=False).
kwargs: dict, optional
Options passed to ns_run_utils.check_ns_run.
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
"""
    # N.B. PolyChord dead points files also contain remaining live points at
# termination
samples = np.loadtxt(os.path.join(base_dir, file_root) + '_dead-birth.txt')
ns_run = process_samples_array(samples, **kwargs)
ns_run['output'] = {'base_dir': base_dir, 'file_root': file_root}
if process_stats_file:
try:
ns_run['output'] = process_polychord_stats(file_root, base_dir)
except (OSError, IOError, ValueError) as err:
warnings.warn(
('process_polychord_stats raised {} processing {}.stats file. '
' Proceeding without stats.').format(
type(err).__name__, os.path.join(base_dir, file_root)),
UserWarning)
return ns_run | python | def process_polychord_run(file_root, base_dir, process_stats_file=True,
**kwargs):
"""Loads data from a PolyChord run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requies PolyChord version v1.13 or later and the setting
write_dead=True.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
process_stats_file: bool, optional
Should PolyChord's <root>.stats file be processed? Set to False if you
don't have the <root>.stats file (such as if PolyChord was run with
write_stats=False).
kwargs: dict, optional
Options passed to ns_run_utils.check_ns_run.
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
"""
    # N.B. PolyChord dead points files also contain remaining live points at
# termination
samples = np.loadtxt(os.path.join(base_dir, file_root) + '_dead-birth.txt')
ns_run = process_samples_array(samples, **kwargs)
ns_run['output'] = {'base_dir': base_dir, 'file_root': file_root}
if process_stats_file:
try:
ns_run['output'] = process_polychord_stats(file_root, base_dir)
except (OSError, IOError, ValueError) as err:
warnings.warn(
('process_polychord_stats raised {} processing {}.stats file. '
' Proceeding without stats.').format(
type(err).__name__, os.path.join(base_dir, file_root)),
UserWarning)
return ns_run | [
"def",
"process_polychord_run",
"(",
"file_root",
",",
"base_dir",
",",
"process_stats_file",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# N.B. PolyChord dead points files also contains remaining live points at",
"# termination",
"samples",
"=",
"np",
".",
"loadtxt"... | Loads data from a PolyChord run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requies PolyChord version v1.13 or later and the setting
write_dead=True.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
process_stats_file: bool, optional
Should PolyChord's <root>.stats file be processed? Set to False if you
don't have the <root>.stats file (such as if PolyChord was run with
write_stats=False).
kwargs: dict, optional
Options passed to ns_run_utils.check_ns_run.
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | [
"Loads",
"data",
"from",
"a",
"PolyChord",
"run",
"into",
"the",
"nestcheck",
"dictionary",
"format",
"for",
"analysis",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L212-L254 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | process_multinest_run | def process_multinest_run(file_root, base_dir, **kwargs):
"""Loads data from a MultiNest run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requies MultiNest version 3.11 or later.
Parameters
----------
file_root: str
Root name for output files. When running MultiNest, this is determined
by the nest_root parameter.
base_dir: str
Directory containing output files. When running MultiNest, this is
determined by the nest_root parameter.
kwargs: dict, optional
Passed to ns_run_utils.check_ns_run (via process_samples_array)
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
"""
# Load dead and live points
dead = np.loadtxt(os.path.join(base_dir, file_root) + '-dead-birth.txt')
live = np.loadtxt(os.path.join(base_dir, file_root)
+ '-phys_live-birth.txt')
# Remove unnecessary final columns
dead = dead[:, :-2]
live = live[:, :-1]
assert dead[:, -2].max() < live[:, -2].min(), (
'final live points should have greater logls than any dead point!',
dead, live)
ns_run = process_samples_array(np.vstack((dead, live)), **kwargs)
assert np.all(ns_run['thread_min_max'][:, 0] == -np.inf), (
'As MultiNest does not currently perform dynamic nested sampling, all '
'threads should start by sampling the whole prior.')
ns_run['output'] = {}
ns_run['output']['file_root'] = file_root
ns_run['output']['base_dir'] = base_dir
return ns_run | python | def process_multinest_run(file_root, base_dir, **kwargs):
"""Loads data from a MultiNest run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requies MultiNest version 3.11 or later.
Parameters
----------
file_root: str
Root name for output files. When running MultiNest, this is determined
by the nest_root parameter.
base_dir: str
Directory containing output files. When running MultiNest, this is
determined by the nest_root parameter.
kwargs: dict, optional
Passed to ns_run_utils.check_ns_run (via process_samples_array)
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
"""
# Load dead and live points
dead = np.loadtxt(os.path.join(base_dir, file_root) + '-dead-birth.txt')
live = np.loadtxt(os.path.join(base_dir, file_root)
+ '-phys_live-birth.txt')
# Remove unnecessary final columns
dead = dead[:, :-2]
live = live[:, :-1]
assert dead[:, -2].max() < live[:, -2].min(), (
'final live points should have greater logls than any dead point!',
dead, live)
ns_run = process_samples_array(np.vstack((dead, live)), **kwargs)
assert np.all(ns_run['thread_min_max'][:, 0] == -np.inf), (
'As MultiNest does not currently perform dynamic nested sampling, all '
'threads should start by sampling the whole prior.')
ns_run['output'] = {}
ns_run['output']['file_root'] = file_root
ns_run['output']['base_dir'] = base_dir
return ns_run | [
"def",
"process_multinest_run",
"(",
"file_root",
",",
"base_dir",
",",
"*",
"*",
"kwargs",
")",
":",
"# Load dead and live points",
"dead",
"=",
"np",
".",
"loadtxt",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"file_root",
")",
"+",
"'-dea... | Loads data from a MultiNest run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requies MultiNest version 3.11 or later.
Parameters
----------
file_root: str
Root name for output files. When running MultiNest, this is determined
by the nest_root parameter.
base_dir: str
Directory containing output files. When running MultiNest, this is
determined by the nest_root parameter.
kwargs: dict, optional
Passed to ns_run_utils.check_ns_run (via process_samples_array)
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | [
"Loads",
"data",
"from",
"a",
"MultiNest",
"run",
"into",
"the",
"nestcheck",
"dictionary",
"format",
"for",
"analysis",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L257-L299 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | process_dynesty_run | def process_dynesty_run(results):
"""Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the trapezium rule).
This function does not require the birth_inds_given_contours and
threads_given_birth_inds functions as dynesty results objects
already include thread labels via their samples_id property. If the
dynesty run is dynamic, the batch_bounds property is need to determine
the threads' starting birth contours.
Parameters
----------
results: dynesty results object
N.B. the remaining live points at termination must be included in the
results (dynesty samplers' run_nested method does this if
add_live_points=True - its default value).
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
"""
samples = np.zeros((results.samples.shape[0],
results.samples.shape[1] + 3))
samples[:, 0] = results.logl
samples[:, 1] = results.samples_id
samples[:, 3:] = results.samples
unique_th, first_inds = np.unique(results.samples_id, return_index=True)
assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[0])))
thread_min_max = np.full((unique_th.shape[0], 2), np.nan)
try:
# Try processing standard nested sampling results
assert unique_th.shape[0] == results.nlive
assert np.array_equal(
np.unique(results.samples_id[-results.nlive:]),
np.asarray(range(results.nlive))), (
'perhaps the final live points are not included?')
thread_min_max[:, 0] = -np.inf
except AttributeError:
# If results has no nlive attribute, it must be dynamic nested sampling
assert unique_th.shape[0] == sum(results.batch_nlive)
for th_lab, ind in zip(unique_th, first_inds):
thread_min_max[th_lab, 0] = (
results.batch_bounds[results.samples_batch[ind], 0])
for th_lab in unique_th:
final_ind = np.where(results.samples_id == th_lab)[0][-1]
thread_min_max[th_lab, 1] = results.logl[final_ind]
samples[final_ind, 2] = -1
assert np.all(~np.isnan(thread_min_max))
run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max)
nestcheck.ns_run_utils.check_ns_run(run)
return run | python | def process_dynesty_run(results):
"""Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the trapezium rule).
This function does not require the birth_inds_given_contours and
threads_given_birth_inds functions as dynesty results objects
already include thread labels via their samples_id property. If the
    dynesty run is dynamic, the batch_bounds property is needed to determine
the threads' starting birth contours.
Parameters
----------
results: dynesty results object
N.B. the remaining live points at termination must be included in the
results (dynesty samplers' run_nested method does this if
add_live_points=True - its default value).
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
"""
samples = np.zeros((results.samples.shape[0],
results.samples.shape[1] + 3))
samples[:, 0] = results.logl
samples[:, 1] = results.samples_id
samples[:, 3:] = results.samples
unique_th, first_inds = np.unique(results.samples_id, return_index=True)
assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[0])))
thread_min_max = np.full((unique_th.shape[0], 2), np.nan)
try:
# Try processing standard nested sampling results
assert unique_th.shape[0] == results.nlive
assert np.array_equal(
np.unique(results.samples_id[-results.nlive:]),
np.asarray(range(results.nlive))), (
'perhaps the final live points are not included?')
thread_min_max[:, 0] = -np.inf
except AttributeError:
# If results has no nlive attribute, it must be dynamic nested sampling
assert unique_th.shape[0] == sum(results.batch_nlive)
for th_lab, ind in zip(unique_th, first_inds):
thread_min_max[th_lab, 0] = (
results.batch_bounds[results.samples_batch[ind], 0])
for th_lab in unique_th:
final_ind = np.where(results.samples_id == th_lab)[0][-1]
thread_min_max[th_lab, 1] = results.logl[final_ind]
samples[final_ind, 2] = -1
assert np.all(~np.isnan(thread_min_max))
run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max)
nestcheck.ns_run_utils.check_ns_run(run)
return run | [
"def",
"process_dynesty_run",
"(",
"results",
")",
":",
"samples",
"=",
"np",
".",
"zeros",
"(",
"(",
"results",
".",
"samples",
".",
"shape",
"[",
"0",
"]",
",",
"results",
".",
"samples",
".",
"shape",
"[",
"1",
"]",
"+",
"3",
")",
")",
"samples"... | Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the trapezium rule).
This function does not require the birth_inds_given_contours and
threads_given_birth_inds functions as dynesty results objects
already include thread labels via their samples_id property. If the
    dynesty run is dynamic, the batch_bounds property is needed to determine
the threads' starting birth contours.
Parameters
----------
results: dynesty results object
N.B. the remaining live points at termination must be included in the
results (dynesty samplers' run_nested method does this if
add_live_points=True - its default value).
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | [
"Transforms",
"results",
"from",
"a",
"dynesty",
"run",
"into",
"the",
"nestcheck",
"dictionary",
"format",
"for",
"analysis",
".",
"This",
"function",
"has",
"been",
"tested",
"with",
"dynesty",
"v9",
".",
"2",
".",
"0",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L302-L357 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | process_polychord_stats | def process_polychord_stats(file_root, base_dir):
"""Reads a PolyChord <root>.stats output file and returns the information
contained in a dictionary.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
Returns
-------
output: dict
See PolyChord documentation for more details.
"""
filename = os.path.join(base_dir, file_root) + '.stats'
output = {'base_dir': base_dir,
'file_root': file_root}
with open(filename, 'r') as stats_file:
lines = stats_file.readlines()
output['logZ'] = float(lines[8].split()[2])
output['logZerr'] = float(lines[8].split()[4])
# Cluster logZs and errors
output['logZs'] = []
output['logZerrs'] = []
for line in lines[14:]:
if line[:5] != 'log(Z':
break
output['logZs'].append(float(
re.findall(r'=(.*)', line)[0].split()[0]))
output['logZerrs'].append(float(
re.findall(r'=(.*)', line)[0].split()[2]))
# Other output info
nclust = len(output['logZs'])
output['ncluster'] = nclust
output['nposterior'] = int(lines[20 + nclust].split()[1])
output['nequals'] = int(lines[21 + nclust].split()[1])
output['ndead'] = int(lines[22 + nclust].split()[1])
output['nlive'] = int(lines[23 + nclust].split()[1])
try:
output['nlike'] = int(lines[24 + nclust].split()[1])
except ValueError:
# if nlike has too many digits, PolyChord just writes ***** to .stats
# file. This causes a ValueError
output['nlike'] = np.nan
output['avnlike'] = float(lines[25 + nclust].split()[1])
output['avnlikeslice'] = float(lines[25 + nclust].split()[3])
# Means and stds of dimensions (not produced by PolyChord<=1.13)
if len(lines) > 29 + nclust:
output['param_means'] = []
output['param_mean_errs'] = []
for line in lines[29 + nclust:]:
output['param_means'].append(float(line.split()[1]))
output['param_mean_errs'].append(float(line.split()[3]))
return output | python | def process_polychord_stats(file_root, base_dir):
"""Reads a PolyChord <root>.stats output file and returns the information
contained in a dictionary.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
Returns
-------
output: dict
See PolyChord documentation for more details.
"""
filename = os.path.join(base_dir, file_root) + '.stats'
output = {'base_dir': base_dir,
'file_root': file_root}
with open(filename, 'r') as stats_file:
lines = stats_file.readlines()
output['logZ'] = float(lines[8].split()[2])
output['logZerr'] = float(lines[8].split()[4])
# Cluster logZs and errors
output['logZs'] = []
output['logZerrs'] = []
for line in lines[14:]:
if line[:5] != 'log(Z':
break
output['logZs'].append(float(
re.findall(r'=(.*)', line)[0].split()[0]))
output['logZerrs'].append(float(
re.findall(r'=(.*)', line)[0].split()[2]))
# Other output info
nclust = len(output['logZs'])
output['ncluster'] = nclust
output['nposterior'] = int(lines[20 + nclust].split()[1])
output['nequals'] = int(lines[21 + nclust].split()[1])
output['ndead'] = int(lines[22 + nclust].split()[1])
output['nlive'] = int(lines[23 + nclust].split()[1])
try:
output['nlike'] = int(lines[24 + nclust].split()[1])
except ValueError:
# if nlike has too many digits, PolyChord just writes ***** to .stats
# file. This causes a ValueError
output['nlike'] = np.nan
output['avnlike'] = float(lines[25 + nclust].split()[1])
output['avnlikeslice'] = float(lines[25 + nclust].split()[3])
# Means and stds of dimensions (not produced by PolyChord<=1.13)
if len(lines) > 29 + nclust:
output['param_means'] = []
output['param_mean_errs'] = []
for line in lines[29 + nclust:]:
output['param_means'].append(float(line.split()[1]))
output['param_mean_errs'].append(float(line.split()[3]))
return output | [
"def",
"process_polychord_stats",
"(",
"file_root",
",",
"base_dir",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"file_root",
")",
"+",
"'.stats'",
"output",
"=",
"{",
"'base_dir'",
":",
"base_dir",
",",
"'file_root'",
... | Reads a PolyChord <root>.stats output file and returns the information
contained in a dictionary.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
Returns
-------
output: dict
See PolyChord documentation for more details. | [
"Reads",
"a",
"PolyChord",
"<root",
">",
".",
"stats",
"output",
"file",
"and",
"returns",
"the",
"information",
"contained",
"in",
"a",
"dictionary",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L360-L415 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | process_samples_array | def process_samples_array(samples, **kwargs):
"""Convert an array of nested sampling dead and live points of the type
produced by PolyChord and MultiNest into a nestcheck nested sampling run
dictionary.
Parameters
----------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
kwargs: dict, optional
Options passed to birth_inds_given_contours
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more
details). Only contains information in samples (not additional
optional output key).
"""
samples = samples[np.argsort(samples[:, -2])]
ns_run = {}
ns_run['logl'] = samples[:, -2]
ns_run['theta'] = samples[:, :-2]
birth_contours = samples[:, -1]
# birth_contours, ns_run['theta'] = check_logls_unique(
# samples[:, -2], samples[:, -1], samples[:, :-2])
birth_inds = birth_inds_given_contours(
birth_contours, ns_run['logl'], **kwargs)
ns_run['thread_labels'] = threads_given_birth_inds(birth_inds)
unique_threads = np.unique(ns_run['thread_labels'])
assert np.array_equal(unique_threads,
np.asarray(range(unique_threads.shape[0])))
# Work out nlive_array and thread_min_max logls from thread labels and
# birth contours
thread_min_max = np.zeros((unique_threads.shape[0], 2))
# NB delta_nlive indexes are offset from points' indexes by 1 as we need an
# element to represent the initial sampling of live points before any dead
# points are created.
# I.E. birth on step 1 corresponds to replacing dead point zero
delta_nlive = np.zeros(samples.shape[0] + 1)
for label in unique_threads:
thread_inds = np.where(ns_run['thread_labels'] == label)[0]
# Max is final logl in thread
thread_min_max[label, 1] = ns_run['logl'][thread_inds[-1]]
thread_start_birth_ind = birth_inds[thread_inds[0]]
# delta nlive indexes are +1 from logl indexes to allow for initial
# nlive (before first dead point)
delta_nlive[thread_inds[-1] + 1] -= 1
if thread_start_birth_ind == birth_inds[0]:
# thread minimum is -inf as it starts by sampling from whole prior
thread_min_max[label, 0] = -np.inf
delta_nlive[0] += 1
else:
assert thread_start_birth_ind >= 0
thread_min_max[label, 0] = ns_run['logl'][thread_start_birth_ind]
delta_nlive[thread_start_birth_ind + 1] += 1
ns_run['thread_min_max'] = thread_min_max
ns_run['nlive_array'] = np.cumsum(delta_nlive)[:-1]
return ns_run | python | def process_samples_array(samples, **kwargs):
"""Convert an array of nested sampling dead and live points of the type
produced by PolyChord and MultiNest into a nestcheck nested sampling run
dictionary.
Parameters
----------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
kwargs: dict, optional
Options passed to birth_inds_given_contours
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more
details). Only contains information in samples (not additional
optional output key).
"""
samples = samples[np.argsort(samples[:, -2])]
ns_run = {}
ns_run['logl'] = samples[:, -2]
ns_run['theta'] = samples[:, :-2]
birth_contours = samples[:, -1]
# birth_contours, ns_run['theta'] = check_logls_unique(
# samples[:, -2], samples[:, -1], samples[:, :-2])
birth_inds = birth_inds_given_contours(
birth_contours, ns_run['logl'], **kwargs)
ns_run['thread_labels'] = threads_given_birth_inds(birth_inds)
unique_threads = np.unique(ns_run['thread_labels'])
assert np.array_equal(unique_threads,
np.asarray(range(unique_threads.shape[0])))
# Work out nlive_array and thread_min_max logls from thread labels and
# birth contours
thread_min_max = np.zeros((unique_threads.shape[0], 2))
# NB delta_nlive indexes are offset from points' indexes by 1 as we need an
# element to represent the initial sampling of live points before any dead
# points are created.
# I.E. birth on step 1 corresponds to replacing dead point zero
delta_nlive = np.zeros(samples.shape[0] + 1)
for label in unique_threads:
thread_inds = np.where(ns_run['thread_labels'] == label)[0]
# Max is final logl in thread
thread_min_max[label, 1] = ns_run['logl'][thread_inds[-1]]
thread_start_birth_ind = birth_inds[thread_inds[0]]
# delta nlive indexes are +1 from logl indexes to allow for initial
# nlive (before first dead point)
delta_nlive[thread_inds[-1] + 1] -= 1
if thread_start_birth_ind == birth_inds[0]:
# thread minimum is -inf as it starts by sampling from whole prior
thread_min_max[label, 0] = -np.inf
delta_nlive[0] += 1
else:
assert thread_start_birth_ind >= 0
thread_min_max[label, 0] = ns_run['logl'][thread_start_birth_ind]
delta_nlive[thread_start_birth_ind + 1] += 1
ns_run['thread_min_max'] = thread_min_max
ns_run['nlive_array'] = np.cumsum(delta_nlive)[:-1]
return ns_run | [
"def",
"process_samples_array",
"(",
"samples",
",",
"*",
"*",
"kwargs",
")",
":",
"samples",
"=",
"samples",
"[",
"np",
".",
"argsort",
"(",
"samples",
"[",
":",
",",
"-",
"2",
"]",
")",
"]",
"ns_run",
"=",
"{",
"}",
"ns_run",
"[",
"'logl'",
"]",
... | Convert an array of nested sampling dead and live points of the type
produced by PolyChord and MultiNest into a nestcheck nested sampling run
dictionary.
Parameters
----------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
kwargs: dict, optional
Options passed to birth_inds_given_contours
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more
details). Only contains information in samples (not additional
optional output key). | [
"Convert",
"an",
"array",
"of",
"nested",
"sampling",
"dead",
"and",
"live",
"points",
"of",
"the",
"type",
"produced",
"by",
"PolyChord",
"and",
"MultiNest",
"into",
"a",
"nestcheck",
"nested",
"sampling",
"run",
"dictionary",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L418-L478 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | birth_inds_given_contours | def birth_inds_given_contours(birth_logl_arr, logl_arr, **kwargs):
"""Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
    MultiNest and PolyChord use different values to identify the initial live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl_arr and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1.
"""
dup_assert = kwargs.pop('dup_assert', False)
dup_warn = kwargs.pop('dup_warn', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert logl_arr.ndim == 1, logl_arr.ndim
assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim
# Check for duplicate logl values (if specified by dup_assert or dup_warn)
nestcheck.ns_run_utils.check_ns_run_logls(
{'logl': logl_arr}, dup_assert=dup_assert, dup_warn=dup_warn)
# Random seed so results are consistent if there are duplicate logls
state = np.random.get_state() # Save random state before seeding
np.random.seed(0)
# Calculate birth inds
init_birth = birth_logl_arr[0]
assert np.all(birth_logl_arr <= logl_arr), (
logl_arr[birth_logl_arr > logl_arr])
birth_inds = np.full(birth_logl_arr.shape, np.nan)
birth_inds[birth_logl_arr == init_birth] = -1
for i, birth_logl in enumerate(birth_logl_arr):
if not np.isnan(birth_inds[i]):
# birth ind has already been assigned
continue
dup_deaths = np.where(logl_arr == birth_logl)[0]
if dup_deaths.shape == (1,):
# death index is unique
birth_inds[i] = dup_deaths[0]
continue
# The remainder of this loop deals with the case that multiple points
# have the same logl value (=birth_logl). This can occur due to limited
# precision, or for likelihoods with contant regions. In this case we
# randomly assign the duplicates birth steps in a manner
# that provides a valid division into nested sampling runs
dup_births = np.where(birth_logl_arr == birth_logl)[0]
assert dup_deaths.shape[0] > 1, dup_deaths
if np.all(birth_logl_arr[dup_deaths] != birth_logl):
# If no points both are born and die on this contour, we can just
# randomly assign an order
np.random.shuffle(dup_deaths)
inds_to_use = dup_deaths
else:
# If some points are both born and die on the contour, we need to
# take care that the assigned birth inds do not result in some
# points dying before they are born
try:
inds_to_use = sample_less_than_condition(
dup_deaths, dup_births)
except ValueError:
raise ValueError((
'There is no way to allocate indexes dup_deaths={} such '
'that each is less than dup_births={}.').format(
dup_deaths, dup_births))
try:
# Add our selected inds_to_use values to the birth_inds array
# Note that dup_deaths (and hence inds to use) may have more
# members than dup_births, because one of the duplicates may be
# the final point in a thread. We therefore include only the first
# dup_births.shape[0] elements
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
except ValueError:
warnings.warn((
'for logl={}, the number of points born (indexes='
'{}) is bigger than the number of points dying '
'(indexes={}). This indicates a problem with your '
'nested sampling software - it may be caused by '
'a bug in PolyChord which was fixed in PolyChord '
'v1.14, so try upgrading. I will try to give an '
'approximate allocation of threads but this may '
'fail.').format(
birth_logl, dup_births, inds_to_use), UserWarning)
extra_inds = np.random.choice(
inds_to_use, size=dup_births.shape[0] - inds_to_use.shape[0])
inds_to_use = np.concatenate((inds_to_use, extra_inds))
np.random.shuffle(inds_to_use)
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()
np.random.set_state(state) # Reset random state
return birth_inds.astype(int) | python | def birth_inds_given_contours(birth_logl_arr, logl_arr, **kwargs):
"""Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the inital live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl_arr and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1.
"""
dup_assert = kwargs.pop('dup_assert', False)
dup_warn = kwargs.pop('dup_warn', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert logl_arr.ndim == 1, logl_arr.ndim
assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim
# Check for duplicate logl values (if specified by dup_assert or dup_warn)
nestcheck.ns_run_utils.check_ns_run_logls(
{'logl': logl_arr}, dup_assert=dup_assert, dup_warn=dup_warn)
# Random seed so results are consistent if there are duplicate logls
state = np.random.get_state() # Save random state before seeding
np.random.seed(0)
# Calculate birth inds
init_birth = birth_logl_arr[0]
assert np.all(birth_logl_arr <= logl_arr), (
logl_arr[birth_logl_arr > logl_arr])
birth_inds = np.full(birth_logl_arr.shape, np.nan)
birth_inds[birth_logl_arr == init_birth] = -1
for i, birth_logl in enumerate(birth_logl_arr):
if not np.isnan(birth_inds[i]):
# birth ind has already been assigned
continue
dup_deaths = np.where(logl_arr == birth_logl)[0]
if dup_deaths.shape == (1,):
# death index is unique
birth_inds[i] = dup_deaths[0]
continue
# The remainder of this loop deals with the case that multiple points
# have the same logl value (=birth_logl). This can occur due to limited
# precision, or for likelihoods with contant regions. In this case we
# randomly assign the duplicates birth steps in a manner
# that provides a valid division into nested sampling runs
dup_births = np.where(birth_logl_arr == birth_logl)[0]
assert dup_deaths.shape[0] > 1, dup_deaths
if np.all(birth_logl_arr[dup_deaths] != birth_logl):
# If no points both are born and die on this contour, we can just
# randomly assign an order
np.random.shuffle(dup_deaths)
inds_to_use = dup_deaths
else:
# If some points are both born and die on the contour, we need to
# take care that the assigned birth inds do not result in some
# points dying before they are born
try:
inds_to_use = sample_less_than_condition(
dup_deaths, dup_births)
except ValueError:
raise ValueError((
'There is no way to allocate indexes dup_deaths={} such '
'that each is less than dup_births={}.').format(
dup_deaths, dup_births))
try:
# Add our selected inds_to_use values to the birth_inds array
# Note that dup_deaths (and hence inds to use) may have more
# members than dup_births, because one of the duplicates may be
# the final point in a thread. We therefore include only the first
# dup_births.shape[0] elements
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
except ValueError:
warnings.warn((
'for logl={}, the number of points born (indexes='
'{}) is bigger than the number of points dying '
'(indexes={}). This indicates a problem with your '
'nested sampling software - it may be caused by '
'a bug in PolyChord which was fixed in PolyChord '
'v1.14, so try upgrading. I will try to give an '
'approximate allocation of threads but this may '
'fail.').format(
birth_logl, dup_births, inds_to_use), UserWarning)
extra_inds = np.random.choice(
inds_to_use, size=dup_births.shape[0] - inds_to_use.shape[0])
inds_to_use = np.concatenate((inds_to_use, extra_inds))
np.random.shuffle(inds_to_use)
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()
np.random.set_state(state) # Reset random state
return birth_inds.astype(int) | [
"def",
"birth_inds_given_contours",
"(",
"birth_logl_arr",
",",
"logl_arr",
",",
"*",
"*",
"kwargs",
")",
":",
"dup_assert",
"=",
"kwargs",
".",
"pop",
"(",
"'dup_assert'",
",",
"False",
")",
"dup_warn",
"=",
"kwargs",
".",
"pop",
"(",
"'dup_warn'",
",",
"... | Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the inital live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl_arr and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1. | [
"Maps",
"the",
"iso",
"-",
"likelihood",
"contours",
"on",
"which",
"points",
"were",
"born",
"to",
"the",
"index",
"of",
"the",
"dead",
"point",
"on",
"this",
"contour",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L481-L591 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | sample_less_than_condition | def sample_less_than_condition(choices_in, condition):
"""Creates a random sample from choices without replacement, subject to the
condition that each element of the output is greater than the corresponding
element of the condition array.
condition should be in ascending order.
"""
output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
choices = copy.deepcopy(choices_in)
for i, _ in enumerate(output):
# randomly select one of the choices which meets condition
avail_inds = np.where(choices < condition[i])[0]
selected_ind = np.random.choice(avail_inds)
output[i] = choices[selected_ind]
# remove the chosen value
choices = np.delete(choices, selected_ind)
return output | python | def sample_less_than_condition(choices_in, condition):
"""Creates a random sample from choices without replacement, subject to the
condition that each element of the output is greater than the corresponding
element of the condition array.
condition should be in ascending order.
"""
output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
choices = copy.deepcopy(choices_in)
for i, _ in enumerate(output):
# randomly select one of the choices which meets condition
avail_inds = np.where(choices < condition[i])[0]
selected_ind = np.random.choice(avail_inds)
output[i] = choices[selected_ind]
# remove the chosen value
choices = np.delete(choices, selected_ind)
return output | [
"def",
"sample_less_than_condition",
"(",
"choices_in",
",",
"condition",
")",
":",
"output",
"=",
"np",
".",
"zeros",
"(",
"min",
"(",
"condition",
".",
"shape",
"[",
"0",
"]",
",",
"choices_in",
".",
"shape",
"[",
"0",
"]",
")",
")",
"choices",
"=",
... | Creates a random sample from choices without replacement, subject to the
condition that each element of the output is greater than the corresponding
element of the condition array.
condition should be in ascending order. | [
"Creates",
"a",
"random",
"sample",
"from",
"choices",
"without",
"replacement",
"subject",
"to",
"the",
"condition",
"that",
"each",
"element",
"of",
"the",
"output",
"is",
"greater",
"than",
"the",
"corresponding",
"element",
"of",
"the",
"condition",
"array",... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L594-L610 | train |
ejhigson/nestcheck | nestcheck/data_processing.py | threads_given_birth_inds | def threads_given_birth_inds(birth_inds):
"""Divides a nested sampling run into threads, using info on the indexes
at which points were sampled. See "Sampling errors in nested sampling
parameter estimation" (Higson et al. 2018) for more information.
Parameters
----------
birth_inds: 1d numpy array
Indexes of the iso-likelihood contours from within which each point was
sampled ("born").
Returns
-------
thread_labels: 1d numpy array of ints
labels of the thread each point belongs to.
"""
unique, counts = np.unique(birth_inds, return_counts=True)
# First get a list of all the indexes on which threads start and their
# counts. This is every point initially sampled from the prior, plus any
# indexes where more than one point is sampled.
thread_start_inds = np.concatenate((
unique[:1], unique[1:][counts[1:] > 1]))
thread_start_counts = np.concatenate((
counts[:1], counts[1:][counts[1:] > 1] - 1))
thread_labels = np.full(birth_inds.shape, np.nan)
thread_num = 0
for nmulti, multi in enumerate(thread_start_inds):
for i, start_ind in enumerate(np.where(birth_inds == multi)[0]):
# unless nmulti=0 the first point born on the contour (i=0) is
# already assigned to a thread
if i != 0 or nmulti == 0:
# check point has not already been assigned
assert np.isnan(thread_labels[start_ind])
thread_labels[start_ind] = thread_num
# find the point which replaced it
next_ind = np.where(birth_inds == start_ind)[0]
while next_ind.shape != (0,):
# check point has not already been assigned
assert np.isnan(thread_labels[next_ind[0]])
thread_labels[next_ind[0]] = thread_num
# find the point which replaced it
next_ind = np.where(birth_inds == next_ind[0])[0]
thread_num += 1
if not np.all(~np.isnan(thread_labels)):
warnings.warn((
'{} points (out of a total of {}) were not given a thread label! '
'This is likely due to small numerical errors in your nested '
'sampling software while running the calculation or writing the '
'input files. '
'I will try to give an approximate answer by randomly assigning '
'these points to threads.'
'\nIndexes without labels are {}'
'\nIndexes on which threads start are {} with {} threads '
'starting on each.').format(
(np.isnan(thread_labels)).sum(), birth_inds.shape[0],
np.where(np.isnan(thread_labels))[0],
thread_start_inds, thread_start_counts))
inds = np.where(np.isnan(thread_labels))[0]
state = np.random.get_state() # Save random state before seeding
np.random.seed(0) # make thread decomposition is reproducible
for ind in inds:
# Get the set of threads with members both before and after ind to
# ensure we don't change nlive_array by extending a thread
labels_to_choose = np.intersect1d( # N.B. this removes nans too
thread_labels[:ind], thread_labels[ind + 1:])
if labels_to_choose.shape[0] == 0:
# In edge case that there is no intersection, just randomly
# select from non-nan thread labels
labels_to_choose = np.unique(
thread_labels[~np.isnan(thread_labels)])
thread_labels[ind] = np.random.choice(labels_to_choose)
np.random.set_state(state) # Reset random state
assert np.all(~np.isnan(thread_labels)), (
'{} points still do not have thread labels'.format(
(np.isnan(thread_labels)).sum()))
assert np.array_equal(thread_labels, thread_labels.astype(int)), (
'Thread labels should all be ints!')
thread_labels = thread_labels.astype(int)
# Check unique thread labels are a sequence from 0 to nthreads-1
assert np.array_equal(
np.unique(thread_labels),
np.asarray(range(sum(thread_start_counts)))), (
str(np.unique(thread_labels)) + ' is not equal to range('
+ str(sum(thread_start_counts)) + ')')
return thread_labels | python | def threads_given_birth_inds(birth_inds):
"""Divides a nested sampling run into threads, using info on the indexes
at which points were sampled. See "Sampling errors in nested sampling
parameter estimation" (Higson et al. 2018) for more information.
Parameters
----------
birth_inds: 1d numpy array
Indexes of the iso-likelihood contours from within which each point was
sampled ("born").
Returns
-------
thread_labels: 1d numpy array of ints
labels of the thread each point belongs to.
"""
unique, counts = np.unique(birth_inds, return_counts=True)
# First get a list of all the indexes on which threads start and their
# counts. This is every point initially sampled from the prior, plus any
# indexes where more than one point is sampled.
thread_start_inds = np.concatenate((
unique[:1], unique[1:][counts[1:] > 1]))
thread_start_counts = np.concatenate((
counts[:1], counts[1:][counts[1:] > 1] - 1))
thread_labels = np.full(birth_inds.shape, np.nan)
thread_num = 0
for nmulti, multi in enumerate(thread_start_inds):
for i, start_ind in enumerate(np.where(birth_inds == multi)[0]):
# unless nmulti=0 the first point born on the contour (i=0) is
# already assigned to a thread
if i != 0 or nmulti == 0:
# check point has not already been assigned
assert np.isnan(thread_labels[start_ind])
thread_labels[start_ind] = thread_num
# find the point which replaced it
next_ind = np.where(birth_inds == start_ind)[0]
while next_ind.shape != (0,):
# check point has not already been assigned
assert np.isnan(thread_labels[next_ind[0]])
thread_labels[next_ind[0]] = thread_num
# find the point which replaced it
next_ind = np.where(birth_inds == next_ind[0])[0]
thread_num += 1
if not np.all(~np.isnan(thread_labels)):
warnings.warn((
'{} points (out of a total of {}) were not given a thread label! '
'This is likely due to small numerical errors in your nested '
'sampling software while running the calculation or writing the '
'input files. '
'I will try to give an approximate answer by randomly assigning '
'these points to threads.'
'\nIndexes without labels are {}'
'\nIndexes on which threads start are {} with {} threads '
'starting on each.').format(
(np.isnan(thread_labels)).sum(), birth_inds.shape[0],
np.where(np.isnan(thread_labels))[0],
thread_start_inds, thread_start_counts))
inds = np.where(np.isnan(thread_labels))[0]
state = np.random.get_state() # Save random state before seeding
np.random.seed(0) # make thread decomposition is reproducible
for ind in inds:
# Get the set of threads with members both before and after ind to
# ensure we don't change nlive_array by extending a thread
labels_to_choose = np.intersect1d( # N.B. this removes nans too
thread_labels[:ind], thread_labels[ind + 1:])
if labels_to_choose.shape[0] == 0:
# In edge case that there is no intersection, just randomly
# select from non-nan thread labels
labels_to_choose = np.unique(
thread_labels[~np.isnan(thread_labels)])
thread_labels[ind] = np.random.choice(labels_to_choose)
np.random.set_state(state) # Reset random state
assert np.all(~np.isnan(thread_labels)), (
'{} points still do not have thread labels'.format(
(np.isnan(thread_labels)).sum()))
assert np.array_equal(thread_labels, thread_labels.astype(int)), (
'Thread labels should all be ints!')
thread_labels = thread_labels.astype(int)
# Check unique thread labels are a sequence from 0 to nthreads-1
assert np.array_equal(
np.unique(thread_labels),
np.asarray(range(sum(thread_start_counts)))), (
str(np.unique(thread_labels)) + ' is not equal to range('
+ str(sum(thread_start_counts)) + ')')
return thread_labels | [
"def",
"threads_given_birth_inds",
"(",
"birth_inds",
")",
":",
"unique",
",",
"counts",
"=",
"np",
".",
"unique",
"(",
"birth_inds",
",",
"return_counts",
"=",
"True",
")",
"# First get a list of all the indexes on which threads start and their",
"# counts. This is every p... | Divides a nested sampling run into threads, using info on the indexes
at which points were sampled. See "Sampling errors in nested sampling
parameter estimation" (Higson et al. 2018) for more information.
Parameters
----------
birth_inds: 1d numpy array
Indexes of the iso-likelihood contours from within which each point was
sampled ("born").
Returns
-------
thread_labels: 1d numpy array of ints
labels of the thread each point belongs to. | [
"Divides",
"a",
"nested",
"sampling",
"run",
"into",
"threads",
"using",
"info",
"on",
"the",
"indexes",
"at",
"which",
"points",
"were",
"sampled",
".",
"See",
"Sampling",
"errors",
"in",
"nested",
"sampling",
"parameter",
"estimation",
"(",
"Higson",
"et",
... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L613-L697 | train |
ejhigson/nestcheck | nestcheck/parallel_utils.py | parallel_map | def parallel_map(func, *arg_iterable, **kwargs):
"""Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
chunksize: int, optional
Perform function in batches
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs
"""
chunksize = kwargs.pop('chunksize', 1)
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
func_to_map = functools.partial(func, *func_pre_args, **func_kwargs)
if parallel:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize))
else:
if parallel_warning:
warnings.warn(('parallel_map has parallel=False - turn on '
'parallelisation for faster processing'),
UserWarning)
return list(map(func_to_map, *arg_iterable)) | python | def parallel_map(func, *arg_iterable, **kwargs):
"""Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
chunksize: int, optional
Perform function in batches
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs
"""
chunksize = kwargs.pop('chunksize', 1)
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
func_to_map = functools.partial(func, *func_pre_args, **func_kwargs)
if parallel:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize))
else:
if parallel_warning:
warnings.warn(('parallel_map has parallel=False - turn on '
'parallelisation for faster processing'),
UserWarning)
return list(map(func_to_map, *arg_iterable)) | [
"def",
"parallel_map",
"(",
"func",
",",
"*",
"arg_iterable",
",",
"*",
"*",
"kwargs",
")",
":",
"chunksize",
"=",
"kwargs",
".",
"pop",
"(",
"'chunksize'",
",",
"1",
")",
"func_pre_args",
"=",
"kwargs",
".",
"pop",
"(",
"'func_pre_args'",
",",
"(",
")... | Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
chunksize: int, optional
Perform function in batches
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs | [
"Apply",
"function",
"to",
"iterable",
"with",
"parallel",
"map",
"and",
"hence",
"returns",
"results",
"in",
"order",
".",
"functools",
".",
"partial",
"is",
"used",
"to",
"freeze",
"func_pre_args",
"and",
"func_kwargs",
"meaning",
"that",
"the",
"iterable",
... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/parallel_utils.py#L12-L67 | train |
ejhigson/nestcheck | nestcheck/parallel_utils.py | parallel_apply | def parallel_apply(func, arg_iterable, **kwargs):
"""Apply function to iterable with parallelisation and a tqdm progress bar.
Roughly equivalent to
>>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in
arg_iterable]
but will **not** necessarily return results in input order.
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
func_args: tuple, optional
Additional positional arguments for func.
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs
"""
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
func_args = kwargs.pop('func_args', ())
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if 'leave' not in tqdm_kwargs: # default to leave=False
tqdm_kwargs['leave'] = False
assert isinstance(func_args, tuple), (
str(func_args) + ' is type ' + str(type(func_args)))
assert isinstance(func_pre_args, tuple), (
str(func_pre_args) + ' is type ' + str(type(func_pre_args)))
progress = select_tqdm()
if not parallel:
if parallel_warning:
warnings.warn(('parallel_map has parallel=False - turn on '
'parallelisation for faster processing'),
UserWarning)
return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for
x in progress(arg_iterable, **tqdm_kwargs)]
else:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
futures = []
for element in arg_iterable:
futures.append(pool.submit(
func, *(func_pre_args + (element,) + func_args),
**func_kwargs))
results = []
for fut in progress(concurrent.futures.as_completed(futures),
total=len(arg_iterable), **tqdm_kwargs):
results.append(fut.result())
return results | python | def parallel_apply(func, arg_iterable, **kwargs):
"""Apply function to iterable with parallelisation and a tqdm progress bar.
Roughly equivalent to
>>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in
arg_iterable]
but will **not** necessarily return results in input order.
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
func_args: tuple, optional
Additional positional arguments for func.
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs
"""
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
func_args = kwargs.pop('func_args', ())
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if 'leave' not in tqdm_kwargs: # default to leave=False
tqdm_kwargs['leave'] = False
assert isinstance(func_args, tuple), (
str(func_args) + ' is type ' + str(type(func_args)))
assert isinstance(func_pre_args, tuple), (
str(func_pre_args) + ' is type ' + str(type(func_pre_args)))
progress = select_tqdm()
if not parallel:
if parallel_warning:
warnings.warn(('parallel_map has parallel=False - turn on '
'parallelisation for faster processing'),
UserWarning)
return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for
x in progress(arg_iterable, **tqdm_kwargs)]
else:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
futures = []
for element in arg_iterable:
futures.append(pool.submit(
func, *(func_pre_args + (element,) + func_args),
**func_kwargs))
results = []
for fut in progress(concurrent.futures.as_completed(futures),
total=len(arg_iterable), **tqdm_kwargs):
results.append(fut.result())
return results | [
"def",
"parallel_apply",
"(",
"func",
",",
"arg_iterable",
",",
"*",
"*",
"kwargs",
")",
":",
"max_workers",
"=",
"kwargs",
".",
"pop",
"(",
"'max_workers'",
",",
"None",
")",
"parallel",
"=",
"kwargs",
".",
"pop",
"(",
"'parallel'",
",",
"True",
")",
... | Apply function to iterable with parallelisation and a tqdm progress bar.
Roughly equivalent to
>>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in
arg_iterable]
but will **not** necessarily return results in input order.
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
func_args: tuple, optional
Additional positional arguments for func.
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs | [
"Apply",
"function",
"to",
"iterable",
"with",
"parallelisation",
"and",
"a",
"tqdm",
"progress",
"bar",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/parallel_utils.py#L70-L142 | train |
ejhigson/nestcheck | nestcheck/parallel_utils.py | select_tqdm | def select_tqdm():
"""If running in a jupyter notebook, then returns tqdm_notebook.
Otherwise returns a regular tqdm progress bar.
Returns
-------
progress: function
"""
try:
progress = tqdm.tqdm_notebook
assert get_ipython().has_trait('kernel')
except (NameError, AssertionError):
progress = tqdm.tqdm
def select_tqdm():
    """Pick the appropriate tqdm progress bar flavour.

    Returns tqdm.tqdm_notebook when running inside a Jupyter kernel (detected
    via IPython's get_ipython), and the plain tqdm.tqdm bar otherwise.

    Returns
    -------
    progress: function
    """
    notebook_bar = tqdm.tqdm_notebook
    try:
        # get_ipython only exists inside IPython; has_trait('kernel') is
        # only true when a Jupyter kernel is attached.
        assert get_ipython().has_trait('kernel')
    except (NameError, AssertionError):
        return tqdm.tqdm
    return notebook_bar
"def",
"select_tqdm",
"(",
")",
":",
"try",
":",
"progress",
"=",
"tqdm",
".",
"tqdm_notebook",
"assert",
"get_ipython",
"(",
")",
".",
"has_trait",
"(",
"'kernel'",
")",
"except",
"(",
"NameError",
",",
"AssertionError",
")",
":",
"progress",
"=",
"tqdm",... | If running in a jupyter notebook, then returns tqdm_notebook.
Otherwise returns a regular tqdm progress bar.
Returns
-------
progress: function | [
"If",
"running",
"in",
"a",
"jupyter",
"notebook",
"then",
"returns",
"tqdm_notebook",
".",
"Otherwise",
"returns",
"a",
"regular",
"tqdm",
"progress",
"bar",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/parallel_utils.py#L145-L158 | train |
ejhigson/nestcheck | nestcheck/pandas_functions.py | summary_df_from_array | def summary_df_from_array(results_array, names, axis=0, **kwargs):
"""Make a panda data frame of the mean and std devs of an array of results,
including the uncertainties on the values.
This function converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_array: 2d numpy array
names: list of str
Names for the output df's columns.
axis: int, optional
Axis on which to calculate summary statistics.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details.
"""
assert axis == 0 or axis == 1
df = pd.DataFrame(results_array)
if axis == 1:
df = df.T
df.columns = names
def summary_df_from_array(results_array, names, axis=0, **kwargs):
    """Make a pandas DataFrame of the means and standard deviations of a 2d
    array of results, including the numerical uncertainties on the values.

    The array is wrapped in a DataFrame (transposed when axis == 1), its
    columns are labelled with names, and the frame is passed to summary_df.

    Parameters
    ----------
    results_array: 2d numpy array
    names: list of str
        Names for the output df's columns.
    axis: int, optional
        Axis on which to calculate summary statistics.

    Returns
    -------
    df: MultiIndex DataFrame
        See summary_df docstring for more details.
    """
    assert axis in (0, 1)
    frame = pd.DataFrame(results_array)
    if axis == 1:
        frame = frame.T
    frame.columns = names
    return summary_df(frame, **kwargs)
"def",
"summary_df_from_array",
"(",
"results_array",
",",
"names",
",",
"axis",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"axis",
"==",
"0",
"or",
"axis",
"==",
"1",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"results_array",
")",
"if",
"a... | Make a panda data frame of the mean and std devs of an array of results,
including the uncertainties on the values.
This function converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_array: 2d numpy array
names: list of str
Names for the output df's columns.
axis: int, optional
Axis on which to calculate summary statistics.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details. | [
"Make",
"a",
"panda",
"data",
"frame",
"of",
"the",
"mean",
"and",
"std",
"devs",
"of",
"an",
"array",
"of",
"results",
"including",
"the",
"uncertainties",
"on",
"the",
"values",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L12-L36 | train |
ejhigson/nestcheck | nestcheck/pandas_functions.py | summary_df_from_list | def summary_df_from_list(results_list, names, **kwargs):
"""Make a panda data frame of the mean and std devs of each element of a
list of 1d arrays, including the uncertainties on the values.
This just converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_list: list of 1d numpy arrays
Must have same length as names.
names: list of strs
Names for the output df's columns.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details.
"""
for arr in results_list:
assert arr.shape == (len(names),)
df = pd.DataFrame(np.stack(results_list, axis=0))
df.columns = names
def summary_df_from_list(results_list, names, **kwargs):
    """Make a pandas DataFrame of the means and standard deviations of each
    element of a list of 1d arrays, including the numerical uncertainties.

    The arrays are stacked into rows of a DataFrame which is then passed to
    summary_df.

    Parameters
    ----------
    results_list: list of 1d numpy arrays
        Must have same length as names.
    names: list of strs
        Names for the output df's columns.
    kwargs: dict, optional
        Keyword arguments to pass to summary_df.

    Returns
    -------
    df: MultiIndex DataFrame
        See summary_df docstring for more details.
    """
    expected_shape = (len(names),)
    for arr in results_list:
        assert arr.shape == expected_shape
    stacked = pd.DataFrame(np.stack(results_list, axis=0), columns=names)
    return summary_df(stacked, **kwargs)
"def",
"summary_df_from_list",
"(",
"results_list",
",",
"names",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"arr",
"in",
"results_list",
":",
"assert",
"arr",
".",
"shape",
"==",
"(",
"len",
"(",
"names",
")",
",",
")",
"df",
"=",
"pd",
".",
"DataFra... | Make a panda data frame of the mean and std devs of each element of a
list of 1d arrays, including the uncertainties on the values.
This just converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_list: list of 1d numpy arrays
Must have same length as names.
names: list of strs
Names for the output df's columns.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details. | [
"Make",
"a",
"panda",
"data",
"frame",
"of",
"the",
"mean",
"and",
"std",
"devs",
"of",
"each",
"element",
"of",
"a",
"list",
"of",
"1d",
"arrays",
"including",
"the",
"uncertainties",
"on",
"the",
"values",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L39-L63 | train |
ejhigson/nestcheck | nestcheck/pandas_functions.py | summary_df_from_multi | def summary_df_from_multi(multi_in, inds_to_keep=None, **kwargs):
"""Apply summary_df to a multiindex while preserving some levels.
Parameters
----------
multi_in: multiindex pandas DataFrame
inds_to_keep: None or list of strs, optional
Index levels to preserve.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details.
"""
# Need to pop include true values and add separately at the end as
# otherwise we get multiple true values added
include_true_values = kwargs.pop('include_true_values', False)
true_values = kwargs.get('true_values', None)
if inds_to_keep is None:
inds_to_keep = list(multi_in.index.names)[:-1]
if 'calculation type' not in inds_to_keep:
df = multi_in.groupby(inds_to_keep).apply(
summary_df, include_true_values=False, **kwargs)
else:
# If there is already a level called 'calculation type' in multi,
# summary_df will try making a second 'calculation type' index and (as
# of pandas v0.23.0) throw an error. Avoid this by renaming.
inds_to_keep = [lev if lev != 'calculation type' else
'calculation type temp' for lev in inds_to_keep]
multi_temp = copy.deepcopy(multi_in)
multi_temp.index.set_names(
[lev if lev != 'calculation type' else 'calculation type temp' for
lev in list(multi_temp.index.names)], inplace=True)
df = multi_temp.groupby(inds_to_keep).apply(
summary_df, include_true_values=False, **kwargs)
# add the 'calculation type' values ('mean' and 'std') produced by
# summary_df to the input calculation type names (now in level
# 'calculation type temp')
ind = (df.index.get_level_values('calculation type temp') + ' ' +
df.index.get_level_values('calculation type'))
order = list(df.index.names)
order.remove('calculation type temp')
df.index = df.index.droplevel(
['calculation type', 'calculation type temp'])
df['calculation type'] = list(ind)
df.set_index('calculation type', append=True, inplace=True)
df = df.reorder_levels(order)
if include_true_values:
assert true_values is not None
tv_ind = ['true values' if name == 'calculation type' else '' for
name in df.index.names[:-1]] + ['value']
df.loc[tuple(tv_ind), :] = true_values
def summary_df_from_multi(multi_in, inds_to_keep=None, **kwargs):
    """Apply summary_df to a multiindex while preserving some levels.

    Parameters
    ----------
    multi_in: multiindex pandas DataFrame
    inds_to_keep: None or list of strs, optional
        Index levels to preserve. Defaults to every level except the last.
    kwargs: dict, optional
        Keyword arguments to pass to summary_df.

    Returns
    -------
    df: MultiIndex DataFrame
        See summary_df docstring for more details.
    """
    # Need to pop include true values and add separately at the end as
    # otherwise we get multiple true values added (one per group).
    include_true_values = kwargs.pop('include_true_values', False)
    true_values = kwargs.get('true_values', None)
    if inds_to_keep is None:
        inds_to_keep = list(multi_in.index.names)[:-1]
    if 'calculation type' not in inds_to_keep:
        # Simple case: group on the preserved levels and summarise each group.
        df = multi_in.groupby(inds_to_keep).apply(
            summary_df, include_true_values=False, **kwargs)
    else:
        # If there is already a level called 'calculation type' in multi,
        # summary_df will try making a second 'calculation type' index and (as
        # of pandas v0.23.0) throw an error. Avoid this by renaming.
        inds_to_keep = [lev if lev != 'calculation type' else
                        'calculation type temp' for lev in inds_to_keep]
        multi_temp = copy.deepcopy(multi_in)
        multi_temp.index.set_names(
            [lev if lev != 'calculation type' else 'calculation type temp' for
             lev in list(multi_temp.index.names)], inplace=True)
        df = multi_temp.groupby(inds_to_keep).apply(
            summary_df, include_true_values=False, **kwargs)
        # add the 'calculation type' values ('mean' and 'std') produced by
        # summary_df to the input calculation type names (now in level
        # 'calculation type temp'), e.g. producing labels like 'foo mean'.
        ind = (df.index.get_level_values('calculation type temp') + ' ' +
               df.index.get_level_values('calculation type'))
        order = list(df.index.names)
        order.remove('calculation type temp')
        # Collapse the two levels into a single 'calculation type' level,
        # restoring the original level order afterwards.
        df.index = df.index.droplevel(
            ['calculation type', 'calculation type temp'])
        df['calculation type'] = list(ind)
        df.set_index('calculation type', append=True, inplace=True)
        df = df.reorder_levels(order)
    if include_true_values:
        assert true_values is not None
        # Row label: 'true values' in the 'calculation type' level, empty
        # strings for the other preserved levels, and 'value' as result type.
        tv_ind = ['true values' if name == 'calculation type' else '' for
                  name in df.index.names[:-1]] + ['value']
        df.loc[tuple(tv_ind), :] = true_values
    return df
"def",
"summary_df_from_multi",
"(",
"multi_in",
",",
"inds_to_keep",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Need to pop include true values and add separately at the end as",
"# otherwise we get multiple true values added",
"include_true_values",
"=",
"kwargs",
"."... | Apply summary_df to a multiindex while preserving some levels.
Parameters
----------
multi_in: multiindex pandas DataFrame
inds_to_keep: None or list of strs, optional
Index levels to preserve.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details. | [
"Apply",
"summary_df",
"to",
"a",
"multiindex",
"while",
"preserving",
"some",
"levels",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L66-L120 | train |
ejhigson/nestcheck | nestcheck/pandas_functions.py | summary_df | def summary_df(df_in, **kwargs):
"""Make a panda data frame of the mean and std devs of an array of results,
including the uncertainties on the values.
This is similar to pandas.DataFrame.describe but also includes estimates of
the numerical uncertainties.
The output DataFrame has multiindex levels:
'calculation type': mean and standard deviations of the data.
'result type': value and uncertainty for each quantity.
calculation type result type column_1 column_2 ...
mean value
mean uncertainty
std value
std uncertainty
Parameters
----------
df_in: pandas DataFrame
true_values: array
Analytical values if known for comparison with mean. Used to
calculate root mean squared errors (RMSE).
include_true_values: bool, optional
Whether or not to include true values in the output DataFrame.
include_rmse: bool, optional
Whether or not to include root-mean-squared-errors in the output
DataFrame.
Returns
-------
df: MultiIndex DataFrame
"""
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if true_values is not None:
assert true_values.shape[0] == df_in.shape[1], (
'There should be one true value for every column! '
'true_values.shape=' + str(true_values.shape) + ', '
'df_in.shape=' + str(df_in.shape))
# make the data frame
df = pd.DataFrame([df_in.mean(axis=0), df_in.std(axis=0, ddof=1)],
index=['mean', 'std'])
if include_true_values:
assert true_values is not None
df.loc['true values'] = true_values
# Make index categorical to allow sorting
df.index = pd.CategoricalIndex(df.index.values, ordered=True,
categories=['true values', 'mean', 'std',
'rmse'],
name='calculation type')
# add uncertainties
num_cals = df_in.shape[0]
mean_unc = df.loc['std'] / np.sqrt(num_cals)
std_unc = df.loc['std'] * np.sqrt(1 / (2 * (num_cals - 1)))
df['result type'] = pd.Categorical(['value'] * df.shape[0], ordered=True,
categories=['value', 'uncertainty'])
df.set_index(['result type'], drop=True, append=True, inplace=True)
df.loc[('mean', 'uncertainty'), :] = mean_unc.values
df.loc[('std', 'uncertainty'), :] = std_unc.values
if include_rmse:
assert true_values is not None, \
'Need to input true values for RMSE!'
rmse, rmse_unc = rmse_and_unc(df_in.values, true_values)
df.loc[('rmse', 'value'), :] = rmse
df.loc[('rmse', 'uncertainty'), :] = rmse_unc
# Ensure correct row order by sorting
df.sort_index(inplace=True)
# Cast calculation type index back from categorical to string to allow
# adding new calculation types
df.set_index(
[df.index.get_level_values('calculation type').astype(str),
df.index.get_level_values('result type')],
inplace=True)
def summary_df(df_in, **kwargs):
    """Make a pandas DataFrame of the means and standard deviations of an
    array of results, including the uncertainties on the values.

    This is similar to pandas.DataFrame.describe but also includes estimates
    of the numerical uncertainties.

    The output DataFrame has multiindex levels:

    'calculation type': mean and standard deviations of the data.
    'result type': value and uncertainty for each quantity.

    calculation type    result type    column_1  column_2 ...
    mean                value
    mean                uncertainty
    std                 value
    std                 uncertainty

    Parameters
    ----------
    df_in: pandas DataFrame
    true_values: array
        Analytical values if known for comparison with mean. Used to
        calculate root mean squared errors (RMSE).
    include_true_values: bool, optional
        Whether or not to include true values in the output DataFrame.
    include_rmse: bool, optional
        Whether or not to include root-mean-squared-errors in the output
        DataFrame.

    Returns
    -------
    df: MultiIndex DataFrame
    """
    true_values = kwargs.pop('true_values', None)
    include_true_values = kwargs.pop('include_true_values', False)
    include_rmse = kwargs.pop('include_rmse', False)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    if true_values is not None:
        assert true_values.shape[0] == df_in.shape[1], (
            'There should be one true value for every column! '
            'true_values.shape=' + str(true_values.shape) + ', '
            'df_in.shape=' + str(df_in.shape))
    # Build the mean/std rows of the output frame.
    stats = pd.DataFrame([df_in.mean(axis=0), df_in.std(axis=0, ddof=1)],
                         index=['mean', 'std'])
    if include_true_values:
        assert true_values is not None
        stats.loc['true values'] = true_values
    # Make the index categorical so rows sort into a fixed, known order.
    stats.index = pd.CategoricalIndex(
        stats.index.values, ordered=True,
        categories=['true values', 'mean', 'std', 'rmse'],
        name='calculation type')
    # Numerical uncertainties on the mean and standard deviation estimates.
    n_runs = df_in.shape[0]
    err_on_mean = stats.loc['std'] / np.sqrt(n_runs)
    err_on_std = stats.loc['std'] * np.sqrt(1 / (2 * (n_runs - 1)))
    stats['result type'] = pd.Categorical(
        ['value'] * stats.shape[0], ordered=True,
        categories=['value', 'uncertainty'])
    stats.set_index(['result type'], drop=True, append=True, inplace=True)
    stats.loc[('mean', 'uncertainty'), :] = err_on_mean.values
    stats.loc[('std', 'uncertainty'), :] = err_on_std.values
    if include_rmse:
        assert true_values is not None, \
            'Need to input true values for RMSE!'
        rmse, rmse_unc = rmse_and_unc(df_in.values, true_values)
        stats.loc[('rmse', 'value'), :] = rmse
        stats.loc[('rmse', 'uncertainty'), :] = rmse_unc
    # Ensure correct row order by sorting on the categorical index.
    stats.sort_index(inplace=True)
    # Cast calculation type index back from categorical to string to allow
    # adding new calculation types later.
    stats.set_index(
        [stats.index.get_level_values('calculation type').astype(str),
         stats.index.get_level_values('result type')],
        inplace=True)
    return stats
"def",
"summary_df",
"(",
"df_in",
",",
"*",
"*",
"kwargs",
")",
":",
"true_values",
"=",
"kwargs",
".",
"pop",
"(",
"'true_values'",
",",
"None",
")",
"include_true_values",
"=",
"kwargs",
".",
"pop",
"(",
"'include_true_values'",
",",
"False",
")",
"incl... | Make a panda data frame of the mean and std devs of an array of results,
including the uncertainties on the values.
This is similar to pandas.DataFrame.describe but also includes estimates of
the numerical uncertainties.
The output DataFrame has multiindex levels:
'calculation type': mean and standard deviations of the data.
'result type': value and uncertainty for each quantity.
calculation type result type column_1 column_2 ...
mean value
mean uncertainty
std value
std uncertainty
Parameters
----------
df_in: pandas DataFrame
true_values: array
Analytical values if known for comparison with mean. Used to
calculate root mean squared errors (RMSE).
include_true_values: bool, optional
Whether or not to include true values in the output DataFrame.
include_rmse: bool, optional
Whether or not to include root-mean-squared-errors in the output
DataFrame.
Returns
-------
df: MultiIndex DataFrame | [
"Make",
"a",
"panda",
"data",
"frame",
"of",
"the",
"mean",
"and",
"std",
"devs",
"of",
"an",
"array",
"of",
"results",
"including",
"the",
"uncertainties",
"on",
"the",
"values",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L123-L202 | train |
ejhigson/nestcheck | nestcheck/pandas_functions.py | efficiency_gain_df | def efficiency_gain_df(method_names, method_values, est_names, **kwargs):
r"""Calculated data frame showing
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
The standard method on which to base the gain is assumed to be the first
method input.
The output DataFrame will contain rows:
mean [dynamic goal]: mean calculation result for standard nested
sampling and dynamic nested sampling with each input dynamic
goal.
std [dynamic goal]: standard deviation of results for standard
nested sampling and dynamic nested sampling with each input
dynamic goal.
gain [dynamic goal]: the efficiency gain (computational speedup)
from dynamic nested sampling compared to standard nested
sampling. This equals (variance of standard results) /
(variance of dynamic results); see the dynamic nested
sampling paper for more details.
Parameters
----------
method names: list of strs
method values: list
Each element is a list of 1d arrays of results for the method. Each
array must have shape (len(est_names),).
est_names: list of strs
Provide column titles for output df.
true_values: iterable of same length as estimators list
True values of the estimators for the given likelihood and prior.
Returns
-------
results: pandas data frame
Results data frame.
"""
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
adjust_nsamp = kwargs.pop('adjust_nsamp', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if adjust_nsamp is not None:
assert adjust_nsamp.shape == (len(method_names),)
assert len(method_names) == len(method_values)
df_dict = {}
for i, method_name in enumerate(method_names):
# Set include_true_values=False as we don't want them repeated for
# every method
df = summary_df_from_list(
method_values[i], est_names, true_values=true_values,
include_true_values=False, include_rmse=include_rmse)
if i != 0:
stats = ['std']
if include_rmse:
stats.append('rmse')
if adjust_nsamp is not None:
# Efficiency gain measures performance per number of
# samples (proportional to computational work). If the
# number of samples is not the same we can adjust this.
adjust = (adjust_nsamp[0] / adjust_nsamp[i])
else:
adjust = 1
for stat in stats:
# Calculate efficiency gain vs standard nested sampling
gain, gain_unc = get_eff_gain(
df_dict[method_names[0]].loc[(stat, 'value')],
df_dict[method_names[0]].loc[(stat, 'uncertainty')],
df.loc[(stat, 'value')],
df.loc[(stat, 'uncertainty')], adjust=adjust)
key = stat + ' efficiency gain'
df.loc[(key, 'value'), :] = gain
df.loc[(key, 'uncertainty'), :] = gain_unc
df_dict[method_name] = df
results = pd.concat(df_dict)
results.index.rename('dynamic settings', level=0, inplace=True)
new_ind = []
new_ind.append(pd.CategoricalIndex(
results.index.get_level_values('calculation type'), ordered=True,
categories=['true values', 'mean', 'std', 'rmse',
'std efficiency gain', 'rmse efficiency gain']))
new_ind.append(pd.CategoricalIndex(
results.index.get_level_values('dynamic settings'),
ordered=True, categories=[''] + method_names))
new_ind.append(results.index.get_level_values('result type'))
results.set_index(new_ind, inplace=True)
if include_true_values:
with warnings.catch_warnings():
# Performance not an issue here so suppress annoying warning
warnings.filterwarnings('ignore', message=(
'indexing past lexsort depth may impact performance.'))
results.loc[('true values', '', 'value'), :] = true_values
results.sort_index(inplace=True)
def efficiency_gain_df(method_names, method_values, est_names, **kwargs):
    r"""Calculated data frame showing

    .. math::

        \mathrm{efficiency\,gain}
        =
        \frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}

    See the dynamic nested sampling paper (Higson et al. 2019) for more
    details.

    The standard method on which to base the gain is assumed to be the first
    method input.

    The output DataFrame will contain rows:

    mean [dynamic goal]: mean calculation result for standard nested
        sampling and dynamic nested sampling with each input dynamic goal.
    std [dynamic goal]: standard deviation of results for standard
        nested sampling and dynamic nested sampling with each input
        dynamic goal.
    gain [dynamic goal]: the efficiency gain (computational speedup)
        from dynamic nested sampling compared to standard nested
        sampling. This equals (variance of standard results) /
        (variance of dynamic results); see the dynamic nested
        sampling paper for more details.

    Parameters
    ----------
    method names: list of strs
    method values: list
        Each element is a list of 1d arrays of results for the method. Each
        array must have shape (len(est_names),).
    est_names: list of strs
        Provide column titles for output df.
    true_values: iterable of same length as estimators list
        True values of the estimators for the given likelihood and prior.

    Returns
    -------
    results: pandas data frame
        Results data frame.
    """
    true_values = kwargs.pop('true_values', None)
    include_true_values = kwargs.pop('include_true_values', False)
    include_rmse = kwargs.pop('include_rmse', False)
    adjust_nsamp = kwargs.pop('adjust_nsamp', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    if adjust_nsamp is not None:
        assert adjust_nsamp.shape == (len(method_names),)
    assert len(method_names) == len(method_values)
    df_dict = {}
    for i, method_name in enumerate(method_names):
        # Set include_true_values=False as we don't want them repeated for
        # every method; they are added once at the end if requested.
        df = summary_df_from_list(
            method_values[i], est_names, true_values=true_values,
            include_true_values=False, include_rmse=include_rmse)
        if i != 0:
            # Gains are computed relative to the first (base) method, so
            # they are only added for the subsequent methods.
            stats = ['std']
            if include_rmse:
                stats.append('rmse')
            if adjust_nsamp is not None:
                # Efficiency gain measures performance per number of
                # samples (proportional to computational work). If the
                # number of samples is not the same we can adjust this.
                adjust = (adjust_nsamp[0] / adjust_nsamp[i])
            else:
                adjust = 1
            for stat in stats:
                # Calculate efficiency gain vs standard nested sampling
                gain, gain_unc = get_eff_gain(
                    df_dict[method_names[0]].loc[(stat, 'value')],
                    df_dict[method_names[0]].loc[(stat, 'uncertainty')],
                    df.loc[(stat, 'value')],
                    df.loc[(stat, 'uncertainty')], adjust=adjust)
                key = stat + ' efficiency gain'
                df.loc[(key, 'value'), :] = gain
                df.loc[(key, 'uncertainty'), :] = gain_unc
        df_dict[method_name] = df
    # Concatenate per-method frames into one, keyed by method name.
    results = pd.concat(df_dict)
    results.index.rename('dynamic settings', level=0, inplace=True)
    # Rebuild the first two index levels as ordered categoricals so rows
    # sort into a fixed display order.
    new_ind = []
    new_ind.append(pd.CategoricalIndex(
        results.index.get_level_values('calculation type'), ordered=True,
        categories=['true values', 'mean', 'std', 'rmse',
                    'std efficiency gain', 'rmse efficiency gain']))
    new_ind.append(pd.CategoricalIndex(
        results.index.get_level_values('dynamic settings'),
        ordered=True, categories=[''] + method_names))
    new_ind.append(results.index.get_level_values('result type'))
    results.set_index(new_ind, inplace=True)
    if include_true_values:
        with warnings.catch_warnings():
            # Performance not an issue here so suppress annoying warning
            warnings.filterwarnings('ignore', message=(
                'indexing past lexsort depth may impact performance.'))
            results.loc[('true values', '', 'value'), :] = true_values
    results.sort_index(inplace=True)
    return results
"def",
"efficiency_gain_df",
"(",
"method_names",
",",
"method_values",
",",
"est_names",
",",
"*",
"*",
"kwargs",
")",
":",
"true_values",
"=",
"kwargs",
".",
"pop",
"(",
"'true_values'",
",",
"None",
")",
"include_true_values",
"=",
"kwargs",
".",
"pop",
"... | r"""Calculated data frame showing
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
The standard method on which to base the gain is assumed to be the first
method input.
The output DataFrame will contain rows:
mean [dynamic goal]: mean calculation result for standard nested
sampling and dynamic nested sampling with each input dynamic
goal.
std [dynamic goal]: standard deviation of results for standard
nested sampling and dynamic nested sampling with each input
dynamic goal.
gain [dynamic goal]: the efficiency gain (computational speedup)
from dynamic nested sampling compared to standard nested
sampling. This equals (variance of standard results) /
(variance of dynamic results); see the dynamic nested
sampling paper for more details.
Parameters
----------
method names: list of strs
method values: list
Each element is a list of 1d arrays of results for the method. Each
array must have shape (len(est_names),).
est_names: list of strs
Provide column titles for output df.
true_values: iterable of same length as estimators list
True values of the estimators for the given likelihood and prior.
Returns
-------
results: pandas data frame
Results data frame. | [
"r",
"Calculated",
"data",
"frame",
"showing"
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L205-L306 | train |
def paper_format_efficiency_gain_df(eff_gain_df):
    """Transform efficiency gain data frames output by nestcheck into the
    format shown in the dynamic nested sampling paper (Higson et al. 2019).

    Parameters
    ----------
    eff_gain_df: pandas DataFrame
        DataFrame of the form produced by efficiency_gain_df.

    Returns
    -------
    paper_df: pandas DataFrame
    """
    # Keep only the standard deviation rows and the efficiency gain rows.
    idxs = pd.IndexSlice[['std', 'std efficiency gain'], :, :]
    paper_df = copy.deepcopy(eff_gain_df.loc[idxs, :])
    # Show mean number of samples and likelihood calls instead of st dev
    means = (eff_gain_df.xs('mean', level='calculation type')
             .xs('value', level='result type'))
    for col in ['samples', 'likelihood calls']:
        try:
            col_vals = []
            for val in means[col].values:
                # One rounded mean per settings row; NaN fills the paired
                # uncertainty row beneath it.
                col_vals += [int(np.rint(val)), np.nan]
            col_vals += [np.nan] * (paper_df.shape[0] - len(col_vals))
            paper_df[col] = col_vals
        except KeyError:
            # Column not present in this results table - nothing to show.
            pass
    # NB: mapping order matters - 'std efficiency gain' must be replaced
    # before the bare 'std' key (dicts preserve insertion order).
    row_name_map = {'std efficiency gain': 'Efficiency gain',
                    'St.Dev. efficiency gain': 'Efficiency gain',
                    'dynamic ': '',
                    'std': 'St.Dev.'}
    row_names = (paper_df.index.get_level_values(0).astype(str) + ' ' +
                 paper_df.index.get_level_values(1).astype(str))
    for key, value in row_name_map.items():
        # regex=False: the keys are literal strings. Relying on the default
        # is version-dependent (regex=True before pandas 2.0, where the '.'
        # in 'St.Dev.' would act as a wildcard and a FutureWarning is
        # emitted; regex=False from pandas 2.0).
        row_names = row_names.str.replace(key, value, regex=False)
    paper_df.index = [row_names, paper_df.index.get_level_values(2)]
    return paper_df
"""Transform efficiency gain data frames output by nestcheck into the
format shown in the dynamic nested sampling paper (Higson et al. 2019).
Parameters
----------
eff_gain_df: pandas DataFrame
DataFrame of the from produced by efficiency_gain_df.
Returns
-------
paper_df: pandas DataFrame
"""
idxs = pd.IndexSlice[['std', 'std efficiency gain'], :, :]
paper_df = copy.deepcopy(eff_gain_df.loc[idxs, :])
# Show mean number of samples and likelihood calls instead of st dev
means = (eff_gain_df.xs('mean', level='calculation type')
.xs('value', level='result type'))
for col in ['samples', 'likelihood calls']:
try:
col_vals = []
for val in means[col].values:
col_vals += [int(np.rint(val)), np.nan]
col_vals += [np.nan] * (paper_df.shape[0] - len(col_vals))
paper_df[col] = col_vals
except KeyError:
pass
row_name_map = {'std efficiency gain': 'Efficiency gain',
'St.Dev. efficiency gain': 'Efficiency gain',
'dynamic ': '',
'std': 'St.Dev.'}
row_names = (paper_df.index.get_level_values(0).astype(str) + ' ' +
paper_df.index.get_level_values(1).astype(str))
for key, value in row_name_map.items():
row_names = row_names.str.replace(key, value)
paper_df.index = [row_names, paper_df.index.get_level_values(2)]
return paper_df | [
"def",
"paper_format_efficiency_gain_df",
"(",
"eff_gain_df",
")",
":",
"idxs",
"=",
"pd",
".",
"IndexSlice",
"[",
"[",
"'std'",
",",
"'std efficiency gain'",
"]",
",",
":",
",",
":",
"]",
"paper_df",
"=",
"copy",
".",
"deepcopy",
"(",
"eff_gain_df",
".",
... | Transform efficiency gain data frames output by nestcheck into the
format shown in the dynamic nested sampling paper (Higson et al. 2019).
Parameters
----------
eff_gain_df: pandas DataFrame
DataFrame of the from produced by efficiency_gain_df.
Returns
-------
paper_df: pandas DataFrame | [
"Transform",
"efficiency",
"gain",
"data",
"frames",
"output",
"by",
"nestcheck",
"into",
"the",
"format",
"shown",
"in",
"the",
"dynamic",
"nested",
"sampling",
"paper",
"(",
"Higson",
"et",
"al",
".",
"2019",
")",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L309-L345 | train |
def get_eff_gain(base_std, base_std_unc, meth_std, meth_std_unc, adjust=1):
    r"""Compute the efficiency gain of a new method over a base method.

    Given the variation in repeated calculations' results using the two
    methods, the gain is the ratio of their variances,

    .. math::

        \mathrm{efficiency\,gain}
        =
        \frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}},

    together with its numerical uncertainty. See the dynamic nested
    sampling paper (Higson et al. 2019) for more details.

    Parameters
    ----------
    base_std: 1d numpy array
        Standard deviations of the base method's results.
    base_std_unc: 1d numpy array
        Uncertainties on base_std.
    meth_std: 1d numpy array
        Standard deviations of the new method's results.
    meth_std_unc: 1d numpy array
        Uncertainties on meth_std.
    adjust: float, optional
        Multiplicative correction applied to the gain and its uncertainty.

    Returns
    -------
    gain: 1d numpy array
    gain_unc: 1d numpy array
        Uncertainties on gain.
    """
    ratio = base_std / meth_std
    # Uncertainty on the ratio via standard error propagation, assuming
    # zero covariance between the two inputs.
    ratio_unc = ratio * np.sqrt((base_std_unc / base_std) ** 2 +
                                (meth_std_unc / meth_std) ** 2)
    gain = ratio ** 2
    gain_unc = 2 * ratio * ratio_unc
    return gain * adjust, gain_unc * adjust
r"""Calculates efficiency gain for a new method compared to a base method.
Given the variation in repeated calculations' results using the two
methods, the efficiency gain is:
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
The uncertainty on the efficiency gain is also calculated.
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
Parameters
----------
base_std: 1d numpy array
base_std_unc: 1d numpy array
Uncertainties on base_std.
meth_std: 1d numpy array
meth_std_unc: 1d numpy array
Uncertainties on base_std.
Returns
-------
gain: 1d numpy array
gain_unc: 1d numpy array
Uncertainties on gain.
"""
ratio = base_std / meth_std
ratio_unc = array_ratio_std(
base_std, base_std_unc, meth_std, meth_std_unc)
gain = ratio ** 2
gain_unc = 2 * ratio * ratio_unc
gain *= adjust
gain_unc *= adjust
return gain, gain_unc | [
"def",
"get_eff_gain",
"(",
"base_std",
",",
"base_std_unc",
",",
"meth_std",
",",
"meth_std_unc",
",",
"adjust",
"=",
"1",
")",
":",
"ratio",
"=",
"base_std",
"/",
"meth_std",
"ratio_unc",
"=",
"array_ratio_std",
"(",
"base_std",
",",
"base_std_unc",
",",
"... | r"""Calculates efficiency gain for a new method compared to a base method.
Given the variation in repeated calculations' results using the two
methods, the efficiency gain is:
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
The uncertainty on the efficiency gain is also calculated.
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
Parameters
----------
base_std: 1d numpy array
base_std_unc: 1d numpy array
Uncertainties on base_std.
meth_std: 1d numpy array
meth_std_unc: 1d numpy array
Uncertainties on base_std.
Returns
-------
gain: 1d numpy array
gain_unc: 1d numpy array
Uncertainties on gain. | [
"r",
"Calculates",
"efficiency",
"gain",
"for",
"a",
"new",
"method",
"compared",
"to",
"a",
"base",
"method",
".",
"Given",
"the",
"variation",
"in",
"repeated",
"calculations",
"results",
"using",
"the",
"two",
"methods",
"the",
"efficiency",
"gain",
"is",
... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L352-L390 | train |
def rmse_and_unc(values_array, true_values):
    r"""Compute the root-mean-squared-error and its numerical uncertainty.

    With a reasonably large number of rows in values_array the uncertainty
    on the mean squared error is approximately normal (central limit
    theorem). The error on the square root is then obtained by error
    propagation: if :math:`\sigma` is the error on :math:`X` then the error
    on :math:`\sqrt{X}` is :math:`\frac{\sigma}{2 \sqrt{X}}`.

    Parameters
    ----------
    values_array: 2d numpy array
        Each row is one estimate of the quantities considered.
    true_values: 1d numpy array
        Correct values for the quantities considered.

    Returns
    -------
    rmse: 1d numpy array
        Root-mean-squared-error for each quantity.
    rmse_unc: 1d numpy array
        Numerical uncertainties on each element of rmse.
    """
    assert true_values.shape == (values_array.shape[1],)
    # Squared deviation of every repeat from the true values.
    sq_errors = (values_array - true_values[np.newaxis, :]) ** 2
    n_repeats = sq_errors.shape[0]
    mean_sq = np.mean(sq_errors, axis=0)
    # Standard error on the mean squared error.
    mean_sq_unc = np.std(sq_errors, axis=0, ddof=1) / np.sqrt(n_repeats)
    rmse = np.sqrt(mean_sq)
    # Propagate through the square root.
    rmse_unc = 0.5 * mean_sq_unc / rmse
    return rmse, rmse_unc
r"""Calculate the root meet squared error and its numerical uncertainty.
With a reasonably large number of values in values_list the uncertainty
on sq_errors should be approximately normal (from the central limit
theorem).
Uncertainties are calculated via error propagation: if :math:`\sigma`
is the error on :math:`X` then the error on :math:`\sqrt{X}`
is :math:`\frac{\sigma}{2 \sqrt{X}}`.
Parameters
----------
values_array: 2d numpy array
Array of results: each row corresponds to a different estimate of the
quantities considered.
true_values: 1d numpy array
Correct values for the quantities considered.
Returns
-------
rmse: 1d numpy array
Root-mean-squared-error for each quantity.
rmse_unc: 1d numpy array
Numerical uncertainties on each element of rmse.
"""
assert true_values.shape == (values_array.shape[1],)
errors = values_array - true_values[np.newaxis, :]
sq_errors = errors ** 2
sq_errors_mean = np.mean(sq_errors, axis=0)
sq_errors_mean_unc = (np.std(sq_errors, axis=0, ddof=1) /
np.sqrt(sq_errors.shape[0]))
rmse = np.sqrt(sq_errors_mean)
rmse_unc = 0.5 * (1 / rmse) * sq_errors_mean_unc
return rmse, rmse_unc | [
"def",
"rmse_and_unc",
"(",
"values_array",
",",
"true_values",
")",
":",
"assert",
"true_values",
".",
"shape",
"==",
"(",
"values_array",
".",
"shape",
"[",
"1",
"]",
",",
")",
"errors",
"=",
"values_array",
"-",
"true_values",
"[",
"np",
".",
"newaxis",... | r"""Calculate the root meet squared error and its numerical uncertainty.
With a reasonably large number of values in values_list the uncertainty
on sq_errors should be approximately normal (from the central limit
theorem).
Uncertainties are calculated via error propagation: if :math:`\sigma`
is the error on :math:`X` then the error on :math:`\sqrt{X}`
is :math:`\frac{\sigma}{2 \sqrt{X}}`.
Parameters
----------
values_array: 2d numpy array
Array of results: each row corresponds to a different estimate of the
quantities considered.
true_values: 1d numpy array
Correct values for the quantities considered.
Returns
-------
rmse: 1d numpy array
Root-mean-squared-error for each quantity.
rmse_unc: 1d numpy array
Numerical uncertainties on each element of rmse. | [
"r",
"Calculate",
"the",
"root",
"meet",
"squared",
"error",
"and",
"its",
"numerical",
"uncertainty",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L393-L426 | train |
def array_ratio_std(values_n, sigmas_n, values_d, sigmas_d):
    r"""Propagate uncertainties through the ratio of two quantities.

    Works on pairs of floats or pairs of 1-dimensional arrays. Assumes the
    covariance between numerator and denominator is zero and that the input
    uncertainties are small compared to the corresponding values. The _n
    and _d suffixes denote numerator and denominator.

    Parameters
    ----------
    values_n: float or numpy array
        Numerator values.
    sigmas_n: float or numpy array
        :math:`1\sigma` uncertainties on values_n.
    values_d: float or numpy array
        Denominator values.
    sigmas_d: float or numpy array
        :math:`1\sigma` uncertainties on values_d.

    Returns
    -------
    std: float or numpy array
        :math:`1\sigma` uncertainty on values_n / values_d.
    """
    # Fractional errors add in quadrature; scale back up by the ratio.
    frac_sq = (sigmas_n / values_n) ** 2 + (sigmas_d / values_d) ** 2
    return (values_n / values_d) * np.sqrt(frac_sq)
r"""Gives error on the ratio of 2 floats or 2 1-dimensional arrays given
their values and uncertainties. This assumes the covariance = 0, and that
the input uncertainties are small compared to the corresponding input
values. _n and _d denote the numerator and denominator respectively.
Parameters
----------
values_n: float or numpy array
Numerator values.
sigmas_n: float or numpy array
:math:`1\sigma` uncertainties on values_n.
values_d: float or numpy array
Denominator values.
sigmas_d: float or numpy array
:math:`1\sigma` uncertainties on values_d.
Returns
-------
std: float or numpy array
:math:`1\sigma` uncertainty on values_n / values_d.
"""
std = np.sqrt((sigmas_n / values_n) ** 2 + (sigmas_d / values_d) ** 2)
std *= (values_n / values_d)
return std | [
"def",
"array_ratio_std",
"(",
"values_n",
",",
"sigmas_n",
",",
"values_d",
",",
"sigmas_d",
")",
":",
"std",
"=",
"np",
".",
"sqrt",
"(",
"(",
"sigmas_n",
"/",
"values_n",
")",
"**",
"2",
"+",
"(",
"sigmas_d",
"/",
"values_d",
")",
"**",
"2",
")",
... | r"""Gives error on the ratio of 2 floats or 2 1-dimensional arrays given
their values and uncertainties. This assumes the covariance = 0, and that
the input uncertainties are small compared to the corresponding input
values. _n and _d denote the numerator and denominator respectively.
Parameters
----------
values_n: float or numpy array
Numerator values.
sigmas_n: float or numpy array
:math:`1\sigma` uncertainties on values_n.
values_d: float or numpy array
Denominator values.
sigmas_d: float or numpy array
:math:`1\sigma` uncertainties on values_d.
Returns
-------
std: float or numpy array
:math:`1\sigma` uncertainty on values_n / values_d. | [
"r",
"Gives",
"error",
"on",
"the",
"ratio",
"of",
"2",
"floats",
"or",
"2",
"1",
"-",
"dimensional",
"arrays",
"given",
"their",
"values",
"and",
"uncertainties",
".",
"This",
"assumes",
"the",
"covariance",
"=",
"0",
"and",
"that",
"the",
"input",
"unc... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L429-L453 | train |
def run_estimators(ns_run, estimator_list, simulate=False):
    """Evaluate a list of estimators (e.g. the Bayesian evidence or
    parameter means) on a single nested sampling run.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see data_processing module docstring for
        more details).
    estimator_list: list of functions for estimating quantities from nested
        sampling runs. Example functions can be found in estimators.py. Each
        should have arguments: func(ns_run, logw=None).
    simulate: bool, optional
        See get_logw docstring.

    Returns
    -------
    output: 1d numpy array
        Calculation result for each estimator in estimator_list.
    """
    # Compute the log posterior weights once and share them between
    # estimators rather than recomputing per estimator.
    logw = get_logw(ns_run, simulate=simulate)
    return np.asarray([est(ns_run, logw=logw) for est in estimator_list],
                      dtype=float)
"""Calculates values of list of quantities (such as the Bayesian evidence
or mean of parameters) for a single nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
estimator_list: list of functions for estimating quantities from nested
sampling runs. Example functions can be found in estimators.py. Each
should have arguments: func(ns_run, logw=None).
simulate: bool, optional
See get_logw docstring.
Returns
-------
output: 1d numpy array
Calculation result for each estimator in estimator_list.
"""
logw = get_logw(ns_run, simulate=simulate)
output = np.zeros(len(estimator_list))
for i, est in enumerate(estimator_list):
output[i] = est(ns_run, logw=logw)
return output | [
"def",
"run_estimators",
"(",
"ns_run",
",",
"estimator_list",
",",
"simulate",
"=",
"False",
")",
":",
"logw",
"=",
"get_logw",
"(",
"ns_run",
",",
"simulate",
"=",
"simulate",
")",
"output",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"estimator_list",
"... | Calculates values of list of quantities (such as the Bayesian evidence
or mean of parameters) for a single nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
estimator_list: list of functions for estimating quantities from nested
sampling runs. Example functions can be found in estimators.py. Each
should have arguments: func(ns_run, logw=None).
simulate: bool, optional
See get_logw docstring.
Returns
-------
output: 1d numpy array
Calculation result for each estimator in estimator_list. | [
"Calculates",
"values",
"of",
"list",
"of",
"quantities",
"(",
"such",
"as",
"the",
"Bayesian",
"evidence",
"or",
"mean",
"of",
"parameters",
")",
"for",
"a",
"single",
"nested",
"sampling",
"run",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L15-L39 | train |
def array_given_run(ns_run):
    """Convert a nested sampling run dict's sample information into a single
    numpy array. This representation allows fast addition of more samples
    and recalculation of nlive.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see data_processing module docstring for
        more details).

    Returns
    -------
    samples: 2d numpy array
        Array containing columns
        [logl, thread label, change in nlive at sample, (thetas)]
        with each row representing a single sample.
    """
    # Change in nlive after each sample: diff of the nlive array, with the
    # final entry fixed at -1 since nlive drops to zero after the last point.
    delta_nlive = np.append(np.diff(ns_run['nlive_array']), -1)
    return np.column_stack((ns_run['logl'],
                            ns_run['thread_labels'],
                            delta_nlive,
                            ns_run['theta']))
"""Converts information on samples in a nested sampling run dictionary into
a numpy array representation. This allows fast addition of more samples and
recalculation of nlive.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
samples: 2d numpy array
Array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
"""
samples = np.zeros((ns_run['logl'].shape[0], 3 + ns_run['theta'].shape[1]))
samples[:, 0] = ns_run['logl']
samples[:, 1] = ns_run['thread_labels']
# Calculate 'change in nlive' after each step
samples[:-1, 2] = np.diff(ns_run['nlive_array'])
samples[-1, 2] = -1 # nlive drops to zero after final point
samples[:, 3:] = ns_run['theta']
return samples | [
"def",
"array_given_run",
"(",
"ns_run",
")",
":",
"samples",
"=",
"np",
".",
"zeros",
"(",
"(",
"ns_run",
"[",
"'logl'",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"3",
"+",
"ns_run",
"[",
"'theta'",
"]",
".",
"shape",
"[",
"1",
"]",
")",
")",
"sa... | Converts information on samples in a nested sampling run dictionary into
a numpy array representation. This allows fast addition of more samples and
recalculation of nlive.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
samples: 2d numpy array
Array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample. | [
"Converts",
"information",
"on",
"samples",
"in",
"a",
"nested",
"sampling",
"run",
"dictionary",
"into",
"a",
"numpy",
"array",
"representation",
".",
"This",
"allows",
"fast",
"addition",
"of",
"more",
"samples",
"and",
"recalculation",
"of",
"nlive",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L42-L67 | train |
def dict_given_run_array(samples, thread_min_max):
    """
    Converts an array of information about samples back into a nested sampling
    run dictionary (see data_processing module docstring for more details).

    N.B. the output dict only contains the following keys: 'logl',
    'thread_label', 'nlive_array', 'theta'. Any other keys giving additional
    information about the run output cannot be reproduced from the function
    arguments, and are therefore ommitted.

    Parameters
    ----------
    samples: numpy array
        Numpy array containing columns
        [logl, thread label, change in nlive at sample, (thetas)]
        with each row representing a single sample.
    thread_min_max': numpy array, optional
        2d array with a row for each thread containing the likelihoods at which
        it begins and ends.
        Needed to calculate nlive_array (otherwise this is set to None).

    Returns
    -------
    ns_run: dict
        Nested sampling run dict (see data_processing module docstring for more
        details).
    """
    ns_run = {'logl': samples[:, 0],
              'thread_labels': samples[:, 1],
              'thread_min_max': thread_min_max,
              'theta': samples[:, 3:]}
    if np.all(~np.isnan(ns_run['thread_labels'])):
        # Thread labels were stored as floats in the samples array; restore
        # them to ints, checking the round trip changed no values.
        ns_run['thread_labels'] = ns_run['thread_labels'].astype(int)
        assert np.array_equal(samples[:, 1], ns_run['thread_labels']), ((
            'Casting thread labels from samples array to int has changed '
            'their values!\nsamples[:, 1]={}\nthread_labels={}').format(
                samples[:, 1], ns_run['thread_labels']))
    # Initial number of live points = number of threads whose starting logl
    # is at or below the lowest logl in the run.
    nlive_0 = (thread_min_max[:, 0] <= ns_run['logl'].min()).sum()
    assert nlive_0 > 0, 'nlive_0={}'.format(nlive_0)
    # Reconstruct nlive at each sample by accumulating the per-sample
    # changes in nlive (column 2) on top of the initial count.
    nlive_array = np.zeros(samples.shape[0]) + nlive_0
    nlive_array[1:] += np.cumsum(samples[:-1, 2])
    # Check if there are multiple threads starting on the first logl point
    dup_th_starts = (thread_min_max[:, 0] == ns_run['logl'].min()).sum()
    if dup_th_starts > 1:
        # In this case we approximate the true nlive (which we dont really
        # know) by making sure the array's final point is 1 and setting all
        # points with logl = logl.min() to have the same nlive
        nlive_array += (1 - nlive_array[-1])
        n_logl_min = (ns_run['logl'] == ns_run['logl'].min()).sum()
        nlive_array[:n_logl_min] = nlive_0
        warnings.warn((
            'duplicate starting logls: {} threads start at logl.min()={}, '
            'and {} points have logl=logl.min(). nlive_array may only be '
            'approximately correct.').format(
                dup_th_starts, ns_run['logl'].min(), n_logl_min), UserWarning)
    # Sanity checks: nlive must stay positive throughout, and the final
    # sample must be the last remaining live point.
    assert nlive_array.min() > 0, ((
        'nlive contains 0s or negative values. nlive_0={}'
        '\nnlive_array = {}\nthread_min_max={}').format(
            nlive_0, nlive_array, thread_min_max))
    assert nlive_array[-1] == 1, (
        'final point in nlive_array != 1.\nnlive_array = ' + str(nlive_array))
    ns_run['nlive_array'] = nlive_array
    return ns_run
"""
Converts an array of information about samples back into a nested sampling
run dictionary (see data_processing module docstring for more details).
N.B. the output dict only contains the following keys: 'logl',
'thread_label', 'nlive_array', 'theta'. Any other keys giving additional
information about the run output cannot be reproduced from the function
arguments, and are therefore ommitted.
Parameters
----------
samples: numpy array
Numpy array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
thread_min_max': numpy array, optional
2d array with a row for each thread containing the likelihoods at which
it begins and ends.
Needed to calculate nlive_array (otherwise this is set to None).
Returns
-------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
"""
ns_run = {'logl': samples[:, 0],
'thread_labels': samples[:, 1],
'thread_min_max': thread_min_max,
'theta': samples[:, 3:]}
if np.all(~np.isnan(ns_run['thread_labels'])):
ns_run['thread_labels'] = ns_run['thread_labels'].astype(int)
assert np.array_equal(samples[:, 1], ns_run['thread_labels']), ((
'Casting thread labels from samples array to int has changed '
'their values!\nsamples[:, 1]={}\nthread_labels={}').format(
samples[:, 1], ns_run['thread_labels']))
nlive_0 = (thread_min_max[:, 0] <= ns_run['logl'].min()).sum()
assert nlive_0 > 0, 'nlive_0={}'.format(nlive_0)
nlive_array = np.zeros(samples.shape[0]) + nlive_0
nlive_array[1:] += np.cumsum(samples[:-1, 2])
# Check if there are multiple threads starting on the first logl point
dup_th_starts = (thread_min_max[:, 0] == ns_run['logl'].min()).sum()
if dup_th_starts > 1:
# In this case we approximate the true nlive (which we dont really
# know) by making sure the array's final point is 1 and setting all
# points with logl = logl.min() to have the same nlive
nlive_array += (1 - nlive_array[-1])
n_logl_min = (ns_run['logl'] == ns_run['logl'].min()).sum()
nlive_array[:n_logl_min] = nlive_0
warnings.warn((
'duplicate starting logls: {} threads start at logl.min()={}, '
'and {} points have logl=logl.min(). nlive_array may only be '
'approximately correct.').format(
dup_th_starts, ns_run['logl'].min(), n_logl_min), UserWarning)
assert nlive_array.min() > 0, ((
'nlive contains 0s or negative values. nlive_0={}'
'\nnlive_array = {}\nthread_min_max={}').format(
nlive_0, nlive_array, thread_min_max))
assert nlive_array[-1] == 1, (
'final point in nlive_array != 1.\nnlive_array = ' + str(nlive_array))
ns_run['nlive_array'] = nlive_array
return ns_run | [
"def",
"dict_given_run_array",
"(",
"samples",
",",
"thread_min_max",
")",
":",
"ns_run",
"=",
"{",
"'logl'",
":",
"samples",
"[",
":",
",",
"0",
"]",
",",
"'thread_labels'",
":",
"samples",
"[",
":",
",",
"1",
"]",
",",
"'thread_min_max'",
":",
"thread_... | Converts an array of information about samples back into a nested sampling
run dictionary (see data_processing module docstring for more details).
N.B. the output dict only contains the following keys: 'logl',
'thread_label', 'nlive_array', 'theta'. Any other keys giving additional
information about the run output cannot be reproduced from the function
arguments, and are therefore ommitted.
Parameters
----------
samples: numpy array
Numpy array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
thread_min_max': numpy array, optional
2d array with a row for each thread containing the likelihoods at which
it begins and ends.
Needed to calculate nlive_array (otherwise this is set to None).
Returns
-------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details). | [
"Converts",
"an",
"array",
"of",
"information",
"about",
"samples",
"back",
"into",
"a",
"nested",
"sampling",
"run",
"dictionary",
"(",
"see",
"data_processing",
"module",
"docstring",
"for",
"more",
"details",
")",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L70-L132 | train |
def get_run_threads(ns_run):
    """
    Split a nested sampling run into its constituent single-live-point
    threads.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see data_processing module docstring for
        more details).

    Returns
    -------
    threads: list of numpy array
        Each thread (list element) is a samples array containing columns
        [logl, thread label, change in nlive at sample, (thetas)]
        with each row representing a single sample.
    """
    samples = array_given_run(ns_run)
    labels = np.unique(ns_run['thread_labels'])
    n_threads = ns_run['thread_min_max'].shape[0]
    assert n_threads == labels.shape[0], (
        'some threads have no points! {0} != {1}'.format(
            labels.shape[0], n_threads))
    threads = []
    for ind, label in enumerate(labels):
        thread_samples = samples[samples[:, 1] == label]
        # Zero out nlive changes caused by other threads: an isolated
        # thread has constant nlive until its final point, where it ends.
        thread_samples[:, 2] = 0
        thread_samples[-1, 2] = -1
        limits = ns_run['thread_min_max'][ind, :].reshape(1, 2)
        assert limits[0, 1] == thread_samples[-1, 0], (
            'thread max logl should equal logl of its final point!')
        threads.append(dict_given_run_array(thread_samples, limits))
    return threads
"""
Get the individual threads from a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
threads: list of numpy array
Each thread (list element) is a samples array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
"""
samples = array_given_run(ns_run)
unique_threads = np.unique(ns_run['thread_labels'])
assert ns_run['thread_min_max'].shape[0] == unique_threads.shape[0], (
'some threads have no points! {0} != {1}'.format(
unique_threads.shape[0], ns_run['thread_min_max'].shape[0]))
threads = []
for i, th_lab in enumerate(unique_threads):
thread_array = samples[np.where(samples[:, 1] == th_lab)]
# delete changes in nlive due to other threads in the run
thread_array[:, 2] = 0
thread_array[-1, 2] = -1
min_max = np.reshape(ns_run['thread_min_max'][i, :], (1, 2))
assert min_max[0, 1] == thread_array[-1, 0], (
'thread max logl should equal logl of its final point!')
threads.append(dict_given_run_array(thread_array, min_max))
return threads | [
"def",
"get_run_threads",
"(",
"ns_run",
")",
":",
"samples",
"=",
"array_given_run",
"(",
"ns_run",
")",
"unique_threads",
"=",
"np",
".",
"unique",
"(",
"ns_run",
"[",
"'thread_labels'",
"]",
")",
"assert",
"ns_run",
"[",
"'thread_min_max'",
"]",
".",
"sha... | Get the individual threads from a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
threads: list of numpy array
Each thread (list element) is a samples array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample. | [
"Get",
"the",
"individual",
"threads",
"from",
"a",
"nested",
"sampling",
"run",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L135-L167 | train |
def combine_ns_runs(run_list_in, **kwargs):
    """
    Merge a list of complete nested sampling run dictionaries into one run.

    Input runs must contain any repeated threads.

    Parameters
    ----------
    run_list_in: list of dicts
        List of nested sampling runs in dict format (see data_processing
        module docstring for more details).
    kwargs: dict, optional
        Options for check_ns_run.

    Returns
    -------
    run: dict
        Nested sampling run dict (see data_processing module docstring for
        more details).
    """
    runs = copy.deepcopy(run_list_in)
    if len(runs) == 1:
        run = runs[0]
    else:
        # Offset each run's thread labels so labels stay unique after the
        # merge.
        offset = 0
        for single_run in runs:
            check_ns_run(single_run, **kwargs)
            single_run['thread_labels'] += offset
            offset += single_run['thread_min_max'].shape[0]
        thread_min_max = np.vstack(
            [single_run['thread_min_max'] for single_run in runs])
        # Stack every run's samples and re-sort by logl to form the
        # combined run (this also updates nlive).
        combined = np.vstack([array_given_run(single_run)
                              for single_run in runs])
        combined = combined[np.argsort(combined[:, 0])]
        run = dict_given_run_array(combined, thread_min_max)
    # Only the additive quantities stored in run['output'] can be combined;
    # anything else is discarded.
    run['output'] = {}
    for key in ['nlike', 'ndead']:
        try:
            run['output'][key] = sum(temp['output'][key]
                                     for temp in run_list_in)
        except KeyError:
            pass
    check_ns_run(run, **kwargs)
    return run
"""
Combine a list of complete nested sampling run dictionaries into a single
ns run.
Input runs must contain any repeated threads.
Parameters
----------
run_list_in: list of dicts
List of nested sampling runs in dict format (see data_processing module
docstring for more details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
"""
run_list = copy.deepcopy(run_list_in)
if len(run_list) == 1:
run = run_list[0]
else:
nthread_tot = 0
for i, _ in enumerate(run_list):
check_ns_run(run_list[i], **kwargs)
run_list[i]['thread_labels'] += nthread_tot
nthread_tot += run_list[i]['thread_min_max'].shape[0]
thread_min_max = np.vstack([run['thread_min_max'] for run in run_list])
# construct samples array from the threads, including an updated nlive
samples_temp = np.vstack([array_given_run(run) for run in run_list])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
# Make combined run
run = dict_given_run_array(samples_temp, thread_min_max)
# Combine only the additive properties stored in run['output']
run['output'] = {}
for key in ['nlike', 'ndead']:
try:
run['output'][key] = sum([temp['output'][key] for temp in
run_list_in])
except KeyError:
pass
check_ns_run(run, **kwargs)
return run | [
"def",
"combine_ns_runs",
"(",
"run_list_in",
",",
"*",
"*",
"kwargs",
")",
":",
"run_list",
"=",
"copy",
".",
"deepcopy",
"(",
"run_list_in",
")",
"if",
"len",
"(",
"run_list",
")",
"==",
"1",
":",
"run",
"=",
"run_list",
"[",
"0",
"]",
"else",
":",... | Combine a list of complete nested sampling run dictionaries into a single
ns run.
Input runs must contain any repeated threads.
Parameters
----------
run_list_in: list of dicts
List of nested sampling runs in dict format (see data_processing module
docstring for more details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details). | [
"Combine",
"a",
"list",
"of",
"complete",
"nested",
"sampling",
"run",
"dictionaries",
"into",
"a",
"single",
"ns",
"run",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L170-L215 | train |
ejhigson/nestcheck | nestcheck/ns_run_utils.py | combine_threads | def combine_threads(threads, assert_birth_point=False):
"""
Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that if all the thread labels are not unique and in ascending order,
the output will fail check_ns_run. However provided the thread labels are
not used it will work ok for calculations based on nlive, logl and theta.
Parameters
----------
threads: list of dicts
List of nested sampling run dicts, each representing a single thread.
assert_birth_point: bool, optional
Whether or not to assert there is exactly one point present in the run
with the log-likelihood at which each point was born. This is not true
for bootstrap resamples of runs, where birth points may be repeated or
not present at all.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
"""
thread_min_max = np.vstack([td['thread_min_max'] for td in threads])
assert len(threads) == thread_min_max.shape[0]
# construct samples array from the threads, including an updated nlive
samples_temp = np.vstack([array_given_run(thread) for thread in threads])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
# update the changes in live points column for threads which start part way
# through the run. These are only present in dynamic nested sampling.
logl_starts = thread_min_max[:, 0]
state = np.random.get_state() # save random state
np.random.seed(0) # seed to make sure any random assignment is repoducable
for logl_start in logl_starts[logl_starts != -np.inf]:
ind = np.where(samples_temp[:, 0] == logl_start)[0]
if assert_birth_point:
assert ind.shape == (1,), \
'No unique birth point! ' + str(ind.shape)
if ind.shape == (1,):
# If the point at which this thread started is present exactly
# once in this bootstrap replication:
samples_temp[ind[0], 2] += 1
elif ind.shape == (0,):
# If the point with the likelihood at which the thread started
# is not present in this particular bootstrap replication,
# approximate it with the point with the nearest likelihood.
ind_closest = np.argmin(np.abs(samples_temp[:, 0] - logl_start))
samples_temp[ind_closest, 2] += 1
else:
# If the point at which this thread started is present multiple
# times in this bootstrap replication, select one at random to
# increment nlive on. This avoids any systematic bias from e.g.
# always choosing the first point.
samples_temp[np.random.choice(ind), 2] += 1
np.random.set_state(state)
# make run
ns_run = dict_given_run_array(samples_temp, thread_min_max)
try:
check_ns_run_threads(ns_run)
except AssertionError:
# If the threads are not valid (e.g. for bootstrap resamples) then
# set them to None so they can't be accidentally used
ns_run['thread_labels'] = None
ns_run['thread_min_max'] = None
return ns_run | python | def combine_threads(threads, assert_birth_point=False):
"""
Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that if all the thread labels are not unique and in ascending order,
the output will fail check_ns_run. However provided the thread labels are
not used it will work ok for calculations based on nlive, logl and theta.
Parameters
----------
threads: list of dicts
List of nested sampling run dicts, each representing a single thread.
assert_birth_point: bool, optional
Whether or not to assert there is exactly one point present in the run
with the log-likelihood at which each point was born. This is not true
for bootstrap resamples of runs, where birth points may be repeated or
not present at all.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
"""
thread_min_max = np.vstack([td['thread_min_max'] for td in threads])
assert len(threads) == thread_min_max.shape[0]
# construct samples array from the threads, including an updated nlive
samples_temp = np.vstack([array_given_run(thread) for thread in threads])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
# update the changes in live points column for threads which start part way
# through the run. These are only present in dynamic nested sampling.
logl_starts = thread_min_max[:, 0]
state = np.random.get_state() # save random state
np.random.seed(0) # seed to make sure any random assignment is repoducable
for logl_start in logl_starts[logl_starts != -np.inf]:
ind = np.where(samples_temp[:, 0] == logl_start)[0]
if assert_birth_point:
assert ind.shape == (1,), \
'No unique birth point! ' + str(ind.shape)
if ind.shape == (1,):
# If the point at which this thread started is present exactly
# once in this bootstrap replication:
samples_temp[ind[0], 2] += 1
elif ind.shape == (0,):
# If the point with the likelihood at which the thread started
# is not present in this particular bootstrap replication,
# approximate it with the point with the nearest likelihood.
ind_closest = np.argmin(np.abs(samples_temp[:, 0] - logl_start))
samples_temp[ind_closest, 2] += 1
else:
# If the point at which this thread started is present multiple
# times in this bootstrap replication, select one at random to
# increment nlive on. This avoids any systematic bias from e.g.
# always choosing the first point.
samples_temp[np.random.choice(ind), 2] += 1
np.random.set_state(state)
# make run
ns_run = dict_given_run_array(samples_temp, thread_min_max)
try:
check_ns_run_threads(ns_run)
except AssertionError:
# If the threads are not valid (e.g. for bootstrap resamples) then
# set them to None so they can't be accidentally used
ns_run['thread_labels'] = None
ns_run['thread_min_max'] = None
return ns_run | [
"def",
"combine_threads",
"(",
"threads",
",",
"assert_birth_point",
"=",
"False",
")",
":",
"thread_min_max",
"=",
"np",
".",
"vstack",
"(",
"[",
"td",
"[",
"'thread_min_max'",
"]",
"for",
"td",
"in",
"threads",
"]",
")",
"assert",
"len",
"(",
"threads",
... | Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that if all the thread labels are not unique and in ascending order,
the output will fail check_ns_run. However provided the thread labels are
not used it will work ok for calculations based on nlive, logl and theta.
Parameters
----------
threads: list of dicts
List of nested sampling run dicts, each representing a single thread.
assert_birth_point: bool, optional
Whether or not to assert there is exactly one point present in the run
with the log-likelihood at which each point was born. This is not true
for bootstrap resamples of runs, where birth points may be repeated or
not present at all.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details). | [
"Combine",
"list",
"of",
"threads",
"into",
"a",
"single",
"ns",
"run",
".",
"This",
"is",
"different",
"to",
"combining",
"runs",
"as",
"repeated",
"threads",
"are",
"allowed",
"and",
"as",
"some",
"threads",
"can",
"start",
"from",
"log",
"-",
"likelihoo... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L218-L286 | train |
ejhigson/nestcheck | nestcheck/ns_run_utils.py | get_logw | def get_logw(ns_run, simulate=False):
r"""Calculates the log posterior weights of the samples (using logarithms
to avoid overflow errors with very large or small values).
Uses the trapezium rule such that the weight of point i is
.. math:: w_i = \mathcal{L}_i (X_{i-1} - X_{i+1}) / 2
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
false their expected values are used).
Returns
-------
logw: 1d numpy array
Log posterior masses of points.
"""
try:
# find logX value for each point
logx = get_logx(ns_run['nlive_array'], simulate=simulate)
logw = np.zeros(ns_run['logl'].shape[0])
# Vectorized trapezium rule: w_i prop to (X_{i-1} - X_{i+1}) / 2
logw[1:-1] = log_subtract(logx[:-2], logx[2:]) - np.log(2)
# Assign all prior volume closest to first point X_first to that point:
# that is from logx=0 to logx=log((X_first + X_second) / 2)
logw[0] = log_subtract(0, scipy.special.logsumexp([logx[0], logx[1]]) -
np.log(2))
# Assign all prior volume closest to final point X_last to that point:
# that is from logx=log((X_penultimate + X_last) / 2) to logx=-inf
logw[-1] = scipy.special.logsumexp([logx[-2], logx[-1]]) - np.log(2)
# multiply by likelihood (add in log space)
logw += ns_run['logl']
return logw
except IndexError:
if ns_run['logl'].shape[0] == 1:
# If there is only one point in the run then assign all prior
# volume X \in (0, 1) to that point, so the weight is just
# 1 * logl_0 = logl_0
return copy.deepcopy(ns_run['logl'])
else:
raise | python | def get_logw(ns_run, simulate=False):
r"""Calculates the log posterior weights of the samples (using logarithms
to avoid overflow errors with very large or small values).
Uses the trapezium rule such that the weight of point i is
.. math:: w_i = \mathcal{L}_i (X_{i-1} - X_{i+1}) / 2
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
false their expected values are used).
Returns
-------
logw: 1d numpy array
Log posterior masses of points.
"""
try:
# find logX value for each point
logx = get_logx(ns_run['nlive_array'], simulate=simulate)
logw = np.zeros(ns_run['logl'].shape[0])
# Vectorized trapezium rule: w_i prop to (X_{i-1} - X_{i+1}) / 2
logw[1:-1] = log_subtract(logx[:-2], logx[2:]) - np.log(2)
# Assign all prior volume closest to first point X_first to that point:
# that is from logx=0 to logx=log((X_first + X_second) / 2)
logw[0] = log_subtract(0, scipy.special.logsumexp([logx[0], logx[1]]) -
np.log(2))
# Assign all prior volume closest to final point X_last to that point:
# that is from logx=log((X_penultimate + X_last) / 2) to logx=-inf
logw[-1] = scipy.special.logsumexp([logx[-2], logx[-1]]) - np.log(2)
# multiply by likelihood (add in log space)
logw += ns_run['logl']
return logw
except IndexError:
if ns_run['logl'].shape[0] == 1:
# If there is only one point in the run then assign all prior
# volume X \in (0, 1) to that point, so the weight is just
# 1 * logl_0 = logl_0
return copy.deepcopy(ns_run['logl'])
else:
raise | [
"def",
"get_logw",
"(",
"ns_run",
",",
"simulate",
"=",
"False",
")",
":",
"try",
":",
"# find logX value for each point",
"logx",
"=",
"get_logx",
"(",
"ns_run",
"[",
"'nlive_array'",
"]",
",",
"simulate",
"=",
"simulate",
")",
"logw",
"=",
"np",
".",
"ze... | r"""Calculates the log posterior weights of the samples (using logarithms
to avoid overflow errors with very large or small values).
Uses the trapezium rule such that the weight of point i is
.. math:: w_i = \mathcal{L}_i (X_{i-1} - X_{i+1}) / 2
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
false their expected values are used).
Returns
-------
logw: 1d numpy array
Log posterior masses of points. | [
"r",
"Calculates",
"the",
"log",
"posterior",
"weights",
"of",
"the",
"samples",
"(",
"using",
"logarithms",
"to",
"avoid",
"overflow",
"errors",
"with",
"very",
"large",
"or",
"small",
"values",
")",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L289-L334 | train |
ejhigson/nestcheck | nestcheck/ns_run_utils.py | get_w_rel | def get_w_rel(ns_run, simulate=False):
"""Get the relative posterior weights of the samples, normalised so
the maximum sample weight is 1. This is calculated from get_logw with
protection against numerical overflows.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
See the get_logw docstring for more details.
Returns
-------
w_rel: 1d numpy array
Relative posterior masses of points.
"""
logw = get_logw(ns_run, simulate=simulate)
return np.exp(logw - logw.max()) | python | def get_w_rel(ns_run, simulate=False):
"""Get the relative posterior weights of the samples, normalised so
the maximum sample weight is 1. This is calculated from get_logw with
protection against numerical overflows.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
See the get_logw docstring for more details.
Returns
-------
w_rel: 1d numpy array
Relative posterior masses of points.
"""
logw = get_logw(ns_run, simulate=simulate)
return np.exp(logw - logw.max()) | [
"def",
"get_w_rel",
"(",
"ns_run",
",",
"simulate",
"=",
"False",
")",
":",
"logw",
"=",
"get_logw",
"(",
"ns_run",
",",
"simulate",
"=",
"simulate",
")",
"return",
"np",
".",
"exp",
"(",
"logw",
"-",
"logw",
".",
"max",
"(",
")",
")"
] | Get the relative posterior weights of the samples, normalised so
the maximum sample weight is 1. This is calculated from get_logw with
protection against numerical overflows.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
See the get_logw docstring for more details.
Returns
-------
w_rel: 1d numpy array
Relative posterior masses of points. | [
"Get",
"the",
"relative",
"posterior",
"weights",
"of",
"the",
"samples",
"normalised",
"so",
"the",
"maximum",
"sample",
"weight",
"is",
"1",
".",
"This",
"is",
"calculated",
"from",
"get_logw",
"with",
"protection",
"against",
"numerical",
"overflows",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L337-L356 | train |
ejhigson/nestcheck | nestcheck/ns_run_utils.py | get_logx | def get_logx(nlive, simulate=False):
r"""Returns a logx vector showing the expected or simulated logx positions
of points.
The shrinkage factor between two points
.. math:: t_i = X_{i-1} / X_{i}
is distributed as the largest of :math:`n_i` uniform random variables
between 1 and 0, where :math:`n_i` is the local number of live points.
We are interested in
.. math:: \log(t_i) = \log X_{i-1} - \log X_{i}
which has expected value :math:`-1/n_i`.
Parameters
----------
nlive_array: 1d numpy array
Ordered local number of live points present at each point's
iso-likelihood contour.
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
False their expected values are used).
Returns
-------
logx: 1d numpy array
log X values for points.
"""
assert nlive.min() > 0, (
'nlive contains zeros or negative values! nlive = ' + str(nlive))
if simulate:
logx_steps = np.log(np.random.random(nlive.shape)) / nlive
else:
logx_steps = -1 * (nlive.astype(float) ** -1)
return np.cumsum(logx_steps) | python | def get_logx(nlive, simulate=False):
r"""Returns a logx vector showing the expected or simulated logx positions
of points.
The shrinkage factor between two points
.. math:: t_i = X_{i-1} / X_{i}
is distributed as the largest of :math:`n_i` uniform random variables
between 1 and 0, where :math:`n_i` is the local number of live points.
We are interested in
.. math:: \log(t_i) = \log X_{i-1} - \log X_{i}
which has expected value :math:`-1/n_i`.
Parameters
----------
nlive_array: 1d numpy array
Ordered local number of live points present at each point's
iso-likelihood contour.
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
False their expected values are used).
Returns
-------
logx: 1d numpy array
log X values for points.
"""
assert nlive.min() > 0, (
'nlive contains zeros or negative values! nlive = ' + str(nlive))
if simulate:
logx_steps = np.log(np.random.random(nlive.shape)) / nlive
else:
logx_steps = -1 * (nlive.astype(float) ** -1)
return np.cumsum(logx_steps) | [
"def",
"get_logx",
"(",
"nlive",
",",
"simulate",
"=",
"False",
")",
":",
"assert",
"nlive",
".",
"min",
"(",
")",
">",
"0",
",",
"(",
"'nlive contains zeros or negative values! nlive = '",
"+",
"str",
"(",
"nlive",
")",
")",
"if",
"simulate",
":",
"logx_s... | r"""Returns a logx vector showing the expected or simulated logx positions
of points.
The shrinkage factor between two points
.. math:: t_i = X_{i-1} / X_{i}
is distributed as the largest of :math:`n_i` uniform random variables
between 1 and 0, where :math:`n_i` is the local number of live points.
We are interested in
.. math:: \log(t_i) = \log X_{i-1} - \log X_{i}
which has expected value :math:`-1/n_i`.
Parameters
----------
nlive_array: 1d numpy array
Ordered local number of live points present at each point's
iso-likelihood contour.
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
False their expected values are used).
Returns
-------
logx: 1d numpy array
log X values for points. | [
"r",
"Returns",
"a",
"logx",
"vector",
"showing",
"the",
"expected",
"or",
"simulated",
"logx",
"positions",
"of",
"points",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L359-L396 | train |
ejhigson/nestcheck | nestcheck/ns_run_utils.py | log_subtract | def log_subtract(loga, logb):
r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)`, :math:`\log (a)` and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
logb: float
Must be less than loga.
Returns
-------
log(a - b): float
"""
return loga + np.log(1 - np.exp(logb - loga)) | python | def log_subtract(loga, logb):
r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)`, :math:`\log (a)` and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
logb: float
Must be less than loga.
Returns
-------
log(a - b): float
"""
return loga + np.log(1 - np.exp(logb - loga)) | [
"def",
"log_subtract",
"(",
"loga",
",",
"logb",
")",
":",
"return",
"loga",
"+",
"np",
".",
"log",
"(",
"1",
"-",
"np",
".",
"exp",
"(",
"logb",
"-",
"loga",
")",
")"
] | r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)`, :math:`\log (a)` and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
logb: float
Must be less than loga.
Returns
-------
log(a - b): float | [
"r",
"Numerically",
"stable",
"method",
"for",
"avoiding",
"overflow",
"errors",
"when",
"calculating",
":",
"math",
":",
"\\",
"log",
"(",
"a",
"-",
"b",
")",
"given",
":",
"math",
":",
"\\",
"log",
"(",
"a",
")",
":",
"math",
":",
"\\",
"log",
"(... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L399-L417 | train |
ejhigson/nestcheck | nestcheck/ns_run_utils.py | check_ns_run | def check_ns_run(run, dup_assert=False, dup_warn=False):
"""Checks a nestcheck format nested sampling run dictionary has the
expected properties (see the data_processing module docstring for more
details).
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
See check_ns_run_logls docstring.
dup_warn: bool, optional
See check_ns_run_logls docstring.
Raises
------
AssertionError
if run does not have expected properties.
"""
assert isinstance(run, dict)
check_ns_run_members(run)
check_ns_run_logls(run, dup_assert=dup_assert, dup_warn=dup_warn)
check_ns_run_threads(run) | python | def check_ns_run(run, dup_assert=False, dup_warn=False):
"""Checks a nestcheck format nested sampling run dictionary has the
expected properties (see the data_processing module docstring for more
details).
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
See check_ns_run_logls docstring.
dup_warn: bool, optional
See check_ns_run_logls docstring.
Raises
------
AssertionError
if run does not have expected properties.
"""
assert isinstance(run, dict)
check_ns_run_members(run)
check_ns_run_logls(run, dup_assert=dup_assert, dup_warn=dup_warn)
check_ns_run_threads(run) | [
"def",
"check_ns_run",
"(",
"run",
",",
"dup_assert",
"=",
"False",
",",
"dup_warn",
"=",
"False",
")",
":",
"assert",
"isinstance",
"(",
"run",
",",
"dict",
")",
"check_ns_run_members",
"(",
"run",
")",
"check_ns_run_logls",
"(",
"run",
",",
"dup_assert",
... | Checks a nestcheck format nested sampling run dictionary has the
expected properties (see the data_processing module docstring for more
details).
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
See check_ns_run_logls docstring.
dup_warn: bool, optional
See check_ns_run_logls docstring.
Raises
------
AssertionError
if run does not have expected properties. | [
"Checks",
"a",
"nestcheck",
"format",
"nested",
"sampling",
"run",
"dictionary",
"has",
"the",
"expected",
"properties",
"(",
"see",
"the",
"data_processing",
"module",
"docstring",
"for",
"more",
"details",
")",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L424-L447 | train |
ejhigson/nestcheck | nestcheck/ns_run_utils.py | check_ns_run_members | def check_ns_run_members(run):
"""Check nested sampling run member keys and values.
Parameters
----------
run: dict
nested sampling run to check.
Raises
------
AssertionError
if run does not have expected properties.
"""
run_keys = list(run.keys())
# Mandatory keys
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert key in run_keys
run_keys.remove(key)
# Optional keys
for key in ['output']:
try:
run_keys.remove(key)
except ValueError:
pass
# Check for unexpected keys
assert not run_keys, 'Unexpected keys in ns_run: ' + str(run_keys)
# Check type of mandatory members
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert isinstance(run[key], np.ndarray), (
key + ' is type ' + type(run[key]).__name__)
# check shapes of keys
assert run['logl'].ndim == 1
assert run['logl'].shape == run['nlive_array'].shape
assert run['logl'].shape == run['thread_labels'].shape
assert run['theta'].ndim == 2
assert run['logl'].shape[0] == run['theta'].shape[0] | python | def check_ns_run_members(run):
"""Check nested sampling run member keys and values.
Parameters
----------
run: dict
nested sampling run to check.
Raises
------
AssertionError
if run does not have expected properties.
"""
run_keys = list(run.keys())
# Mandatory keys
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert key in run_keys
run_keys.remove(key)
# Optional keys
for key in ['output']:
try:
run_keys.remove(key)
except ValueError:
pass
# Check for unexpected keys
assert not run_keys, 'Unexpected keys in ns_run: ' + str(run_keys)
# Check type of mandatory members
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert isinstance(run[key], np.ndarray), (
key + ' is type ' + type(run[key]).__name__)
# check shapes of keys
assert run['logl'].ndim == 1
assert run['logl'].shape == run['nlive_array'].shape
assert run['logl'].shape == run['thread_labels'].shape
assert run['theta'].ndim == 2
assert run['logl'].shape[0] == run['theta'].shape[0] | [
"def",
"check_ns_run_members",
"(",
"run",
")",
":",
"run_keys",
"=",
"list",
"(",
"run",
".",
"keys",
"(",
")",
")",
"# Mandatory keys",
"for",
"key",
"in",
"[",
"'logl'",
",",
"'nlive_array'",
",",
"'theta'",
",",
"'thread_labels'",
",",
"'thread_min_max'"... | Check nested sampling run member keys and values.
Parameters
----------
run: dict
nested sampling run to check.
Raises
------
AssertionError
if run does not have expected properties. | [
"Check",
"nested",
"sampling",
"run",
"member",
"keys",
"and",
"values",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L450-L487 | train |
ejhigson/nestcheck | nestcheck/ns_run_utils.py | check_ns_run_logls | def check_ns_run_logls(run, dup_assert=False, dup_warn=False):
"""Check run logls are unique and in the correct order.
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
Whether to raise and AssertionError if there are duplicate logl values.
dup_warn: bool, optional
Whether to give a UserWarning if there are duplicate logl values (only
used if dup_assert is False).
Raises
------
AssertionError
if run does not have expected properties.
"""
assert np.array_equal(run['logl'], run['logl'][np.argsort(run['logl'])])
if dup_assert or dup_warn:
unique_logls, counts = np.unique(run['logl'], return_counts=True)
repeat_logls = run['logl'].shape[0] - unique_logls.shape[0]
msg = ('{} duplicate logl values (out of a total of {}). This may be '
'caused by limited numerical precision in the output files.'
'\nrepeated logls = {}\ncounts = {}\npositions in list of {}'
' unique logls = {}').format(
repeat_logls, run['logl'].shape[0],
unique_logls[counts != 1], counts[counts != 1],
unique_logls.shape[0], np.where(counts != 1)[0])
if dup_assert:
assert repeat_logls == 0, msg
elif dup_warn:
if repeat_logls != 0:
warnings.warn(msg, UserWarning) | python | def check_ns_run_logls(run, dup_assert=False, dup_warn=False):
"""Check run logls are unique and in the correct order.
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
Whether to raise and AssertionError if there are duplicate logl values.
dup_warn: bool, optional
Whether to give a UserWarning if there are duplicate logl values (only
used if dup_assert is False).
Raises
------
AssertionError
if run does not have expected properties.
"""
assert np.array_equal(run['logl'], run['logl'][np.argsort(run['logl'])])
if dup_assert or dup_warn:
unique_logls, counts = np.unique(run['logl'], return_counts=True)
repeat_logls = run['logl'].shape[0] - unique_logls.shape[0]
msg = ('{} duplicate logl values (out of a total of {}). This may be '
'caused by limited numerical precision in the output files.'
'\nrepeated logls = {}\ncounts = {}\npositions in list of {}'
' unique logls = {}').format(
repeat_logls, run['logl'].shape[0],
unique_logls[counts != 1], counts[counts != 1],
unique_logls.shape[0], np.where(counts != 1)[0])
if dup_assert:
assert repeat_logls == 0, msg
elif dup_warn:
if repeat_logls != 0:
warnings.warn(msg, UserWarning) | [
"def",
"check_ns_run_logls",
"(",
"run",
",",
"dup_assert",
"=",
"False",
",",
"dup_warn",
"=",
"False",
")",
":",
"assert",
"np",
".",
"array_equal",
"(",
"run",
"[",
"'logl'",
"]",
",",
"run",
"[",
"'logl'",
"]",
"[",
"np",
".",
"argsort",
"(",
"ru... | Check run logls are unique and in the correct order.
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
Whether to raise and AssertionError if there are duplicate logl values.
dup_warn: bool, optional
Whether to give a UserWarning if there are duplicate logl values (only
used if dup_assert is False).
Raises
------
AssertionError
if run does not have expected properties. | [
"Check",
"run",
"logls",
"are",
"unique",
"and",
"in",
"the",
"correct",
"order",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L490-L523 | train |
ejhigson/nestcheck | nestcheck/ns_run_utils.py | check_ns_run_threads | def check_ns_run_threads(run):
"""Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties.
"""
assert run['thread_labels'].dtype == int
uniq_th = np.unique(run['thread_labels'])
assert np.array_equal(
np.asarray(range(run['thread_min_max'].shape[0])), uniq_th), \
str(uniq_th)
# Check thread_min_max
assert np.any(run['thread_min_max'][:, 0] == -np.inf), (
'Run should have at least one thread which starts by sampling the ' +
'whole prior')
for th_lab in uniq_th:
inds = np.where(run['thread_labels'] == th_lab)[0]
th_info = 'thread label={}, first_logl={}, thread_min_max={}'.format(
th_lab, run['logl'][inds[0]], run['thread_min_max'][th_lab, :])
assert run['thread_min_max'][th_lab, 0] <= run['logl'][inds[0]], (
'First point in thread has logl less than thread min logl! ' +
th_info + ', difference={}'.format(
run['logl'][inds[0]] - run['thread_min_max'][th_lab, 0]))
assert run['thread_min_max'][th_lab, 1] == run['logl'][inds[-1]], (
'Last point in thread logl != thread end logl! ' + th_info) | python | def check_ns_run_threads(run):
"""Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties.
"""
assert run['thread_labels'].dtype == int
uniq_th = np.unique(run['thread_labels'])
assert np.array_equal(
np.asarray(range(run['thread_min_max'].shape[0])), uniq_th), \
str(uniq_th)
# Check thread_min_max
assert np.any(run['thread_min_max'][:, 0] == -np.inf), (
'Run should have at least one thread which starts by sampling the ' +
'whole prior')
for th_lab in uniq_th:
inds = np.where(run['thread_labels'] == th_lab)[0]
th_info = 'thread label={}, first_logl={}, thread_min_max={}'.format(
th_lab, run['logl'][inds[0]], run['thread_min_max'][th_lab, :])
assert run['thread_min_max'][th_lab, 0] <= run['logl'][inds[0]], (
'First point in thread has logl less than thread min logl! ' +
th_info + ', difference={}'.format(
run['logl'][inds[0]] - run['thread_min_max'][th_lab, 0]))
assert run['thread_min_max'][th_lab, 1] == run['logl'][inds[-1]], (
'Last point in thread logl != thread end logl! ' + th_info) | [
"def",
"check_ns_run_threads",
"(",
"run",
")",
":",
"assert",
"run",
"[",
"'thread_labels'",
"]",
".",
"dtype",
"==",
"int",
"uniq_th",
"=",
"np",
".",
"unique",
"(",
"run",
"[",
"'thread_labels'",
"]",
")",
"assert",
"np",
".",
"array_equal",
"(",
"np"... | Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties. | [
"Check",
"thread",
"labels",
"and",
"thread_min_max",
"have",
"expected",
"properties",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L526-L557 | train |
ejhigson/nestcheck | nestcheck/estimators.py | count_samples | def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0] | python | def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0] | [
"def",
"count_samples",
"(",
"ns_run",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"pop",
"(",
"'logw'",
",",
"None",
")",
"kwargs",
".",
"pop",
"(",
"'simulate'",
",",
"None",
")",
"if",
"kwargs",
":",
"raise",
"TypeError",
"(",
"'Unexpected **k... | r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int | [
"r",
"Number",
"of",
"samples",
"in",
"run",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L32-L52 | train |
ejhigson/nestcheck | nestcheck/estimators.py | logz | def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw) | python | def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw) | [
"def",
"logz",
"(",
"ns_run",
",",
"logw",
"=",
"None",
",",
"simulate",
"=",
"False",
")",
":",
"if",
"logw",
"is",
"None",
":",
"logw",
"=",
"nestcheck",
".",
"ns_run_utils",
".",
"get_logw",
"(",
"ns_run",
",",
"simulate",
"=",
"simulate",
")",
"r... | r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | [
"r",
"Natural",
"log",
"of",
"Bayesian",
"evidence",
":",
"math",
":",
"\\",
"log",
"\\",
"mathcal",
"{",
"Z",
"}",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L55-L75 | train |
ejhigson/nestcheck | nestcheck/estimators.py | evidence | def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw)) | python | def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw)) | [
"def",
"evidence",
"(",
"ns_run",
",",
"logw",
"=",
"None",
",",
"simulate",
"=",
"False",
")",
":",
"if",
"logw",
"is",
"None",
":",
"logw",
"=",
"nestcheck",
".",
"ns_run_utils",
".",
"get_logw",
"(",
"ns_run",
",",
"simulate",
"=",
"simulate",
")",
... | r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | [
"r",
"Bayesian",
"evidence",
":",
"math",
":",
"\\",
"log",
"\\",
"mathcal",
"{",
"Z",
"}",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L78-L98 | train |
ejhigson/nestcheck | nestcheck/estimators.py | param_mean | def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise | python | def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise | [
"def",
"param_mean",
"(",
"ns_run",
",",
"logw",
"=",
"None",
",",
"simulate",
"=",
"False",
",",
"param_ind",
"=",
"0",
",",
"handle_indexerror",
"=",
"False",
")",
":",
"if",
"logw",
"is",
"None",
":",
"logw",
"=",
"nestcheck",
".",
"ns_run_utils",
"... | Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float | [
"Mean",
"of",
"a",
"single",
"parameter",
"(",
"single",
"component",
"of",
"theta",
")",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L101-L138 | train |
ejhigson/nestcheck | nestcheck/estimators.py | param_cred | def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative) | python | def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative) | [
"def",
"param_cred",
"(",
"ns_run",
",",
"logw",
"=",
"None",
",",
"simulate",
"=",
"False",
",",
"probability",
"=",
"0.5",
",",
"param_ind",
"=",
"0",
")",
":",
"if",
"logw",
"is",
"None",
":",
"logw",
"=",
"nestcheck",
".",
"ns_run_utils",
".",
"g... | One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float | [
"One",
"-",
"tailed",
"credible",
"interval",
"on",
"the",
"value",
"of",
"a",
"single",
"parameter",
"(",
"component",
"of",
"theta",
")",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L141-L173 | train |
ejhigson/nestcheck | nestcheck/estimators.py | param_squared_mean | def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2)) | python | def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2)) | [
"def",
"param_squared_mean",
"(",
"ns_run",
",",
"logw",
"=",
"None",
",",
"simulate",
"=",
"False",
",",
"param_ind",
"=",
"0",
")",
":",
"if",
"logw",
"is",
"None",
":",
"logw",
"=",
"nestcheck",
".",
"ns_run_utils",
".",
"get_logw",
"(",
"ns_run",
"... | Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float | [
"Mean",
"of",
"the",
"square",
"of",
"single",
"parameter",
"(",
"second",
"moment",
"of",
"its",
"posterior",
"distribution",
")",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L176-L203 | train |
ejhigson/nestcheck | nestcheck/estimators.py | r_mean | def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative) | python | def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative) | [
"def",
"r_mean",
"(",
"ns_run",
",",
"logw",
"=",
"None",
",",
"simulate",
"=",
"False",
")",
":",
"if",
"logw",
"is",
"None",
":",
"logw",
"=",
"nestcheck",
".",
"ns_run_utils",
".",
"get_logw",
"(",
"ns_run",
",",
"simulate",
"=",
"simulate",
")",
... | Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float | [
"Mean",
"of",
"the",
"radial",
"coordinate",
"(",
"magnitude",
"of",
"theta",
"vector",
")",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L206-L228 | train |
ejhigson/nestcheck | nestcheck/estimators.py | r_cred | def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative) | python | def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative) | [
"def",
"r_cred",
"(",
"ns_run",
",",
"logw",
"=",
"None",
",",
"simulate",
"=",
"False",
",",
"probability",
"=",
"0.5",
")",
":",
"if",
"logw",
"is",
"None",
":",
"logw",
"=",
"nestcheck",
".",
"ns_run_utils",
".",
"get_logw",
"(",
"ns_run",
",",
"s... | One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float | [
"One",
"-",
"tailed",
"credible",
"interval",
"on",
"the",
"value",
"of",
"the",
"radial",
"coordinate",
"(",
"magnitude",
"of",
"theta",
"vector",
")",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L231-L258 | train |
ejhigson/nestcheck | nestcheck/estimators.py | get_latex_name | def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise | python | def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise | [
"def",
"get_latex_name",
"(",
"func_in",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"func_in",
",",
"functools",
".",
"partial",
")",
":",
"func",
"=",
"func_in",
".",
"func",
"assert",
"not",
"set",
"(",
"func_in",
".",
"keywords",
")... | Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function. | [
"Produce",
"a",
"latex",
"formatted",
"name",
"for",
"each",
"function",
"for",
"use",
"in",
"labelling",
"results",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L265-L316 | train |
ejhigson/nestcheck | nestcheck/estimators.py | weighted_quantile | def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds]) | python | def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds]) | [
"def",
"weighted_quantile",
"(",
"probability",
",",
"values",
",",
"weights",
")",
":",
"assert",
"1",
">",
"probability",
">",
"0",
",",
"(",
"'credible interval prob= '",
"+",
"str",
"(",
"probability",
")",
"+",
"' not in (0, 1)'",
")",
"assert",
"values",... | Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float | [
"Get",
"quantile",
"estimate",
"for",
"input",
"probability",
"given",
"weighted",
"samples",
"using",
"linear",
"interpolation",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L319-L347 | train |
ejhigson/nestcheck | nestcheck/write_polychord_output.py | write_run_output | def write_run_output(run, **kwargs):
"""Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
documentation for more information about what each contains):
* [root].stats
* [root].txt
* [root]_equal_weights.txt
* [root]_dead-birth.txt
* [root]_dead.txt
Files produced by PolyChord which are not made by this function:
* [root].resume: for resuming runs part way through (not relevant for a
completed run).
* [root]_phys_live.txt and [root]phys_live-birth.txt: for checking runtime
progress (not relevant for a completed run).
* [root].paramnames: for use with getdist (not needed when calling getdist
from within python).
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
write_dead: bool, optional
Whether or not to write [root]_dead.txt and [root]_dead-birth.txt.
write_stats: bool, optional
Whether or not to write [root].stats.
posteriors: bool, optional
Whether or not to write [root].txt.
equals: bool, optional
Whether or not to write [root]_equal_weights.txt.
stats_means_errs: bool, optional
Whether or not to calculate mean values of :math:`\log \mathcal{Z}` and
each parameter, and their uncertainties.
fmt: str, optional
Formatting for numbers written by np.savetxt. Default value is set to
make output files look like the ones produced by PolyChord.
n_simulate: int, optional
Number of bootstrap replications to use when estimating uncertainty on
evidence and parameter means.
"""
write_dead = kwargs.pop('write_dead', True)
write_stats = kwargs.pop('write_stats', True)
posteriors = kwargs.pop('posteriors', False)
equals = kwargs.pop('equals', False)
stats_means_errs = kwargs.pop('stats_means_errs', True)
fmt = kwargs.pop('fmt', '% .14E')
n_simulate = kwargs.pop('n_simulate', 100)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run['output'], key + ' not in run["output"]'
root = os.path.join(run['output']['base_dir'], run['output']['file_root'])
if write_dead:
samples = run_dead_birth_array(run)
np.savetxt(root + '_dead-birth.txt', samples, fmt=fmt)
np.savetxt(root + '_dead.txt', samples[:, :-1], fmt=fmt)
if equals or posteriors:
w_rel = nestcheck.ns_run_utils.get_w_rel(run)
post_arr = np.zeros((run['theta'].shape[0], run['theta'].shape[1] + 2))
post_arr[:, 0] = w_rel
post_arr[:, 1] = -2 * run['logl']
post_arr[:, 2:] = run['theta']
if posteriors:
np.savetxt(root + '.txt', post_arr, fmt=fmt)
run['output']['nposterior'] = post_arr.shape[0]
else:
run['output']['nposterior'] = 0
if equals:
inds = np.where(w_rel > np.random.random(w_rel.shape[0]))[0]
np.savetxt(root + '_equal_weights.txt', post_arr[inds, 1:],
fmt=fmt)
run['output']['nequals'] = inds.shape[0]
else:
run['output']['nequals'] = 0
if write_stats:
run['output']['ndead'] = run['logl'].shape[0]
if stats_means_errs:
# Get logZ and param estimates and errors
estimators = [e.logz]
for i in range(run['theta'].shape[1]):
estimators.append(functools.partial(e.param_mean, param_ind=i))
values = nestcheck.ns_run_utils.run_estimators(run, estimators)
stds = nestcheck.error_analysis.run_std_bootstrap(
run, estimators, n_simulate=n_simulate)
run['output']['logZ'] = values[0]
run['output']['logZerr'] = stds[0]
run['output']['param_means'] = list(values[1:])
run['output']['param_mean_errs'] = list(stds[1:])
write_stats_file(run['output']) | python | def write_run_output(run, **kwargs):
"""Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
documentation for more information about what each contains):
* [root].stats
* [root].txt
* [root]_equal_weights.txt
* [root]_dead-birth.txt
* [root]_dead.txt
Files produced by PolyChord which are not made by this function:
* [root].resume: for resuming runs part way through (not relevant for a
completed run).
* [root]_phys_live.txt and [root]phys_live-birth.txt: for checking runtime
progress (not relevant for a completed run).
* [root].paramnames: for use with getdist (not needed when calling getdist
from within python).
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
write_dead: bool, optional
Whether or not to write [root]_dead.txt and [root]_dead-birth.txt.
write_stats: bool, optional
Whether or not to write [root].stats.
posteriors: bool, optional
Whether or not to write [root].txt.
equals: bool, optional
Whether or not to write [root]_equal_weights.txt.
stats_means_errs: bool, optional
Whether or not to calculate mean values of :math:`\log \mathcal{Z}` and
each parameter, and their uncertainties.
fmt: str, optional
Formatting for numbers written by np.savetxt. Default value is set to
make output files look like the ones produced by PolyChord.
n_simulate: int, optional
Number of bootstrap replications to use when estimating uncertainty on
evidence and parameter means.
"""
write_dead = kwargs.pop('write_dead', True)
write_stats = kwargs.pop('write_stats', True)
posteriors = kwargs.pop('posteriors', False)
equals = kwargs.pop('equals', False)
stats_means_errs = kwargs.pop('stats_means_errs', True)
fmt = kwargs.pop('fmt', '% .14E')
n_simulate = kwargs.pop('n_simulate', 100)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run['output'], key + ' not in run["output"]'
root = os.path.join(run['output']['base_dir'], run['output']['file_root'])
if write_dead:
samples = run_dead_birth_array(run)
np.savetxt(root + '_dead-birth.txt', samples, fmt=fmt)
np.savetxt(root + '_dead.txt', samples[:, :-1], fmt=fmt)
if equals or posteriors:
w_rel = nestcheck.ns_run_utils.get_w_rel(run)
post_arr = np.zeros((run['theta'].shape[0], run['theta'].shape[1] + 2))
post_arr[:, 0] = w_rel
post_arr[:, 1] = -2 * run['logl']
post_arr[:, 2:] = run['theta']
if posteriors:
np.savetxt(root + '.txt', post_arr, fmt=fmt)
run['output']['nposterior'] = post_arr.shape[0]
else:
run['output']['nposterior'] = 0
if equals:
inds = np.where(w_rel > np.random.random(w_rel.shape[0]))[0]
np.savetxt(root + '_equal_weights.txt', post_arr[inds, 1:],
fmt=fmt)
run['output']['nequals'] = inds.shape[0]
else:
run['output']['nequals'] = 0
if write_stats:
run['output']['ndead'] = run['logl'].shape[0]
if stats_means_errs:
# Get logZ and param estimates and errors
estimators = [e.logz]
for i in range(run['theta'].shape[1]):
estimators.append(functools.partial(e.param_mean, param_ind=i))
values = nestcheck.ns_run_utils.run_estimators(run, estimators)
stds = nestcheck.error_analysis.run_std_bootstrap(
run, estimators, n_simulate=n_simulate)
run['output']['logZ'] = values[0]
run['output']['logZerr'] = stds[0]
run['output']['param_means'] = list(values[1:])
run['output']['param_mean_errs'] = list(stds[1:])
write_stats_file(run['output']) | [
"def",
"write_run_output",
"(",
"run",
",",
"*",
"*",
"kwargs",
")",
":",
"write_dead",
"=",
"kwargs",
".",
"pop",
"(",
"'write_dead'",
",",
"True",
")",
"write_stats",
"=",
"kwargs",
".",
"pop",
"(",
"'write_stats'",
",",
"True",
")",
"posteriors",
"=",... | Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
documentation for more information about what each contains):
* [root].stats
* [root].txt
* [root]_equal_weights.txt
* [root]_dead-birth.txt
* [root]_dead.txt
Files produced by PolyChord which are not made by this function:
* [root].resume: for resuming runs part way through (not relevant for a
completed run).
* [root]_phys_live.txt and [root]phys_live-birth.txt: for checking runtime
progress (not relevant for a completed run).
* [root].paramnames: for use with getdist (not needed when calling getdist
from within python).
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
write_dead: bool, optional
Whether or not to write [root]_dead.txt and [root]_dead-birth.txt.
write_stats: bool, optional
Whether or not to write [root].stats.
posteriors: bool, optional
Whether or not to write [root].txt.
equals: bool, optional
Whether or not to write [root]_equal_weights.txt.
stats_means_errs: bool, optional
Whether or not to calculate mean values of :math:`\log \mathcal{Z}` and
each parameter, and their uncertainties.
fmt: str, optional
Formatting for numbers written by np.savetxt. Default value is set to
make output files look like the ones produced by PolyChord.
n_simulate: int, optional
Number of bootstrap replications to use when estimating uncertainty on
evidence and parameter means. | [
"Writes",
"PolyChord",
"output",
"files",
"corresponding",
"to",
"the",
"input",
"nested",
"sampling",
"run",
".",
"The",
"file",
"root",
"is"
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/write_polychord_output.py#L16-L115 | train |
ejhigson/nestcheck | nestcheck/write_polychord_output.py | run_dead_birth_array | def run_dead_birth_array(run, **kwargs):
"""Converts input run into an array of the format of a PolyChord
<root>_dead-birth.txt file. Note that this in fact includes live points
remaining at termination as well as dead points.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
"""
nestcheck.ns_run_utils.check_ns_run(run, **kwargs)
threads = nestcheck.ns_run_utils.get_run_threads(run)
samp_arrays = []
ndim = run['theta'].shape[1]
for th in threads:
samp_arr = np.zeros((th['theta'].shape[0], ndim + 2))
samp_arr[:, :ndim] = th['theta']
samp_arr[:, ndim] = th['logl']
samp_arr[1:, ndim + 1] = th['logl'][:-1]
if th['thread_min_max'][0, 0] == -np.inf:
samp_arr[0, ndim + 1] = -1e30
else:
samp_arr[0, ndim + 1] = th['thread_min_max'][0, 0]
samp_arrays.append(samp_arr)
samples = np.vstack(samp_arrays)
samples = samples[np.argsort(samples[:, ndim]), :]
return samples | python | def run_dead_birth_array(run, **kwargs):
"""Converts input run into an array of the format of a PolyChord
<root>_dead-birth.txt file. Note that this in fact includes live points
remaining at termination as well as dead points.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl
"""
nestcheck.ns_run_utils.check_ns_run(run, **kwargs)
threads = nestcheck.ns_run_utils.get_run_threads(run)
samp_arrays = []
ndim = run['theta'].shape[1]
for th in threads:
samp_arr = np.zeros((th['theta'].shape[0], ndim + 2))
samp_arr[:, :ndim] = th['theta']
samp_arr[:, ndim] = th['logl']
samp_arr[1:, ndim + 1] = th['logl'][:-1]
if th['thread_min_max'][0, 0] == -np.inf:
samp_arr[0, ndim + 1] = -1e30
else:
samp_arr[0, ndim + 1] = th['thread_min_max'][0, 0]
samp_arrays.append(samp_arr)
samples = np.vstack(samp_arrays)
samples = samples[np.argsort(samples[:, ndim]), :]
return samples | [
"def",
"run_dead_birth_array",
"(",
"run",
",",
"*",
"*",
"kwargs",
")",
":",
"nestcheck",
".",
"ns_run_utils",
".",
"check_ns_run",
"(",
"run",
",",
"*",
"*",
"kwargs",
")",
"threads",
"=",
"nestcheck",
".",
"ns_run_utils",
".",
"get_run_threads",
"(",
"r... | Converts input run into an array of the format of a PolyChord
<root>_dead-birth.txt file. Note that this in fact includes live points
remaining at termination as well as dead points.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters + 2 columns:
param_1, param_2, ... , logl, birth_logl | [
"Converts",
"input",
"run",
"into",
"an",
"array",
"of",
"the",
"format",
"of",
"a",
"PolyChord",
"<root",
">",
"_dead",
"-",
"birth",
".",
"txt",
"file",
".",
"Note",
"that",
"this",
"in",
"fact",
"includes",
"live",
"points",
"remaining",
"at",
"termin... | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/write_polychord_output.py#L118-L154 | train |
ejhigson/nestcheck | nestcheck/write_polychord_output.py | write_stats_file | def write_stats_file(run_output_dict):
"""Writes a dummy PolyChord format .stats file for tests functions for
processing stats files. This is written to:
base_dir/file_root.stats
Also returns the data in the file as a dict for comparison.
Parameters
----------
run_output_dict: dict
Output information to write to .stats file. Must contain file_root and
base_dir. If other settings are not specified, default values are used.
Returns
-------
output: dict
The expected output of
nestcheck.process_polychord_stats(file_root, base_dir)
"""
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run_output_dict, key + ' not in run_output_dict'
default_output = {'logZ': 0.0,
'logZerr': 0.0,
'logZs': [0.0],
'logZerrs': [0.0],
'ncluster': 1,
'nposterior': 0,
'nequals': 0,
'ndead': 0,
'nlike': 0,
'nlive': 0,
'avnlike': 0.0,
'avnlikeslice': 0.0,
'param_means': [0.0, 0.0, 0.0],
'param_mean_errs': [0.0, 0.0, 0.0]}
allowed_keys = set(mandatory_keys) | set(default_output.keys())
assert set(run_output_dict.keys()).issubset(allowed_keys), (
'Input dict contains unexpected keys: {}'.format(
set(run_output_dict.keys()) - allowed_keys))
output = copy.deepcopy(run_output_dict)
for key, value in default_output.items():
if key not in output:
output[key] = value
# Make a PolyChord format .stats file corresponding to output
file_lines = [
'Evidence estimates:',
'===================',
(' - The evidence Z is a log-normally distributed, with location and '
'scale parameters mu and sigma.'),
' - We denote this as log(Z) = mu +/- sigma.',
'',
'Global evidence:',
'----------------',
'',
'log(Z) = {0} +/- {1}'.format(
output['logZ'], output['logZerr']),
'',
'',
'Local evidences:',
'----------------',
'']
for i, (lz, lzerr) in enumerate(zip(output['logZs'], output['logZerrs'])):
file_lines.append('log(Z_ {0}) = {1} +/- {2}'.format(
str(i + 1).rjust(2), lz, lzerr))
file_lines += [
'',
'',
'Run-time information:',
'---------------------',
'',
' ncluster: 0 / 1',
' nposterior: {0}'.format(output['nposterior']),
' nequals: {0}'.format(output['nequals']),
' ndead: {0}'.format(output['ndead']),
' nlive: {0}'.format(output['nlive']),
' nlike: {0}'.format(output['nlike']),
' <nlike>: {0} ( {1} per slice )'.format(
output['avnlike'], output['avnlikeslice']),
'',
'',
'Dim No. Mean Sigma']
for i, (mean, meanerr) in enumerate(zip(output['param_means'],
output['param_mean_errs'])):
file_lines.append('{0} {1} +/- {2}'.format(
str(i + 1).ljust(3), mean, meanerr))
file_path = os.path.join(output['base_dir'],
output['file_root'] + '.stats')
with open(file_path, 'w') as stats_file:
stats_file.writelines('{}\n'.format(line) for line in file_lines)
return output | python | def write_stats_file(run_output_dict):
"""Writes a dummy PolyChord format .stats file for tests functions for
processing stats files. This is written to:
base_dir/file_root.stats
Also returns the data in the file as a dict for comparison.
Parameters
----------
run_output_dict: dict
Output information to write to .stats file. Must contain file_root and
base_dir. If other settings are not specified, default values are used.
Returns
-------
output: dict
The expected output of
nestcheck.process_polychord_stats(file_root, base_dir)
"""
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run_output_dict, key + ' not in run_output_dict'
default_output = {'logZ': 0.0,
'logZerr': 0.0,
'logZs': [0.0],
'logZerrs': [0.0],
'ncluster': 1,
'nposterior': 0,
'nequals': 0,
'ndead': 0,
'nlike': 0,
'nlive': 0,
'avnlike': 0.0,
'avnlikeslice': 0.0,
'param_means': [0.0, 0.0, 0.0],
'param_mean_errs': [0.0, 0.0, 0.0]}
allowed_keys = set(mandatory_keys) | set(default_output.keys())
assert set(run_output_dict.keys()).issubset(allowed_keys), (
'Input dict contains unexpected keys: {}'.format(
set(run_output_dict.keys()) - allowed_keys))
output = copy.deepcopy(run_output_dict)
for key, value in default_output.items():
if key not in output:
output[key] = value
# Make a PolyChord format .stats file corresponding to output
file_lines = [
'Evidence estimates:',
'===================',
(' - The evidence Z is a log-normally distributed, with location and '
'scale parameters mu and sigma.'),
' - We denote this as log(Z) = mu +/- sigma.',
'',
'Global evidence:',
'----------------',
'',
'log(Z) = {0} +/- {1}'.format(
output['logZ'], output['logZerr']),
'',
'',
'Local evidences:',
'----------------',
'']
for i, (lz, lzerr) in enumerate(zip(output['logZs'], output['logZerrs'])):
file_lines.append('log(Z_ {0}) = {1} +/- {2}'.format(
str(i + 1).rjust(2), lz, lzerr))
file_lines += [
'',
'',
'Run-time information:',
'---------------------',
'',
' ncluster: 0 / 1',
' nposterior: {0}'.format(output['nposterior']),
' nequals: {0}'.format(output['nequals']),
' ndead: {0}'.format(output['ndead']),
' nlive: {0}'.format(output['nlive']),
' nlike: {0}'.format(output['nlike']),
' <nlike>: {0} ( {1} per slice )'.format(
output['avnlike'], output['avnlikeslice']),
'',
'',
'Dim No. Mean Sigma']
for i, (mean, meanerr) in enumerate(zip(output['param_means'],
output['param_mean_errs'])):
file_lines.append('{0} {1} +/- {2}'.format(
str(i + 1).ljust(3), mean, meanerr))
file_path = os.path.join(output['base_dir'],
output['file_root'] + '.stats')
with open(file_path, 'w') as stats_file:
stats_file.writelines('{}\n'.format(line) for line in file_lines)
return output | [
"def",
"write_stats_file",
"(",
"run_output_dict",
")",
":",
"mandatory_keys",
"=",
"[",
"'file_root'",
",",
"'base_dir'",
"]",
"for",
"key",
"in",
"mandatory_keys",
":",
"assert",
"key",
"in",
"run_output_dict",
",",
"key",
"+",
"' not in run_output_dict'",
"defa... | Writes a dummy PolyChord format .stats file for tests functions for
processing stats files. This is written to:
base_dir/file_root.stats
Also returns the data in the file as a dict for comparison.
Parameters
----------
run_output_dict: dict
Output information to write to .stats file. Must contain file_root and
base_dir. If other settings are not specified, default values are used.
Returns
-------
output: dict
The expected output of
nestcheck.process_polychord_stats(file_root, base_dir) | [
"Writes",
"a",
"dummy",
"PolyChord",
"format",
".",
"stats",
"file",
"for",
"tests",
"functions",
"for",
"processing",
"stats",
"files",
".",
"This",
"is",
"written",
"to",
":"
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/write_polychord_output.py#L157-L248 | train |
ejhigson/nestcheck | nestcheck/diagnostics_tables.py | run_list_error_values | def run_list_error_values(run_list, estimator_list, estimator_names,
n_simulate=100, **kwargs):
"""Gets a data frame with calculation values and error diagnostics for each
run in the input run list.
NB when parallelised the results will not be produced in order (so results
from some run number will not nessesarily correspond to that number run in
run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int, optional
Number of bootstrap replications to use on each run.
thread_pvalue: bool, optional
Whether or not to compute KS test diaganostic for correlations between
threads within a run.
bs_stat_dist: bool, optional
Whether or not to compute statistical distance between bootstrap error
distributions diaganostic.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs (or pairs of runs for pairwise comparisons).
Columns have titles given by estimator_names and show results for the
different functions in estimators_list.
"""
thread_pvalue = kwargs.pop('thread_pvalue', False)
bs_stat_dist = kwargs.pop('bs_stat_dist', False)
parallel = kwargs.pop('parallel', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names = {1}'
.format(len(estimator_list), len(estimator_names)))
# Calculation results
# -------------------
df = estimator_values_df(run_list, estimator_list, parallel=parallel,
estimator_names=estimator_names)
df.index = df.index.map(str)
df['calculation type'] = 'values'
df.set_index('calculation type', drop=True, append=True, inplace=True)
df = df.reorder_levels(['calculation type', 'run'])
# Bootstrap stds
# --------------
# Create bs_vals_df then convert to stds so bs_vals_df does not need to be
# recomputed if bs_stat_dist is True
bs_vals_df = bs_values_df(run_list, estimator_list, estimator_names,
n_simulate, parallel=parallel)
bs_std_df = bs_vals_df.applymap(lambda x: np.std(x, ddof=1))
bs_std_df.index.name = 'run'
bs_std_df['calculation type'] = 'bootstrap std'
bs_std_df.set_index('calculation type', drop=True, append=True,
inplace=True)
bs_std_df = bs_std_df.reorder_levels(['calculation type', 'run'])
df = pd.concat([df, bs_std_df])
# Pairwise KS p-values on threads
# -------------------------------
if thread_pvalue:
t_vals_df = thread_values_df(
run_list, estimator_list, estimator_names, parallel=parallel)
t_d_df = pairwise_dists_on_cols(t_vals_df, earth_mover_dist=False,
energy_dist=False)
# Keep only the p value not the distance measures
t_d_df = t_d_df.xs('ks pvalue', level='calculation type',
drop_level=False)
# Append 'thread ' to caclulcation type
t_d_df.index.set_levels(['thread ks pvalue'], level='calculation type',
inplace=True)
df = pd.concat([df, t_d_df])
# Pairwise distances on BS distributions
# --------------------------------------
if bs_stat_dist:
b_d_df = pairwise_dists_on_cols(bs_vals_df)
# Select only statistical distances - not KS pvalue as this is not
# useful for the bootstrap resample distributions (see Higson et al.
# 2019 for more details).
dists = ['ks distance', 'earth mover distance', 'energy distance']
b_d_df = b_d_df.loc[pd.IndexSlice[dists, :], :]
# Append 'bootstrap ' to caclulcation type
new_ind = ['bootstrap ' +
b_d_df.index.get_level_values('calculation type'),
b_d_df.index.get_level_values('run')]
b_d_df.set_index(new_ind, inplace=True)
df = pd.concat([df, b_d_df])
return df | python | def run_list_error_values(run_list, estimator_list, estimator_names,
n_simulate=100, **kwargs):
"""Gets a data frame with calculation values and error diagnostics for each
run in the input run list.
NB when parallelised the results will not be produced in order (so results
from some run number will not nessesarily correspond to that number run in
run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int, optional
Number of bootstrap replications to use on each run.
thread_pvalue: bool, optional
Whether or not to compute KS test diaganostic for correlations between
threads within a run.
bs_stat_dist: bool, optional
Whether or not to compute statistical distance between bootstrap error
distributions diaganostic.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs (or pairs of runs for pairwise comparisons).
Columns have titles given by estimator_names and show results for the
different functions in estimators_list.
"""
thread_pvalue = kwargs.pop('thread_pvalue', False)
bs_stat_dist = kwargs.pop('bs_stat_dist', False)
parallel = kwargs.pop('parallel', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names = {1}'
.format(len(estimator_list), len(estimator_names)))
# Calculation results
# -------------------
df = estimator_values_df(run_list, estimator_list, parallel=parallel,
estimator_names=estimator_names)
df.index = df.index.map(str)
df['calculation type'] = 'values'
df.set_index('calculation type', drop=True, append=True, inplace=True)
df = df.reorder_levels(['calculation type', 'run'])
# Bootstrap stds
# --------------
# Create bs_vals_df then convert to stds so bs_vals_df does not need to be
# recomputed if bs_stat_dist is True
bs_vals_df = bs_values_df(run_list, estimator_list, estimator_names,
n_simulate, parallel=parallel)
bs_std_df = bs_vals_df.applymap(lambda x: np.std(x, ddof=1))
bs_std_df.index.name = 'run'
bs_std_df['calculation type'] = 'bootstrap std'
bs_std_df.set_index('calculation type', drop=True, append=True,
inplace=True)
bs_std_df = bs_std_df.reorder_levels(['calculation type', 'run'])
df = pd.concat([df, bs_std_df])
# Pairwise KS p-values on threads
# -------------------------------
if thread_pvalue:
t_vals_df = thread_values_df(
run_list, estimator_list, estimator_names, parallel=parallel)
t_d_df = pairwise_dists_on_cols(t_vals_df, earth_mover_dist=False,
energy_dist=False)
# Keep only the p value not the distance measures
t_d_df = t_d_df.xs('ks pvalue', level='calculation type',
drop_level=False)
# Append 'thread ' to caclulcation type
t_d_df.index.set_levels(['thread ks pvalue'], level='calculation type',
inplace=True)
df = pd.concat([df, t_d_df])
# Pairwise distances on BS distributions
# --------------------------------------
if bs_stat_dist:
b_d_df = pairwise_dists_on_cols(bs_vals_df)
# Select only statistical distances - not KS pvalue as this is not
# useful for the bootstrap resample distributions (see Higson et al.
# 2019 for more details).
dists = ['ks distance', 'earth mover distance', 'energy distance']
b_d_df = b_d_df.loc[pd.IndexSlice[dists, :], :]
# Append 'bootstrap ' to caclulcation type
new_ind = ['bootstrap ' +
b_d_df.index.get_level_values('calculation type'),
b_d_df.index.get_level_values('run')]
b_d_df.set_index(new_ind, inplace=True)
df = pd.concat([df, b_d_df])
return df | [
"def",
"run_list_error_values",
"(",
"run_list",
",",
"estimator_list",
",",
"estimator_names",
",",
"n_simulate",
"=",
"100",
",",
"*",
"*",
"kwargs",
")",
":",
"thread_pvalue",
"=",
"kwargs",
".",
"pop",
"(",
"'thread_pvalue'",
",",
"False",
")",
"bs_stat_di... | Gets a data frame with calculation values and error diagnostics for each
run in the input run list.
NB when parallelised the results will not be produced in order (so results
from some run number will not nessesarily correspond to that number run in
run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int, optional
Number of bootstrap replications to use on each run.
thread_pvalue: bool, optional
Whether or not to compute KS test diaganostic for correlations between
threads within a run.
bs_stat_dist: bool, optional
Whether or not to compute statistical distance between bootstrap error
distributions diaganostic.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs (or pairs of runs for pairwise comparisons).
Columns have titles given by estimator_names and show results for the
different functions in estimators_list. | [
"Gets",
"a",
"data",
"frame",
"with",
"calculation",
"values",
"and",
"error",
"diagnostics",
"for",
"each",
"run",
"in",
"the",
"input",
"run",
"list",
"."
] | 29151c314deb89746fd674f27f6ce54b77603189 | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/diagnostics_tables.py#L17-L119 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.