repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
Scifabric/pbs | helpers.py | _update_task_presenter_bundle_js | python | def _update_task_presenter_bundle_js(project):
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js | Append to template a distribution bundle js. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L90-L101 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_project(config, task_presenter, results,
long_description, tutorial):
"""Update a project."""
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _add_helpingmaterials(config, helping_file, helping_type):
"""Add helping materials to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _delete_tasks(config, task_id, limit=100, offset=0):
"""Delete tasks from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.delete_task(task_id)
check_api_error(response)
return "Task.id = %s and its associated task_runs have been deleted" % task_id
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
while len(tasks) > 0:
for t in tasks:
response = config.pbclient.delete_task(t.id)
check_api_error(response)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks and task_runs have been deleted"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
"""Update tasks redundancy from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.find_tasks(project.id, id=task_id)
check_api_error(response)
task = response[0]
task.n_answers = redundancy
response = config.pbclient.update_task(task)
check_api_error(response)
msg = "Task.id = %s redundancy has been updated to %s" % (task_id,
redundancy)
return msg
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
with click.progressbar(tasks, label="Updating Tasks") as pgbar:
while len(tasks) > 0:
for t in pgbar:
t.n_answers = redundancy
response = config.pbclient.update_task(t)
check_api_error(response)
# Check if for the data we have to auto-throttle task update
sleep, msg = enable_auto_throttling(config, tasks)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks redundancy have been updated"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def find_project_by_short_name(short_name, pbclient, all=None):
"""Return project by short_name."""
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise
def check_api_error(api_response):
print(api_response)
"""Check if returned API response contains an error."""
if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:
print("Server response code: %s" % api_response['code'])
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
if type(api_response) == dict and (api_response.get('status') == 'failed'):
if 'ProgrammingError' in api_response.get('exception_cls'):
raise DatabaseError(message='PyBossa database error.',
error=api_response)
if ('DBIntegrityError' in api_response.get('exception_cls') and
'project' in api_response.get('target')):
msg = 'PyBossa project already exists.'
raise ProjectAlreadyExists(message=msg, error=api_response)
if 'project' in api_response.get('target'):
raise ProjectNotFound(message='PyBossa Project not found',
error=api_response)
if 'task' in api_response.get('target'):
raise TaskNotFound(message='PyBossa Task not found',
error=api_response)
else:
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
"""Format the error for the given module."""
logging.error(module)
# Beautify JSON error
print error.message
print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def create_task_info(task):
"""Create task_info field."""
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info
def create_helping_material_info(helping):
"""Create helping_material_info field."""
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
"Return sleep time if more tasks than those " \
"allowed by the server are requested."
# Get header from server
endpoint = config.server + endpoint
headers = requests.head(endpoint).headers
# Get limit
server_limit = int(headers.get('X-RateLimit-Remaining', 0))
limit = server_limit or limit
# Get reset time
reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
# Compute sleep time
sleep = (reset_epoch -
calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
msg = 'Warning: %s remaining hits to the endpoint.' \
' Auto-throttling enabled!' % limit
# If we have less than 10 hits on the endpoint, sleep
if limit <= 10:
return (sleep, msg)
else:
return 0, None
def format_json_task(task_info):
"""Format task_info into JSON if applicable."""
try:
return json.loads(task_info)
except:
return task_info
def row_empty(row):
"""Check if all values in row are None."""
for value in row:
if value is not None:
return False
return True
class PbsHandler(PatternMatchingEventHandler):
patterns = ['*/template.html', '*/tutorial.html',
'*/long_description.md', '*/results.html',
'*/bundle.js', '*/bundle.min.js']
def __init__(self, config, task_presenter, results,
long_description, tutorial):
super(PbsHandler, self).__init__()
self.config = config
self.task_presenter = task_presenter
self.results = results
self.long_description = long_description
self.tutorial = tutorial
def on_modified(self, event):
what = 'directory' if event.is_directory else 'file'
logging.info("Modified %s: %s", what, event.src_path)
res = _update_project(self.config, self.task_presenter, self.results,
self.long_description, self.tutorial)
logging.info(res)
|
Scifabric/pbs | helpers.py | _update_project | python | def _update_project(config, task_presenter, results,
long_description, tutorial):
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise | Update a project. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L103-L138 | [
"def find_project_by_short_name(short_name, pbclient, all=None):\n \"\"\"Return project by short_name.\"\"\"\n try:\n response = pbclient.find_project(short_name=short_name, all=all)\n check_api_error(response)\n if (len(response) == 0):\n msg = '%s not found! You can use the all=1 argument to \\\n search in all the server.'\n error = 'Project Not Found'\n raise ProjectNotFound(msg, error)\n return response[0]\n except exceptions.ConnectionError:\n raise\n except ProjectNotFound:\n raise\n",
"def check_api_error(api_response):\n print(api_response)\n \"\"\"Check if returned API response contains an error.\"\"\"\n if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:\n print(\"Server response code: %s\" % api_response['code'])\n print(\"Server response: %s\" % api_response)\n raise exceptions.HTTPError('Unexpected response', response=api_response)\n if type(api_response) == dict and (api_response.get('status') == 'failed'):\n if 'ProgrammingError' in api_response.get('exception_cls'):\n raise DatabaseError(message='PyBossa database error.',\n error=api_response)\n if ('DBIntegrityError' in api_response.get('exception_cls') and\n 'project' in api_response.get('target')):\n msg = 'PyBossa project already exists.'\n raise ProjectAlreadyExists(message=msg, error=api_response)\n if 'project' in api_response.get('target'):\n raise ProjectNotFound(message='PyBossa Project not found',\n error=api_response)\n if 'task' in api_response.get('target'):\n raise TaskNotFound(message='PyBossa Task not found',\n error=api_response)\n else:\n print(\"Server response: %s\" % api_response)\n raise exceptions.HTTPError('Unexpected response', response=api_response)\n",
"def _update_task_presenter_bundle_js(project):\n \"\"\"Append to template a distribution bundle js.\"\"\"\n if os.path.isfile ('bundle.min.js'):\n with open('bundle.min.js') as f:\n js = f.read()\n project.info['task_presenter'] += \"<script>\\n%s\\n</script>\" % js\n return\n\n if os.path.isfile ('bundle.js'):\n with open('bundle.js') as f:\n js = f.read()\n project.info['task_presenter'] += \"<script>\\n%s\\n</script>\" % js\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _add_helpingmaterials(config, helping_file, helping_type):
"""Add helping materials to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _delete_tasks(config, task_id, limit=100, offset=0):
"""Delete tasks from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.delete_task(task_id)
check_api_error(response)
return "Task.id = %s and its associated task_runs have been deleted" % task_id
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
while len(tasks) > 0:
for t in tasks:
response = config.pbclient.delete_task(t.id)
check_api_error(response)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks and task_runs have been deleted"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
"""Update tasks redundancy from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.find_tasks(project.id, id=task_id)
check_api_error(response)
task = response[0]
task.n_answers = redundancy
response = config.pbclient.update_task(task)
check_api_error(response)
msg = "Task.id = %s redundancy has been updated to %s" % (task_id,
redundancy)
return msg
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
with click.progressbar(tasks, label="Updating Tasks") as pgbar:
while len(tasks) > 0:
for t in pgbar:
t.n_answers = redundancy
response = config.pbclient.update_task(t)
check_api_error(response)
# Check if for the data we have to auto-throttle task update
sleep, msg = enable_auto_throttling(config, tasks)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks redundancy have been updated"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def find_project_by_short_name(short_name, pbclient, all=None):
"""Return project by short_name."""
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise
def check_api_error(api_response):
print(api_response)
"""Check if returned API response contains an error."""
if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:
print("Server response code: %s" % api_response['code'])
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
if type(api_response) == dict and (api_response.get('status') == 'failed'):
if 'ProgrammingError' in api_response.get('exception_cls'):
raise DatabaseError(message='PyBossa database error.',
error=api_response)
if ('DBIntegrityError' in api_response.get('exception_cls') and
'project' in api_response.get('target')):
msg = 'PyBossa project already exists.'
raise ProjectAlreadyExists(message=msg, error=api_response)
if 'project' in api_response.get('target'):
raise ProjectNotFound(message='PyBossa Project not found',
error=api_response)
if 'task' in api_response.get('target'):
raise TaskNotFound(message='PyBossa Task not found',
error=api_response)
else:
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
"""Format the error for the given module."""
logging.error(module)
# Beautify JSON error
print error.message
print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def create_task_info(task):
"""Create task_info field."""
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info
def create_helping_material_info(helping):
"""Create helping_material_info field."""
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
"Return sleep time if more tasks than those " \
"allowed by the server are requested."
# Get header from server
endpoint = config.server + endpoint
headers = requests.head(endpoint).headers
# Get limit
server_limit = int(headers.get('X-RateLimit-Remaining', 0))
limit = server_limit or limit
# Get reset time
reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
# Compute sleep time
sleep = (reset_epoch -
calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
msg = 'Warning: %s remaining hits to the endpoint.' \
' Auto-throttling enabled!' % limit
# If we have less than 10 hits on the endpoint, sleep
if limit <= 10:
return (sleep, msg)
else:
return 0, None
def format_json_task(task_info):
"""Format task_info into JSON if applicable."""
try:
return json.loads(task_info)
except:
return task_info
def row_empty(row):
"""Check if all values in row are None."""
for value in row:
if value is not None:
return False
return True
class PbsHandler(PatternMatchingEventHandler):
    """Watchdog handler that re-pushes the project when a tracked file changes."""

    # Files whose modification triggers a project update (templates,
    # description, results page and the JS bundles).
    patterns = ['*/template.html', '*/tutorial.html',
                '*/long_description.md', '*/results.html',
                '*/bundle.js', '*/bundle.min.js']

    def __init__(self, config, task_presenter, results,
                 long_description, tutorial):
        """Store the pbs config and the file names to upload on change."""
        super(PbsHandler, self).__init__()
        self.config = config
        self.task_presenter = task_presenter
        self.results = results
        self.long_description = long_description
        self.tutorial = tutorial

    def on_modified(self, event):
        """Log the change and push the updated project to the server."""
        what = 'directory' if event.is_directory else 'file'
        logging.info("Modified %s: %s", what, event.src_path)
        res = _update_project(self.config, self.task_presenter, self.results,
                              self.long_description, self.tutorial)
        logging.info(res)
|
Scifabric/pbs | helpers.py | _load_data | python | def _load_data(data_file, data_type):
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data | Load data from CSV, JSON, Excel, ..., formats. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L141-L194 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
    """Create a project in a PyBossa server.

    Returns a human-readable status string; re-raises API lookup errors.
    """
    project = config.project
    try:
        resp = config.pbclient.create_project(project['name'],
                                              project['short_name'],
                                              project['description'])
        check_api_error(resp)
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding"
                % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
    return "Project: %s created!" % project['short_name']
def _update_project_watch(config, task_presenter, results,
                          long_description, tutorial):  # pragma: no cover
    """Watch the current folder and re-push the project on every change.

    Blocks forever (polling once a second) until interrupted with Ctrl-C.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    handler = PbsHandler(config, task_presenter, results,
                         long_description, tutorial)
    observer = Observer()
    # Only the project folder itself is watched, not its sub-folders.
    observer.schedule(handler, os.getcwd(), recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
                    long_description, tutorial):
    """Update a project.

    Reads the presenter/results/description/tutorial assets from disk,
    merges them into the project found on the server and pushes it back.
    Returns a human-readable status string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        # Refresh the basic attributes from the local config.
        project.name = config.project['name']
        project.short_name = config.project['short_name']
        project.description = config.project['description']
        # Pull each asset file into the project payload.
        with open(long_description, 'r') as f:
            project.long_description = f.read()
        with open(task_presenter, 'r') as f:
            project.info['task_presenter'] = f.read()
        # Inline a JS bundle (if present) after the presenter template.
        _update_task_presenter_bundle_js(project)
        with open(results, 'r') as f:
            project.info['results'] = f.read()
        with open(tutorial, 'r') as f:
            project.info['tutorial'] = f.read()
        check_api_error(config.pbclient.update_project(project))
        return "Project %s updated!" % config.project['short_name']
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding"
                % config.server)
    except ProjectNotFound:
        return ("Project not found! The project: %s is missing." \
                " Use the flag --all=1 to search in all the server " \
                % config.project['short_name'])
    except TaskNotFound:
        raise
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
    """Add tasks to a project.

    Loads rows from *tasks_file* and creates one task per row, honouring
    the server's rate-limit headers between creations.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(tasks_file, tasks_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        with click.progressbar(data, label="Adding Tasks") as pgbar:
            for row in pgbar:
                response = config.pbclient.create_task(
                    project_id=project.id,
                    info=create_task_info(row),
                    n_answers=redundancy,
                    priority_0=priority)
                # Ask the server whether we must throttle before continuing.
                sleep, msg = enable_auto_throttling(config, data)
                check_api_error(response)
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s tasks added to project: %s" % (len(data),
                config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding"
                % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project.

    Loads rows from *helping_file* (json/csv/po/properties/excel) and
    creates one helping material per row.  Rows carrying a 'file_path'
    upload the media file first, then merge the row's remaining info on
    top of what the server stored.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create the media object first; the server fills
                    # hm.info with the upload metadata.
                    hm = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                info=helping_info,
                                                                file_path=file_path)
                    check_api_error(hm)
                    # Fold the row's info on top of the server's metadata
                    # and push the merged material back.
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                      info=helping_info)
                    check_api_error(response)
                # NOTE(review): endpoint looks misspelled ('helpinmaterial',
                # missing a 'g') — confirm against the server API; left
                # unchanged here.
                sleep, msg = enable_auto_throttling(config, data,
                                                    endpoint='/api/helpinmaterial')
                # If true, warn user
                if sleep:  # pragma: no cover
                    click.secho(msg, fg='yellow')
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" % (len(data),
                config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _delete_tasks(config, task_id, limit=100, offset=0):
    """Delete tasks from a project.

    With a *task_id*, deletes only that task; otherwise pages through all
    the project's tasks and deletes every one of them.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            check_api_error(config.pbclient.delete_task(task_id))
            return "Task.id = %s and its associated task_runs have been deleted" % task_id
        # Page through the task list until the server returns nothing.
        page = config.pbclient.get_tasks(project.id, limit, offset)
        while len(page) > 0:
            for task in page:
                check_api_error(config.pbclient.delete_task(task.id))
            offset += limit
            page = config.pbclient.get_tasks(project.id, limit, offset)
        return "All tasks and task_runs have been deleted"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding"
                % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
    """Update tasks redundancy from a project.

    With a *task_id*, updates only that task's n_answers; otherwise pages
    through every task in the project.  Returns a status string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.find_tasks(project.id, id=task_id)
            check_api_error(response)
            task = response[0]
            task.n_answers = redundancy
            response = config.pbclient.update_task(task)
            check_api_error(response)
            return "Task.id = %s redundancy has been updated to %s" % (task_id,
                                                                       redundancy)
        tasks = config.pbclient.get_tasks(project.id, limit, offset)
        while len(tasks) > 0:
            # BUG FIX: the progress bar used to be created once, outside the
            # pagination loop; after the first page the bar's iterator was
            # exhausted, the inner for-loop never executed again, `offset`
            # never advanced and the while-loop spun forever.  Create a
            # fresh bar per page instead.
            with click.progressbar(tasks, label="Updating Tasks") as pgbar:
                for t in pgbar:
                    t.n_answers = redundancy
                    response = config.pbclient.update_task(t)
                    check_api_error(response)
                    # Check if for the data we have to auto-throttle task update
                    sleep, msg = enable_auto_throttling(config, tasks)
                    # If auto-throttling enabled, sleep for sleep seconds
                    if sleep:  # pragma: no cover
                        time.sleep(sleep)
            offset += limit
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
        return "All tasks redundancy have been updated"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding"
                % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def find_project_by_short_name(short_name, pbclient, all=None):
    """Return project by short_name.

    :param short_name: project short name to look up.
    :param pbclient: configured pybossa-client instance.
    :param all: pass 1 to search across all projects on the server.
    :raises ProjectNotFound: when the server returns no match.
    """
    try:
        response = pbclient.find_project(short_name=short_name, all=all)
        check_api_error(response)
        if len(response) == 0:
            # BUG FIX: the message used to contain a bare, never-filled '%s'
            # placeholder (and stray continuation whitespace); interpolate
            # the short name so the user sees which project is missing.
            msg = ('%s not found! You can use the all=1 argument to '
                   'search in all the server.' % short_name)
            raise ProjectNotFound(msg, 'Project Not Found')
        return response[0]
    except (exceptions.ConnectionError, ProjectNotFound):
        raise
def check_api_error(api_response):
    """Check if returned API response contains an error.

    Non-dict responses (e.g. lists of domain objects) are considered OK
    and simply pass through (returns None).

    :raises HTTPError: on an unexpected status code or payload.
    :raises DatabaseError, ProjectAlreadyExists, ProjectNotFound,
        TaskNotFound: mapped from a ``status == 'failed'`` payload.
    """
    # FIX: removed a stray debug print(api_response) that preceded the
    # docstring (making it a plain statement), and replaced the Python-2-only
    # `<>` operator with `!=`.
    if isinstance(api_response, dict) and 'code' in api_response and \
            api_response['code'] != 200:
        print("Server response code: %s" % api_response['code'])
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError('Unexpected response', response=api_response)
    if isinstance(api_response, dict) and (api_response.get('status') == 'failed'):
        if 'ProgrammingError' in api_response.get('exception_cls'):
            raise DatabaseError(message='PyBossa database error.',
                                error=api_response)
        if ('DBIntegrityError' in api_response.get('exception_cls') and
                'project' in api_response.get('target')):
            raise ProjectAlreadyExists(message='PyBossa project already exists.',
                                       error=api_response)
        if 'project' in api_response.get('target'):
            raise ProjectNotFound(message='PyBossa Project not found',
                                  error=api_response)
        if 'task' in api_response.get('target'):
            raise TaskNotFound(message='PyBossa Task not found',
                               error=api_response)
        # Failed status that maps to no known target: surface it raw.
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
"""Format the error for the given module."""
logging.error(module)
# Beautify JSON error
print error.message
print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def create_task_info(task):
"""Create task_info field."""
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info
def create_helping_material_info(helping):
"""Create helping_material_info field."""
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
"Return sleep time if more tasks than those " \
"allowed by the server are requested."
# Get header from server
endpoint = config.server + endpoint
headers = requests.head(endpoint).headers
# Get limit
server_limit = int(headers.get('X-RateLimit-Remaining', 0))
limit = server_limit or limit
# Get reset time
reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
# Compute sleep time
sleep = (reset_epoch -
calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
msg = 'Warning: %s remaining hits to the endpoint.' \
' Auto-throttling enabled!' % limit
# If we have less than 10 hits on the endpoint, sleep
if limit <= 10:
return (sleep, msg)
else:
return 0, None
def format_json_task(task_info):
"""Format task_info into JSON if applicable."""
try:
return json.loads(task_info)
except:
return task_info
def row_empty(row):
"""Check if all values in row are None."""
for value in row:
if value is not None:
return False
return True
class PbsHandler(PatternMatchingEventHandler):
patterns = ['*/template.html', '*/tutorial.html',
'*/long_description.md', '*/results.html',
'*/bundle.js', '*/bundle.min.js']
def __init__(self, config, task_presenter, results,
long_description, tutorial):
super(PbsHandler, self).__init__()
self.config = config
self.task_presenter = task_presenter
self.results = results
self.long_description = long_description
self.tutorial = tutorial
def on_modified(self, event):
what = 'directory' if event.is_directory else 'file'
logging.info("Modified %s: %s", what, event.src_path)
res = _update_project(self.config, self.task_presenter, self.results,
self.long_description, self.tutorial)
logging.info(res)
|
Scifabric/pbs | helpers.py | _add_tasks | python | def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise | Add tasks to a project. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L197-L230 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
long_description, tutorial):
"""Update a project."""
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_helpingmaterials(config, helping_file, helping_type):
"""Add helping materials to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _delete_tasks(config, task_id, limit=100, offset=0):
"""Delete tasks from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.delete_task(task_id)
check_api_error(response)
return "Task.id = %s and its associated task_runs have been deleted" % task_id
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
while len(tasks) > 0:
for t in tasks:
response = config.pbclient.delete_task(t.id)
check_api_error(response)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks and task_runs have been deleted"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
"""Update tasks redundancy from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.find_tasks(project.id, id=task_id)
check_api_error(response)
task = response[0]
task.n_answers = redundancy
response = config.pbclient.update_task(task)
check_api_error(response)
msg = "Task.id = %s redundancy has been updated to %s" % (task_id,
redundancy)
return msg
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
with click.progressbar(tasks, label="Updating Tasks") as pgbar:
while len(tasks) > 0:
for t in pgbar:
t.n_answers = redundancy
response = config.pbclient.update_task(t)
check_api_error(response)
# Check if for the data we have to auto-throttle task update
sleep, msg = enable_auto_throttling(config, tasks)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks redundancy have been updated"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def find_project_by_short_name(short_name, pbclient, all=None):
"""Return project by short_name."""
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise
def check_api_error(api_response):
print(api_response)
"""Check if returned API response contains an error."""
if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:
print("Server response code: %s" % api_response['code'])
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
if type(api_response) == dict and (api_response.get('status') == 'failed'):
if 'ProgrammingError' in api_response.get('exception_cls'):
raise DatabaseError(message='PyBossa database error.',
error=api_response)
if ('DBIntegrityError' in api_response.get('exception_cls') and
'project' in api_response.get('target')):
msg = 'PyBossa project already exists.'
raise ProjectAlreadyExists(message=msg, error=api_response)
if 'project' in api_response.get('target'):
raise ProjectNotFound(message='PyBossa Project not found',
error=api_response)
if 'task' in api_response.get('target'):
raise TaskNotFound(message='PyBossa Task not found',
error=api_response)
else:
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
"""Format the error for the given module."""
logging.error(module)
# Beautify JSON error
print error.message
print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def create_task_info(task):
"""Create task_info field."""
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info
def create_helping_material_info(helping):
"""Create helping_material_info field."""
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
"Return sleep time if more tasks than those " \
"allowed by the server are requested."
# Get header from server
endpoint = config.server + endpoint
headers = requests.head(endpoint).headers
# Get limit
server_limit = int(headers.get('X-RateLimit-Remaining', 0))
limit = server_limit or limit
# Get reset time
reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
# Compute sleep time
sleep = (reset_epoch -
calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
msg = 'Warning: %s remaining hits to the endpoint.' \
' Auto-throttling enabled!' % limit
# If we have less than 10 hits on the endpoint, sleep
if limit <= 10:
return (sleep, msg)
else:
return 0, None
def format_json_task(task_info):
"""Format task_info into JSON if applicable."""
try:
return json.loads(task_info)
except:
return task_info
def row_empty(row):
"""Check if all values in row are None."""
for value in row:
if value is not None:
return False
return True
class PbsHandler(PatternMatchingEventHandler):
patterns = ['*/template.html', '*/tutorial.html',
'*/long_description.md', '*/results.html',
'*/bundle.js', '*/bundle.min.js']
def __init__(self, config, task_presenter, results,
long_description, tutorial):
super(PbsHandler, self).__init__()
self.config = config
self.task_presenter = task_presenter
self.results = results
self.long_description = long_description
self.tutorial = tutorial
def on_modified(self, event):
what = 'directory' if event.is_directory else 'file'
logging.info("Modified %s: %s", what, event.src_path)
res = _update_project(self.config, self.task_presenter, self.results,
self.long_description, self.tutorial)
logging.info(res)
|
Scifabric/pbs | helpers.py | _add_helpingmaterials | python | def _add_helpingmaterials(config, helping_file, helping_type):
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise | Add helping materials to a project. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L233-L277 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
long_description, tutorial):
"""Update a project."""
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _delete_tasks(config, task_id, limit=100, offset=0):
"""Delete tasks from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.delete_task(task_id)
check_api_error(response)
return "Task.id = %s and its associated task_runs have been deleted" % task_id
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
while len(tasks) > 0:
for t in tasks:
response = config.pbclient.delete_task(t.id)
check_api_error(response)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks and task_runs have been deleted"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
"""Update tasks redundancy from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.find_tasks(project.id, id=task_id)
check_api_error(response)
task = response[0]
task.n_answers = redundancy
response = config.pbclient.update_task(task)
check_api_error(response)
msg = "Task.id = %s redundancy has been updated to %s" % (task_id,
redundancy)
return msg
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
with click.progressbar(tasks, label="Updating Tasks") as pgbar:
while len(tasks) > 0:
for t in pgbar:
t.n_answers = redundancy
response = config.pbclient.update_task(t)
check_api_error(response)
# Check if for the data we have to auto-throttle task update
sleep, msg = enable_auto_throttling(config, tasks)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks redundancy have been updated"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def find_project_by_short_name(short_name, pbclient, all=None):
"""Return project by short_name."""
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise
def check_api_error(api_response):
print(api_response)
"""Check if returned API response contains an error."""
if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:
print("Server response code: %s" % api_response['code'])
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
if type(api_response) == dict and (api_response.get('status') == 'failed'):
if 'ProgrammingError' in api_response.get('exception_cls'):
raise DatabaseError(message='PyBossa database error.',
error=api_response)
if ('DBIntegrityError' in api_response.get('exception_cls') and
'project' in api_response.get('target')):
msg = 'PyBossa project already exists.'
raise ProjectAlreadyExists(message=msg, error=api_response)
if 'project' in api_response.get('target'):
raise ProjectNotFound(message='PyBossa Project not found',
error=api_response)
if 'task' in api_response.get('target'):
raise TaskNotFound(message='PyBossa Task not found',
error=api_response)
else:
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
"""Format the error for the given module."""
logging.error(module)
# Beautify JSON error
print error.message
print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def create_task_info(task):
"""Create task_info field."""
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info
def create_helping_material_info(helping):
"""Create helping_material_info field."""
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
"Return sleep time if more tasks than those " \
"allowed by the server are requested."
# Get header from server
endpoint = config.server + endpoint
headers = requests.head(endpoint).headers
# Get limit
server_limit = int(headers.get('X-RateLimit-Remaining', 0))
limit = server_limit or limit
# Get reset time
reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
# Compute sleep time
sleep = (reset_epoch -
calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
msg = 'Warning: %s remaining hits to the endpoint.' \
' Auto-throttling enabled!' % limit
# If we have less than 10 hits on the endpoint, sleep
if limit <= 10:
return (sleep, msg)
else:
return 0, None
def format_json_task(task_info):
"""Format task_info into JSON if applicable."""
try:
return json.loads(task_info)
except:
return task_info
def row_empty(row):
"""Check if all values in row are None."""
for value in row:
if value is not None:
return False
return True
class PbsHandler(PatternMatchingEventHandler):
patterns = ['*/template.html', '*/tutorial.html',
'*/long_description.md', '*/results.html',
'*/bundle.js', '*/bundle.min.js']
def __init__(self, config, task_presenter, results,
long_description, tutorial):
super(PbsHandler, self).__init__()
self.config = config
self.task_presenter = task_presenter
self.results = results
self.long_description = long_description
self.tutorial = tutorial
def on_modified(self, event):
what = 'directory' if event.is_directory else 'file'
logging.info("Modified %s: %s", what, event.src_path)
res = _update_project(self.config, self.task_presenter, self.results,
self.long_description, self.tutorial)
logging.info(res)
|
Scifabric/pbs | helpers.py | _delete_tasks | python | def _delete_tasks(config, task_id, limit=100, offset=0):
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.delete_task(task_id)
check_api_error(response)
return "Task.id = %s and its associated task_runs have been deleted" % task_id
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
while len(tasks) > 0:
for t in tasks:
response = config.pbclient.delete_task(t.id)
check_api_error(response)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks and task_runs have been deleted"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise | Delete tasks from a project. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L281-L305 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
long_description, tutorial):
"""Update a project."""
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _add_helpingmaterials(config, helping_file, helping_type):
"""Add helping materials to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
"""Update tasks redundancy from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.find_tasks(project.id, id=task_id)
check_api_error(response)
task = response[0]
task.n_answers = redundancy
response = config.pbclient.update_task(task)
check_api_error(response)
msg = "Task.id = %s redundancy has been updated to %s" % (task_id,
redundancy)
return msg
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
with click.progressbar(tasks, label="Updating Tasks") as pgbar:
while len(tasks) > 0:
for t in pgbar:
t.n_answers = redundancy
response = config.pbclient.update_task(t)
check_api_error(response)
# Check if for the data we have to auto-throttle task update
sleep, msg = enable_auto_throttling(config, tasks)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks redundancy have been updated"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def find_project_by_short_name(short_name, pbclient, all=None):
"""Return project by short_name."""
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise
def check_api_error(api_response):
print(api_response)
"""Check if returned API response contains an error."""
if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:
print("Server response code: %s" % api_response['code'])
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
if type(api_response) == dict and (api_response.get('status') == 'failed'):
if 'ProgrammingError' in api_response.get('exception_cls'):
raise DatabaseError(message='PyBossa database error.',
error=api_response)
if ('DBIntegrityError' in api_response.get('exception_cls') and
'project' in api_response.get('target')):
msg = 'PyBossa project already exists.'
raise ProjectAlreadyExists(message=msg, error=api_response)
if 'project' in api_response.get('target'):
raise ProjectNotFound(message='PyBossa Project not found',
error=api_response)
if 'task' in api_response.get('target'):
raise TaskNotFound(message='PyBossa Task not found',
error=api_response)
else:
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
"""Format the error for the given module."""
logging.error(module)
# Beautify JSON error
print error.message
print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def create_task_info(task):
"""Create task_info field."""
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info
def create_helping_material_info(helping):
"""Create helping_material_info field."""
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
"Return sleep time if more tasks than those " \
"allowed by the server are requested."
# Get header from server
endpoint = config.server + endpoint
headers = requests.head(endpoint).headers
# Get limit
server_limit = int(headers.get('X-RateLimit-Remaining', 0))
limit = server_limit or limit
# Get reset time
reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
# Compute sleep time
sleep = (reset_epoch -
calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
msg = 'Warning: %s remaining hits to the endpoint.' \
' Auto-throttling enabled!' % limit
# If we have less than 10 hits on the endpoint, sleep
if limit <= 10:
return (sleep, msg)
else:
return 0, None
def format_json_task(task_info):
"""Format task_info into JSON if applicable."""
try:
return json.loads(task_info)
except:
return task_info
def row_empty(row):
"""Check if all values in row are None."""
for value in row:
if value is not None:
return False
return True
class PbsHandler(PatternMatchingEventHandler):
patterns = ['*/template.html', '*/tutorial.html',
'*/long_description.md', '*/results.html',
'*/bundle.js', '*/bundle.min.js']
def __init__(self, config, task_presenter, results,
long_description, tutorial):
super(PbsHandler, self).__init__()
self.config = config
self.task_presenter = task_presenter
self.results = results
self.long_description = long_description
self.tutorial = tutorial
def on_modified(self, event):
what = 'directory' if event.is_directory else 'file'
logging.info("Modified %s: %s", what, event.src_path)
res = _update_project(self.config, self.task_presenter, self.results,
self.long_description, self.tutorial)
logging.info(res)
|
Scifabric/pbs | helpers.py | _update_tasks_redundancy | python | def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.find_tasks(project.id, id=task_id)
check_api_error(response)
task = response[0]
task.n_answers = redundancy
response = config.pbclient.update_task(task)
check_api_error(response)
msg = "Task.id = %s redundancy has been updated to %s" % (task_id,
redundancy)
return msg
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
with click.progressbar(tasks, label="Updating Tasks") as pgbar:
while len(tasks) > 0:
for t in pgbar:
t.n_answers = redundancy
response = config.pbclient.update_task(t)
check_api_error(response)
# Check if for the data we have to auto-throttle task update
sleep, msg = enable_auto_throttling(config, tasks)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks redundancy have been updated"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise | Update tasks redundancy from a project. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L308-L345 | [
"def find_project_by_short_name(short_name, pbclient, all=None):\n \"\"\"Return project by short_name.\"\"\"\n try:\n response = pbclient.find_project(short_name=short_name, all=all)\n check_api_error(response)\n if (len(response) == 0):\n msg = '%s not found! You can use the all=1 argument to \\\n search in all the server.'\n error = 'Project Not Found'\n raise ProjectNotFound(msg, error)\n return response[0]\n except exceptions.ConnectionError:\n raise\n except ProjectNotFound:\n raise\n",
"def check_api_error(api_response):\n print(api_response)\n \"\"\"Check if returned API response contains an error.\"\"\"\n if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:\n print(\"Server response code: %s\" % api_response['code'])\n print(\"Server response: %s\" % api_response)\n raise exceptions.HTTPError('Unexpected response', response=api_response)\n if type(api_response) == dict and (api_response.get('status') == 'failed'):\n if 'ProgrammingError' in api_response.get('exception_cls'):\n raise DatabaseError(message='PyBossa database error.',\n error=api_response)\n if ('DBIntegrityError' in api_response.get('exception_cls') and\n 'project' in api_response.get('target')):\n msg = 'PyBossa project already exists.'\n raise ProjectAlreadyExists(message=msg, error=api_response)\n if 'project' in api_response.get('target'):\n raise ProjectNotFound(message='PyBossa Project not found',\n error=api_response)\n if 'task' in api_response.get('target'):\n raise TaskNotFound(message='PyBossa Task not found',\n error=api_response)\n else:\n print(\"Server response: %s\" % api_response)\n raise exceptions.HTTPError('Unexpected response', response=api_response)\n",
"def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):\n \"Return sleep time if more tasks than those \" \\\n \"allowed by the server are requested.\"\n # Get header from server\n endpoint = config.server + endpoint\n headers = requests.head(endpoint).headers\n # Get limit\n server_limit = int(headers.get('X-RateLimit-Remaining', 0))\n limit = server_limit or limit\n # Get reset time\n reset_epoch = int(headers.get('X-RateLimit-Reset', 0))\n # Compute sleep time\n sleep = (reset_epoch -\n calendar.timegm(datetime.datetime.utcnow().utctimetuple()))\n msg = 'Warning: %s remaining hits to the endpoint.' \\\n ' Auto-throttling enabled!' % limit\n # If we have less than 10 hits on the endpoint, sleep\n if limit <= 10:\n return (sleep, msg)\n else:\n return 0, None\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
long_description, tutorial):
"""Update a project."""
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _add_helpingmaterials(config, helping_file, helping_type):
"""Add helping materials to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _delete_tasks(config, task_id, limit=100, offset=0):
"""Delete tasks from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.delete_task(task_id)
check_api_error(response)
return "Task.id = %s and its associated task_runs have been deleted" % task_id
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
while len(tasks) > 0:
for t in tasks:
response = config.pbclient.delete_task(t.id)
check_api_error(response)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks and task_runs have been deleted"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def find_project_by_short_name(short_name, pbclient, all=None):
"""Return project by short_name."""
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise
def check_api_error(api_response):
print(api_response)
"""Check if returned API response contains an error."""
if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:
print("Server response code: %s" % api_response['code'])
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
if type(api_response) == dict and (api_response.get('status') == 'failed'):
if 'ProgrammingError' in api_response.get('exception_cls'):
raise DatabaseError(message='PyBossa database error.',
error=api_response)
if ('DBIntegrityError' in api_response.get('exception_cls') and
'project' in api_response.get('target')):
msg = 'PyBossa project already exists.'
raise ProjectAlreadyExists(message=msg, error=api_response)
if 'project' in api_response.get('target'):
raise ProjectNotFound(message='PyBossa Project not found',
error=api_response)
if 'task' in api_response.get('target'):
raise TaskNotFound(message='PyBossa Task not found',
error=api_response)
else:
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
"""Format the error for the given module."""
logging.error(module)
# Beautify JSON error
print error.message
print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def create_task_info(task):
"""Create task_info field."""
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info
def create_helping_material_info(helping):
"""Create helping_material_info field."""
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
"Return sleep time if more tasks than those " \
"allowed by the server are requested."
# Get header from server
endpoint = config.server + endpoint
headers = requests.head(endpoint).headers
# Get limit
server_limit = int(headers.get('X-RateLimit-Remaining', 0))
limit = server_limit or limit
# Get reset time
reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
# Compute sleep time
sleep = (reset_epoch -
calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
msg = 'Warning: %s remaining hits to the endpoint.' \
' Auto-throttling enabled!' % limit
# If we have less than 10 hits on the endpoint, sleep
if limit <= 10:
return (sleep, msg)
else:
return 0, None
def format_json_task(task_info):
"""Format task_info into JSON if applicable."""
try:
return json.loads(task_info)
except:
return task_info
def row_empty(row):
"""Check if all values in row are None."""
for value in row:
if value is not None:
return False
return True
class PbsHandler(PatternMatchingEventHandler):
patterns = ['*/template.html', '*/tutorial.html',
'*/long_description.md', '*/results.html',
'*/bundle.js', '*/bundle.min.js']
def __init__(self, config, task_presenter, results,
long_description, tutorial):
super(PbsHandler, self).__init__()
self.config = config
self.task_presenter = task_presenter
self.results = results
self.long_description = long_description
self.tutorial = tutorial
def on_modified(self, event):
what = 'directory' if event.is_directory else 'file'
logging.info("Modified %s: %s", what, event.src_path)
res = _update_project(self.config, self.task_presenter, self.results,
self.long_description, self.tutorial)
logging.info(res)
|
Scifabric/pbs | helpers.py | find_project_by_short_name | python | def find_project_by_short_name(short_name, pbclient, all=None):
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise | Return project by short_name. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L348-L362 | [
"def check_api_error(api_response):\n print(api_response)\n \"\"\"Check if returned API response contains an error.\"\"\"\n if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:\n print(\"Server response code: %s\" % api_response['code'])\n print(\"Server response: %s\" % api_response)\n raise exceptions.HTTPError('Unexpected response', response=api_response)\n if type(api_response) == dict and (api_response.get('status') == 'failed'):\n if 'ProgrammingError' in api_response.get('exception_cls'):\n raise DatabaseError(message='PyBossa database error.',\n error=api_response)\n if ('DBIntegrityError' in api_response.get('exception_cls') and\n 'project' in api_response.get('target')):\n msg = 'PyBossa project already exists.'\n raise ProjectAlreadyExists(message=msg, error=api_response)\n if 'project' in api_response.get('target'):\n raise ProjectNotFound(message='PyBossa Project not found',\n error=api_response)\n if 'task' in api_response.get('target'):\n raise TaskNotFound(message='PyBossa Task not found',\n error=api_response)\n else:\n print(\"Server response: %s\" % api_response)\n raise exceptions.HTTPError('Unexpected response', response=api_response)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
long_description, tutorial):
"""Update a project."""
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _add_helpingmaterials(config, helping_file, helping_type):
"""Add helping materials to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _delete_tasks(config, task_id, limit=100, offset=0):
"""Delete tasks from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.delete_task(task_id)
check_api_error(response)
return "Task.id = %s and its associated task_runs have been deleted" % task_id
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
while len(tasks) > 0:
for t in tasks:
response = config.pbclient.delete_task(t.id)
check_api_error(response)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks and task_runs have been deleted"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
"""Update tasks redundancy from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.find_tasks(project.id, id=task_id)
check_api_error(response)
task = response[0]
task.n_answers = redundancy
response = config.pbclient.update_task(task)
check_api_error(response)
msg = "Task.id = %s redundancy has been updated to %s" % (task_id,
redundancy)
return msg
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
with click.progressbar(tasks, label="Updating Tasks") as pgbar:
while len(tasks) > 0:
for t in pgbar:
t.n_answers = redundancy
response = config.pbclient.update_task(t)
check_api_error(response)
# Check if for the data we have to auto-throttle task update
sleep, msg = enable_auto_throttling(config, tasks)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks redundancy have been updated"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def check_api_error(api_response):
print(api_response)
"""Check if returned API response contains an error."""
if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:
print("Server response code: %s" % api_response['code'])
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
if type(api_response) == dict and (api_response.get('status') == 'failed'):
if 'ProgrammingError' in api_response.get('exception_cls'):
raise DatabaseError(message='PyBossa database error.',
error=api_response)
if ('DBIntegrityError' in api_response.get('exception_cls') and
'project' in api_response.get('target')):
msg = 'PyBossa project already exists.'
raise ProjectAlreadyExists(message=msg, error=api_response)
if 'project' in api_response.get('target'):
raise ProjectNotFound(message='PyBossa Project not found',
error=api_response)
if 'task' in api_response.get('target'):
raise TaskNotFound(message='PyBossa Task not found',
error=api_response)
else:
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
"""Format the error for the given module."""
logging.error(module)
# Beautify JSON error
print error.message
print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def create_task_info(task):
"""Create task_info field."""
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info
def create_helping_material_info(helping):
"""Create helping_material_info field."""
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
"Return sleep time if more tasks than those " \
"allowed by the server are requested."
# Get header from server
endpoint = config.server + endpoint
headers = requests.head(endpoint).headers
# Get limit
server_limit = int(headers.get('X-RateLimit-Remaining', 0))
limit = server_limit or limit
# Get reset time
reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
# Compute sleep time
sleep = (reset_epoch -
calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
msg = 'Warning: %s remaining hits to the endpoint.' \
' Auto-throttling enabled!' % limit
# If we have less than 10 hits on the endpoint, sleep
if limit <= 10:
return (sleep, msg)
else:
return 0, None
def format_json_task(task_info):
"""Format task_info into JSON if applicable."""
try:
return json.loads(task_info)
except:
return task_info
def row_empty(row):
"""Check if all values in row are None."""
for value in row:
if value is not None:
return False
return True
class PbsHandler(PatternMatchingEventHandler):
patterns = ['*/template.html', '*/tutorial.html',
'*/long_description.md', '*/results.html',
'*/bundle.js', '*/bundle.min.js']
def __init__(self, config, task_presenter, results,
long_description, tutorial):
super(PbsHandler, self).__init__()
self.config = config
self.task_presenter = task_presenter
self.results = results
self.long_description = long_description
self.tutorial = tutorial
def on_modified(self, event):
what = 'directory' if event.is_directory else 'file'
logging.info("Modified %s: %s", what, event.src_path)
res = _update_project(self.config, self.task_presenter, self.results,
self.long_description, self.tutorial)
logging.info(res)
|
Scifabric/pbs | helpers.py | check_api_error | python | def check_api_error(api_response):
print(api_response)
if type(api_response) == dict and 'code' in api_response and api_response['code'] <> 200:
print("Server response code: %s" % api_response['code'])
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response)
if type(api_response) == dict and (api_response.get('status') == 'failed'):
if 'ProgrammingError' in api_response.get('exception_cls'):
raise DatabaseError(message='PyBossa database error.',
error=api_response)
if ('DBIntegrityError' in api_response.get('exception_cls') and
'project' in api_response.get('target')):
msg = 'PyBossa project already exists.'
raise ProjectAlreadyExists(message=msg, error=api_response)
if 'project' in api_response.get('target'):
raise ProjectNotFound(message='PyBossa Project not found',
error=api_response)
if 'task' in api_response.get('target'):
raise TaskNotFound(message='PyBossa Task not found',
error=api_response)
else:
print("Server response: %s" % api_response)
raise exceptions.HTTPError('Unexpected response', response=api_response) | Check if returned API response contains an error. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L365-L388 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
                          long_description, tutorial):  # pragma: no cover
    """Update a project in a loop.

    Watches the current working directory with watchdog and pushes the
    project's asset files to the server every time one of them changes
    (see PbsHandler for the watched patterns).  Blocks until Ctrl-C.

    :param config: pbs Config object holding the PyBossa client.
    :param task_presenter: path to the task presenter HTML file.
    :param results: path to the results HTML file.
    :param long_description: path to the long description Markdown file.
    :param tutorial: path to the tutorial HTML file.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    path = os.getcwd()
    event_handler = PbsHandler(config, task_presenter, results,
                               long_description, tutorial)
    observer = Observer()
    # We only want the current folder, not sub-folders
    observer.schedule(event_handler, path, recursive=False)
    observer.start()
    try:
        # Keep the main thread alive; the observer runs in its own thread.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def _update_task_presenter_bundle_js(project):
    """Append a distribution bundle JS file to the task presenter.

    Looks for ``bundle.min.js`` first and falls back to ``bundle.js``;
    at most one of them is inlined into the presenter template.

    :param project: PyBossa project object; its
        ``info['task_presenter']`` string is extended in place.
    """
    # Prefer the minified bundle; stop at the first candidate found
    # (the original duplicated this branch verbatim for each file).
    for candidate in ('bundle.min.js', 'bundle.js'):
        if os.path.isfile(candidate):
            with open(candidate) as f:
                js = f.read()
            project.info['task_presenter'] += "<script>\n%s\n</script>" % js
            return
def _update_project(config, task_presenter, results,
                    long_description, tutorial):
    """Update a project's metadata and asset files on the server.

    Reads the four asset files from disk, refreshes the project's
    name/short_name/description from the local config, inlines a JS
    bundle into the presenter if one exists, and pushes everything to
    the PyBossa server in a single update call.

    :returns: a human-readable status message (also for connection and
        project-not-found errors); re-raises TaskNotFound.
    """
    try:
        # Get project
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        # Update attributes
        project.name = config.project['name']
        project.short_name = config.project['short_name']
        project.description = config.project['description']
        # Update long_description
        with open(long_description, 'r') as f:
            project.long_description = f.read()
        # Update task presenter
        with open(task_presenter, 'r') as f:
            project.info['task_presenter'] = f.read()
        # Inline bundle.min.js / bundle.js into the presenter if present.
        _update_task_presenter_bundle_js(project)
        # Update results
        with open(results, 'r') as f:
            project.info['results'] = f.read()
        # Update tutorial
        with open(tutorial, 'r') as f:
            project.info['tutorial'] = f.read()
        response = config.pbclient.update_project(project)
        check_api_error(response)
        return ("Project %s updated!" % config.project['short_name'])
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except ProjectNotFound:
        return ("Project not found! The project: %s is missing." \
                " Use the flag --all=1 to search in all the server " \
                % config.project['short_name'])
    except TaskNotFound:
        raise
def _load_data(data_file, data_type):
    """Load task data from a JSON, CSV, Excel, PO or properties file.

    :param data_file: open file-like object exposing ``read()`` (and a
        ``name`` attribute when *data_type* is None).
    :param data_type: explicit format ('json', 'csv', 'xlsx', 'xlsm',
        'xltx', 'xltm', 'po', 'properties'); when None it is derived
        from the file extension.
    :returns: list of dicts, one per task row; an empty list for
        unknown formats.
    """
    raw_data = data_file.read()
    if data_type is None:
        data_type = data_file.name.split('.')[-1]
    # Data list to process
    data = []
    # JSON type
    if data_type == 'json':
        return json.loads(raw_data)
    # CSV type
    elif data_type == 'csv':
        csv_data = StringIO(raw_data)
        reader = csv.DictReader(csv_data, delimiter=',')
        for line in reader:
            data.append(line)
        return data
    elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
        excel_data = StringIO(raw_data)
        wb = openpyxl.load_workbook(excel_data)
        ws = wb.active
        # Header row: normalize "Column Name" -> "column_name".
        headers = []
        for row in ws.iter_rows(max_row=1):
            for cell in row:
                headers.append('_'.join(cell.value.split(" ")).lower())
        # Remaining rows: emulate csv.DictReader, skipping blank rows
        # and rows whose width does not match the header.
        # NOTE(review): `row_offset` is a deprecated openpyxl keyword --
        # confirm against the pinned openpyxl version.
        for row in ws.iter_rows(row_offset=1):
            values = [cell.value for cell in row]
            if len(values) == len(headers) and not row_empty(values):
                data.append(dict(itertools.izip(headers, values)))
        return data
    # PO type
    elif data_type == 'po':
        po = polib.pofile(raw_data)
        for entry in po.untranslated_entries():
            data.append(entry.__dict__)
        return data
    # PROPERTIES type (used in Java and Firefox extensions)
    elif data_type == 'properties':
        for l in raw_data.split('\n'):
            if l:
                # Split on the FIRST '=' only: property values may
                # legitimately contain '=' (the old code raised
                # ValueError on such lines).
                var_id, string = l.split('=', 1)
                data.append(dict(var_id=var_id, string=string))
        return data
    else:
        return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
    """Add tasks to a project.

    Loads rows from *tasks_file* (json/csv/xlsx/po/properties) and
    creates one task per row, showing a progress bar and sleeping when
    the server's rate limit is nearly exhausted.

    :param priority: priority_0 value applied to every created task.
    :param redundancy: n_answers value applied to every created task.
    :returns: a human-readable status message; re-raises project/task
        lookup exceptions.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(tasks_file, tasks_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # If true, warn user
        # if sleep: # pragma: no cover
        #     click.secho(msg, fg='yellow')
        # Show progress bar
        with click.progressbar(data, label="Adding Tasks") as pgbar:
            for d in pgbar:
                task_info = create_task_info(d)
                response = config.pbclient.create_task(project_id=project.id,
                                                       info=task_info,
                                                       n_answers=redundancy,
                                                       priority_0=priority)
                # Check if for the data we have to auto-throttle task creation
                # NOTE(review): this issues one HEAD request to the server
                # per created task -- consider checking less often.
                sleep, msg = enable_auto_throttling(config, data)
                check_api_error(response)
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s tasks added to project: %s" % (len(data),
                                                   config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project.

    Loads rows from *helping_file* and creates one helping material per
    row.  Rows that carry a ``file_path`` first upload the media file
    and then merge the remaining info into the created object.

    :returns: a human-readable status message; re-raises project/task
        lookup exceptions.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create first the media object
                    hm = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                info=helping_info,
                                                                file_path=file_path)
                    check_api_error(hm)
                    # Merge the row's info on top of whatever the server
                    # stored for the uploaded media object.
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                      info=helping_info)
                    check_api_error(response)
                # Check if for the data we have to auto-throttle task creation
                # NOTE(review): '/api/helpinmaterial' looks misspelled
                # (missing a 'g') -- confirm against the server's routes.
                sleep, msg = enable_auto_throttling(config, data,
                                                    endpoint='/api/helpinmaterial')
                # If true, warn user
                if sleep:  # pragma: no cover
                    click.secho(msg, fg='yellow')
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" % (len(data),
                                                               config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _delete_tasks(config, task_id, limit=100, offset=0):
    """Delete tasks (and their task_runs) from a project.

    When *task_id* is given only that task is removed; otherwise every
    task of the project is deleted page by page (page size *limit*).

    :returns: a human-readable status message; re-raises project/task
        lookup exceptions.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            check_api_error(config.pbclient.delete_task(task_id))
            return ("Task.id = %s and its associated task_runs have been "
                    "deleted" % task_id)
        page = config.pbclient.get_tasks(project.id, limit, offset)
        while page:
            for task in page:
                check_api_error(config.pbclient.delete_task(task.id))
            offset += limit
            page = config.pbclient.get_tasks(project.id, limit, offset)
        return "All tasks and task_runs have been deleted"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding"
                % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
    """Update tasks redundancy (n_answers) for a project.

    When *task_id* is given only that task is updated; otherwise every
    task of the project is fetched page by page (page size *limit*) and
    updated, with a progress bar and auto-throttling between updates.

    :returns: a human-readable status message; re-raises project/task
        lookup exceptions.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.find_tasks(project.id, id=task_id)
            check_api_error(response)
            task = response[0]
            task.n_answers = redundancy
            response = config.pbclient.update_task(task)
            check_api_error(response)
            msg = "Task.id = %s redundancy has been updated to %s" % (task_id,
                                                                      redundancy)
            return msg
        tasks = config.pbclient.get_tasks(project.id, limit, offset)
        while len(tasks) > 0:
            # Open a fresh progress bar for each page: the previous code
            # wrapped only the first page's iterator, so once it was
            # exhausted every later page iterated nothing and those
            # tasks were silently never updated.
            with click.progressbar(tasks, label="Updating Tasks") as pgbar:
                for t in pgbar:
                    t.n_answers = redundancy
                    response = config.pbclient.update_task(t)
                    check_api_error(response)
                    # Check if for the data we have to auto-throttle task update
                    sleep, msg = enable_auto_throttling(config, tasks)
                    # If auto-throttling enabled, sleep for sleep seconds
                    if sleep:  # pragma: no cover
                        time.sleep(sleep)
            offset += limit
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
        return "All tasks redundancy have been updated"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def find_project_by_short_name(short_name, pbclient, all=None):
    """Return project by short_name.

    :param short_name: the project's short name to look up.
    :param pbclient: configured pbclient instance used for the query.
    :param all: when truthy, search across all projects on the server,
        not only the ones owned by the authenticated user.
    :returns: the first matching project object.
    :raises ProjectNotFound: when the server returns no match.
    """
    try:
        response = pbclient.find_project(short_name=short_name, all=all)
        check_api_error(response)
        if (len(response) == 0):
            msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
            error = 'Project Not Found'
            raise ProjectNotFound(msg, error)
        return response[0]
    except exceptions.ConnectionError:
        raise
    except ProjectNotFound:
        raise
def format_error(module, error):
    """Format the error for the given module.

    Logs the failing module, pretty-prints the error's message and its
    JSON payload, then exits the process with status 1 (never returns).
    """
    logging.error(module)
    # Beautify JSON error
    print error.message
    print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
    exit(1)
def create_task_info(task):
    """Return the payload to store as the task's ``info`` field.

    When the row carries a non-empty nested ``info`` entry that entry
    is used verbatim; otherwise the whole row becomes the info dict.
    """
    nested = task.get('info')
    return nested if nested else task
def create_helping_material_info(helping):
    """Split a helping-material row into ``(info, file_path)``.

    A non-empty nested ``info`` entry wins over the whole row.  A
    truthy ``file_path`` key is removed from the info dict (mutating
    it) and returned separately; otherwise file_path is None.
    """
    info = helping['info'] if helping.get('info') else helping
    file_path = None
    if info.get('file_path'):
        file_path = info.pop('file_path')
    return info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
    """Return (sleep_seconds, warning) when the rate limit is nearly spent.

    Sends a HEAD request to *endpoint* on the configured server and
    reads the X-RateLimit-Remaining / X-RateLimit-Reset headers.  When
    10 or fewer hits remain, returns how long to sleep until the limit
    window resets together with a warning message; otherwise (0, None).

    :param data: kept for interface compatibility (unused here).
    :param limit: fallback hit budget used when the server sends no
        X-RateLimit-Remaining header.
    """
    # Get headers from the server.
    headers = requests.head(config.server + endpoint).headers
    # Trust the server's remaining-hits header whenever it is present.
    # The old `server_limit or limit` fallback treated an exhausted
    # quota (0 remaining) like a missing header and skipped throttling
    # exactly when it was most needed.
    if 'X-RateLimit-Remaining' in headers:
        limit = int(headers['X-RateLimit-Remaining'])
    # Seconds until the rate-limit window resets.
    reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
    sleep = (reset_epoch -
             calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
    msg = 'Warning: %s remaining hits to the endpoint.' \
          ' Auto-throttling enabled!' % limit
    # If we have 10 or fewer hits left on the endpoint, throttle.
    if limit <= 10:
        return (sleep, msg)
    return 0, None
def format_json_task(task_info):
    """Return *task_info* parsed as JSON, or unchanged when not valid JSON.

    Non-string inputs and malformed JSON strings pass through untouched.
    """
    try:
        return json.loads(task_info)
    except (ValueError, TypeError):
        # Only decoding failures fall back to the raw value; the old
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        return task_info
def row_empty(row):
    """Return True when every cell in *row* is None (or row is empty)."""
    return all(cell is None for cell in row)
class PbsHandler(PatternMatchingEventHandler):
    """Watchdog handler that redeploys the project when an asset changes."""

    # Only react to the project asset files pbs manages.
    patterns = ['*/template.html', '*/tutorial.html',
                '*/long_description.md', '*/results.html',
                '*/bundle.js', '*/bundle.min.js']

    def __init__(self, config, task_presenter, results,
                 long_description, tutorial):
        """Store the pbs config and the asset file paths to push on change."""
        super(PbsHandler, self).__init__()
        self.config = config
        self.task_presenter = task_presenter
        self.results = results
        self.long_description = long_description
        self.tutorial = tutorial

    def on_modified(self, event):
        """Push the updated project to the server and log the outcome."""
        what = 'directory' if event.is_directory else 'file'
        logging.info("Modified %s: %s", what, event.src_path)
        res = _update_project(self.config, self.task_presenter, self.results,
                              self.long_description, self.tutorial)
        logging.info(res)
|
Scifabric/pbs | helpers.py | format_error | python | def format_error(module, error):
logging.error(module)
# Beautify JSON error
print error.message
print json.dumps(error.error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1) | Format the error for the given module. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L391-L397 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
long_description, tutorial):
"""Update a project."""
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _add_helpingmaterials(config, helping_file, helping_type):
"""Add helping materials to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _delete_tasks(config, task_id, limit=100, offset=0):
"""Delete tasks from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.delete_task(task_id)
check_api_error(response)
return "Task.id = %s and its associated task_runs have been deleted" % task_id
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
while len(tasks) > 0:
for t in tasks:
response = config.pbclient.delete_task(t.id)
check_api_error(response)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks and task_runs have been deleted"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
"""Update tasks redundancy from a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
if task_id:
response = config.pbclient.find_tasks(project.id, id=task_id)
check_api_error(response)
task = response[0]
task.n_answers = redundancy
response = config.pbclient.update_task(task)
check_api_error(response)
msg = "Task.id = %s redundancy has been updated to %s" % (task_id,
redundancy)
return msg
else:
limit = limit
offset = offset
tasks = config.pbclient.get_tasks(project.id, limit, offset)
with click.progressbar(tasks, label="Updating Tasks") as pgbar:
while len(tasks) > 0:
for t in pgbar:
t.n_answers = redundancy
response = config.pbclient.update_task(t)
check_api_error(response)
# Check if for the data we have to auto-throttle task update
sleep, msg = enable_auto_throttling(config, tasks)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
offset += limit
tasks = config.pbclient.get_tasks(project.id, limit, offset)
return "All tasks redundancy have been updated"
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def find_project_by_short_name(short_name, pbclient, all=None):
"""Return project by short_name."""
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise
def check_api_error(api_response):
    """Check if the returned API response contains an error.

    Non-dict responses and successful dict payloads return None.  Known
    failure payloads raise the matching pbs exception; anything else
    raises a generic requests HTTPError.
    """
    # NOTE: the stray debug `print(api_response)` that preceded the
    # docstring has been removed; it spammed stdout on every API call
    # and demoted the docstring to a no-op statement.
    if (isinstance(api_response, dict) and 'code' in api_response
            and api_response['code'] != 200):
        print("Server response code: %s" % api_response['code'])
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError('Unexpected response', response=api_response)
    if isinstance(api_response, dict) and api_response.get('status') == 'failed':
        if 'ProgrammingError' in api_response.get('exception_cls'):
            raise DatabaseError(message='PyBossa database error.',
                                error=api_response)
        if ('DBIntegrityError' in api_response.get('exception_cls') and
                'project' in api_response.get('target')):
            msg = 'PyBossa project already exists.'
            raise ProjectAlreadyExists(message=msg, error=api_response)
        if 'project' in api_response.get('target'):
            raise ProjectNotFound(message='PyBossa Project not found',
                                  error=api_response)
        if 'task' in api_response.get('target'):
            raise TaskNotFound(message='PyBossa Task not found',
                               error=api_response)
        else:
            print("Server response: %s" % api_response)
            raise exceptions.HTTPError('Unexpected response',
                                       response=api_response)
def create_task_info(task):
"""Create task_info field."""
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info
def create_helping_material_info(helping):
"""Create helping_material_info field."""
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
"Return sleep time if more tasks than those " \
"allowed by the server are requested."
# Get header from server
endpoint = config.server + endpoint
headers = requests.head(endpoint).headers
# Get limit
server_limit = int(headers.get('X-RateLimit-Remaining', 0))
limit = server_limit or limit
# Get reset time
reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
# Compute sleep time
sleep = (reset_epoch -
calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
msg = 'Warning: %s remaining hits to the endpoint.' \
' Auto-throttling enabled!' % limit
# If we have less than 10 hits on the endpoint, sleep
if limit <= 10:
return (sleep, msg)
else:
return 0, None
def format_json_task(task_info):
"""Format task_info into JSON if applicable."""
try:
return json.loads(task_info)
except:
return task_info
def row_empty(row):
"""Check if all values in row are None."""
for value in row:
if value is not None:
return False
return True
class PbsHandler(PatternMatchingEventHandler):
patterns = ['*/template.html', '*/tutorial.html',
'*/long_description.md', '*/results.html',
'*/bundle.js', '*/bundle.min.js']
def __init__(self, config, task_presenter, results,
long_description, tutorial):
super(PbsHandler, self).__init__()
self.config = config
self.task_presenter = task_presenter
self.results = results
self.long_description = long_description
self.tutorial = tutorial
def on_modified(self, event):
what = 'directory' if event.is_directory else 'file'
logging.info("Modified %s: %s", what, event.src_path)
res = _update_project(self.config, self.task_presenter, self.results,
self.long_description, self.tutorial)
logging.info(res)
|
Scifabric/pbs | helpers.py | create_task_info | python | def create_task_info(task):
task_info = None
if task.get('info'):
task_info = task['info']
else:
task_info = task
return task_info | Create task_info field. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L400-L407 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise
def _update_project_watch(config, task_presenter, results,
long_description, tutorial): # pragma: no cover
"""Update a project in a loop."""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = os.getcwd()
event_handler = PbsHandler(config, task_presenter, results,
long_description, tutorial)
observer = Observer()
# We only want the current folder, not sub-folders
observer.schedule(event_handler, path, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
long_description, tutorial):
"""Update a project."""
try:
# Get project
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
# Update attributes
project.name = config.project['name']
project.short_name = config.project['short_name']
project.description = config.project['description']
# Update long_description
with open(long_description, 'r') as f:
project.long_description = f.read()
# Update task presenter
with open(task_presenter, 'r') as f:
project.info['task_presenter'] = f.read()
_update_task_presenter_bundle_js(project)
# Update results
with open(results, 'r') as f:
project.info['results'] = f.read()
# Update tutorial
with open(tutorial, 'r') as f:
project.info['tutorial'] = f.read()
response = config.pbclient.update_project(project)
check_api_error(response)
return ("Project %s updated!" % config.project['short_name'])
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except ProjectNotFound:
return ("Project not found! The project: %s is missing." \
" Use the flag --all=1 to search in all the server " \
% config.project['short_name'])
except TaskNotFound:
raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
    """Add tasks to a project.

    :param config: pbs config object (holds pbclient, project and server).
    :param tasks_file: open file-like object with the tasks data.
    :param tasks_type: file format (json, csv, po, properties, xlsx...);
        None derives it from the file extension.
    :param priority: priority_0 value for every created task.
    :param redundancy: n_answers value for every created task.
    :returns: a status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(tasks_file, tasks_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # If true, warn user
        # if sleep: # pragma: no cover
        #     click.secho(msg, fg='yellow')
        # Show progress bar
        with click.progressbar(data, label="Adding Tasks") as pgbar:
            for d in pgbar:
                task_info = create_task_info(d)
                response = config.pbclient.create_task(project_id=project.id,
                                                       info=task_info,
                                                       n_answers=redundancy,
                                                       priority_0=priority)
                # Check if for the data we have to auto-throttle task creation.
                # NOTE(review): this issues one HEAD request per task;
                # consider checking the rate limit once per batch instead.
                sleep, msg = enable_auto_throttling(config, data)
                check_api_error(response)
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s tasks added to project: %s" % (len(data),
                                                   config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project.

    :param config: pbs config object (holds pbclient, project and server).
    :param helping_file: open file-like object with the helping materials.
    :param helping_type: file format; None derives it from the extension.
    :returns: a status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create first the media object
                    hm = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                info=helping_info,
                                                                file_path=file_path)
                    check_api_error(hm)
                    # Merge the server-side info (media URL, ...) with ours
                    # and push the combined info back.
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                      info=helping_info)
                    check_api_error(response)
                # Check if for the data we have to auto-throttle task creation.
                # NOTE(review): '/api/helpinmaterial' looks like a typo for
                # '/api/helpingmaterial' -- confirm against the server routes.
                sleep, msg = enable_auto_throttling(config, data,
                                                    endpoint='/api/helpinmaterial')
                # If true, warn user
                if sleep:  # pragma: no cover
                    click.secho(msg, fg='yellow')
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" % (len(data),
                                                               config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _delete_tasks(config, task_id, limit=100, offset=0):
    """Delete tasks from a project.

    When *task_id* is given only that task is deleted; otherwise every
    task is deleted, fetching them *limit* at a time.
    :returns: a status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.delete_task(task_id)
            check_api_error(response)
            return "Task.id = %s and its associated task_runs have been deleted" % task_id
        else:
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
            while len(tasks) > 0:
                for t in tasks:
                    response = config.pbclient.delete_task(t.id)
                    check_api_error(response)
                # Re-fetch from the SAME offset: the rows just deleted have
                # shifted the remaining tasks down, so advancing the offset
                # (as the original did) skipped every other page.
                tasks = config.pbclient.get_tasks(project.id, limit, offset)
            return "All tasks and task_runs have been deleted"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
    """Update tasks redundancy (n_answers) for one task or all tasks.

    :param task_id: when truthy, only that task is updated.
    :returns: a status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.find_tasks(project.id, id=task_id)
            check_api_error(response)
            task = response[0]
            task.n_answers = redundancy
            response = config.pbclient.update_task(task)
            check_api_error(response)
            return ("Task.id = %s redundancy has been updated to %s"
                    % (task_id, redundancy))
        else:
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
            while len(tasks) > 0:
                # A fresh progress bar per page: the original created one
                # bar over the first page only, so tasks beyond the first
                # `limit` were fetched but never iterated (the exhausted
                # bar yielded nothing on later passes).
                with click.progressbar(tasks, label="Updating Tasks") as pgbar:
                    for t in pgbar:
                        t.n_answers = redundancy
                        response = config.pbclient.update_task(t)
                        check_api_error(response)
                        # Check if for the data we have to auto-throttle
                        # task update.
                        sleep, msg = enable_auto_throttling(config, tasks)
                        # If auto-throttling enabled, sleep for sleep seconds
                        if sleep:  # pragma: no cover
                            time.sleep(sleep)
                offset += limit
                tasks = config.pbclient.get_tasks(project.id, limit, offset)
            return "All tasks redundancy have been updated"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def find_project_by_short_name(short_name, pbclient, all=None):
    """Return the first project matching *short_name*.

    :param pbclient: PyBossa API client.
    :param all: when truthy, search across all users' projects.
    :raises ProjectNotFound: when no project matches.
    """
    response = pbclient.find_project(short_name=short_name, all=all)
    check_api_error(response)
    if len(response) == 0:
        # Interpolate the short name so the user sees which project is
        # missing (the original built the template but never filled the
        # %s placeholder).
        msg = ('%s not found! You can use the all=1 argument to '
               'search in all the server.' % short_name)
        error = 'Project Not Found'
        raise ProjectNotFound(msg, error)
    return response[0]
def check_api_error(api_response):
    """Check if the returned API response contains an error and raise the
    matching pbs exception.

    Error responses are dicts carrying 'code' or 'status'/'exception_cls'/
    'target'; anything else is considered successful.
    """
    # NOTE: the original printed every response unconditionally (leftover
    # debugging) and used the Python 2-only `<>` operator.
    if isinstance(api_response, dict) and 'code' in api_response and api_response['code'] != 200:
        print("Server response code: %s" % api_response['code'])
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError('Unexpected response', response=api_response)
    if isinstance(api_response, dict) and (api_response.get('status') == 'failed'):
        if 'ProgrammingError' in api_response.get('exception_cls'):
            raise DatabaseError(message='PyBossa database error.',
                                error=api_response)
        if ('DBIntegrityError' in api_response.get('exception_cls') and
                'project' in api_response.get('target')):
            msg = 'PyBossa project already exists.'
            raise ProjectAlreadyExists(message=msg, error=api_response)
        if 'project' in api_response.get('target'):
            raise ProjectNotFound(message='PyBossa Project not found',
                                  error=api_response)
        if 'task' in api_response.get('target'):
            raise TaskNotFound(message='PyBossa Task not found',
                               error=api_response)
        else:
            print("Server response: %s" % api_response)
            raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
    """Log the failing module, pretty-print the pbs error as JSON and
    exit with status 1."""
    logging.error(module)
    # Beautify JSON error. print() with a single argument behaves the
    # same on Python 2 and 3, unlike the original print statements.
    print(error.message)
    print(json.dumps(error.error, sort_keys=True, indent=4,
                     separators=(',', ': ')))
    exit(1)
def create_helping_material_info(helping):
    """Build the helping material info dict and extract its file path.

    Returns a ``(helping_info, file_path)`` tuple. When the info carries a
    truthy ``file_path`` it is removed from the dict (the caller's dict is
    modified in place); otherwise ``file_path`` is ``None``.
    """
    info = helping['info'] if helping.get('info') else helping
    file_path = None
    if info.get('file_path'):
        file_path = info.pop('file_path')
    return info, file_path
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
    """Return (sleep_seconds, warning_msg) when the API rate limit is
    nearly exhausted, otherwise (0, None).

    Issues a HEAD request to *endpoint* and reads the X-RateLimit-*
    headers the server sets.
    """
    # Get headers from server
    url = config.server + endpoint
    headers = requests.head(url).headers
    # Prefer the server-reported remaining quota. The original used
    # ``server_limit or limit``, which silently fell back to the default
    # when the server reported 0 remaining hits -- exactly when throttling
    # is most needed.
    remaining = headers.get('X-RateLimit-Remaining')
    if remaining is not None:
        limit = int(remaining)
    # Epoch at which the rate-limit window resets.
    reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
    # Seconds to sleep until the window resets.
    sleep = (reset_epoch -
             calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
    # If we have 10 or fewer hits left on the endpoint, throttle.
    if limit <= 10:
        msg = ('Warning: %s remaining hits to the endpoint.'
               ' Auto-throttling enabled!' % limit)
        return (sleep, msg)
    return (0, None)
def format_json_task(task_info):
    """Return *task_info* parsed as JSON, or unchanged when it is not a
    valid JSON string."""
    try:
        return json.loads(task_info)
    except (ValueError, TypeError):
        # Not a JSON document (ValueError) or not a string at all
        # (TypeError): keep it as-is. The original bare `except` also
        # swallowed KeyboardInterrupt/SystemExit.
        return task_info
def row_empty(row):
    """Return True when every value in *row* is None (or row is empty)."""
    return all(value is None for value in row)
class PbsHandler(PatternMatchingEventHandler):
    """Watchdog handler: pushes the whole project to the server whenever
    one of the watched project files changes on disk."""

    # Only the files that make up a pbs project trigger an update.
    patterns = ['*/template.html', '*/tutorial.html',
                '*/long_description.md', '*/results.html',
                '*/bundle.js', '*/bundle.min.js']

    def __init__(self, config, task_presenter, results,
                 long_description, tutorial):
        """Store the config and the paths of the project files to push."""
        super(PbsHandler, self).__init__()
        self.config = config
        self.task_presenter = task_presenter
        self.results = results
        self.long_description = long_description
        self.tutorial = tutorial

    def on_modified(self, event):
        """Re-run the full project update on any modification event."""
        what = 'directory' if event.is_directory else 'file'
        logging.info("Modified %s: %s", what, event.src_path)
        res = _update_project(self.config, self.task_presenter, self.results,
                              self.long_description, self.tutorial)
        logging.info(res)
|
Scifabric/pbs | helpers.py | create_helping_material_info | python | def create_helping_material_info(helping):
helping_info = None
file_path = None
if helping.get('info'):
helping_info = helping['info']
else:
helping_info = helping
if helping_info.get('file_path'):
file_path = helping_info.get('file_path')
del helping_info['file_path']
return helping_info, file_path | Create helping_material_info field. | train | https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L410-L421 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for the pbs command line client.
This module exports the following methods:
* find_project_by_short_name: return the project by short_name.
* check_api_errors: check for API errors returned by a PyBossa server.
* format_error: format error message.
* format_json_task: format a CSV row into JSON.
"""
import re
import os
import csv
import json
import time
import click
import datetime
from StringIO import StringIO
import polib
import openpyxl
import itertools
from requests import exceptions
import requests
from pbsexceptions import *
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import calendar
__all__ = ['find_project_by_short_name', 'check_api_error',
'format_error', 'format_json_task', '_create_project',
'_update_project', '_add_tasks', 'create_task_info',
'_delete_tasks', 'enable_auto_throttling',
'_update_tasks_redundancy',
'_update_project_watch', 'PbsHandler',
'_update_task_presenter_bundle_js', 'row_empty',
'_add_helpingmaterials', 'create_helping_material_info']
def _create_project(config):
    """Create a project in a PyBossa server.

    :param config: pbs config object (holds pbclient, project and server).
    :returns: a status message string.
    """
    try:
        response = config.pbclient.create_project(config.project['name'],
                                                  config.project['short_name'],
                                                  config.project['description'])
        # Raises ProjectAlreadyExists (among others) on server error.
        check_api_error(response)
        return ("Project: %s created!" % config.project['short_name'])
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _update_project_watch(config, task_presenter, results,
                          long_description, tutorial):  # pragma: no cover
    """Watch the current directory and re-update the project on changes.

    Blocks until interrupted with Ctrl-C (KeyboardInterrupt).
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    path = os.getcwd()
    event_handler = PbsHandler(config, task_presenter, results,
                               long_description, tutorial)
    observer = Observer()
    # We only want the current folder, not sub-folders
    observer.schedule(event_handler, path, recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def _update_task_presenter_bundle_js(project):
"""Append to template a distribution bundle js."""
if os.path.isfile ('bundle.min.js'):
with open('bundle.min.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
return
if os.path.isfile ('bundle.js'):
with open('bundle.js') as f:
js = f.read()
project.info['task_presenter'] += "<script>\n%s\n</script>" % js
def _update_project(config, task_presenter, results,
                    long_description, tutorial):
    """Update a project's metadata, task presenter, results and tutorial.

    Each argument after *config* is a path to the file whose contents
    replace the corresponding project field.
    :returns: a status message string.
    """
    try:
        # Get project
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        # Update attributes
        project.name = config.project['name']
        project.short_name = config.project['short_name']
        project.description = config.project['description']
        # Update long_description
        with open(long_description, 'r') as f:
            project.long_description = f.read()
        # Update task presenter
        with open(task_presenter, 'r') as f:
            project.info['task_presenter'] = f.read()
        # Append bundle.js / bundle.min.js to the presenter when present.
        _update_task_presenter_bundle_js(project)
        # Update results
        with open(results, 'r') as f:
            project.info['results'] = f.read()
        # Update tutorial
        with open(tutorial, 'r') as f:
            project.info['tutorial'] = f.read()
        response = config.pbclient.update_project(project)
        check_api_error(response)
        return ("Project %s updated!" % config.project['short_name'])
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except ProjectNotFound:
        return ("Project not found! The project: %s is missing." \
                " Use the flag --all=1 to search in all the server " \
                % config.project['short_name'])
    except TaskNotFound:
        raise
def _load_data(data_file, data_type):
"""Load data from CSV, JSON, Excel, ..., formats."""
raw_data = data_file.read()
if data_type is None:
data_type = data_file.name.split('.')[-1]
# Data list to process
data = []
# JSON type
if data_type == 'json':
data = json.loads(raw_data)
return data
# CSV type
elif data_type == 'csv':
csv_data = StringIO(raw_data)
reader = csv.DictReader(csv_data, delimiter=',')
for line in reader:
data.append(line)
return data
elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
excel_data = StringIO(raw_data)
wb = openpyxl.load_workbook(excel_data)
ws = wb.active
# First headers
headers = []
for row in ws.iter_rows(max_row=1):
for cell in row:
tmp = '_'.join(cell.value.split(" ")).lower()
headers.append(tmp)
# Simulate DictReader
for row in ws.iter_rows(row_offset=1):
values = []
for cell in row:
values.append(cell.value)
tmp = dict(itertools.izip(headers, values))
if len(values) == len(headers) and not row_empty(values):
data.append(tmp)
return data
# PO type
elif data_type == 'po':
po = polib.pofile(raw_data)
for entry in po.untranslated_entries():
data.append(entry.__dict__)
return data
# PROPERTIES type (used in Java and Firefox extensions)
elif data_type == 'properties':
lines = raw_data.split('\n')
for l in lines:
if l:
var_id, string = l.split('=')
tmp = dict(var_id=var_id, string=string)
data.append(tmp)
return data
else:
return data
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
    """Add tasks to a project.

    :param config: pbs config object (holds pbclient, project and server).
    :param tasks_file: open file-like object with the tasks data.
    :param tasks_type: file format (json, csv, po, properties, xlsx...);
        None derives it from the file extension.
    :param priority: priority_0 value for every created task.
    :param redundancy: n_answers value for every created task.
    :returns: a status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(tasks_file, tasks_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # If true, warn user
        # if sleep: # pragma: no cover
        #     click.secho(msg, fg='yellow')
        # Show progress bar
        with click.progressbar(data, label="Adding Tasks") as pgbar:
            for d in pgbar:
                task_info = create_task_info(d)
                response = config.pbclient.create_task(project_id=project.id,
                                                       info=task_info,
                                                       n_answers=redundancy,
                                                       priority_0=priority)
                # Check if for the data we have to auto-throttle task creation.
                # NOTE(review): this issues one HEAD request per task;
                # consider checking the rate limit once per batch instead.
                sleep, msg = enable_auto_throttling(config, data)
                check_api_error(response)
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s tasks added to project: %s" % (len(data),
                                                   config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project.

    :param config: pbs config object (holds pbclient, project and server).
    :param helping_file: open file-like object with the helping materials.
    :param helping_type: file format; None derives it from the extension.
    :returns: a status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create first the media object
                    hm = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                info=helping_info,
                                                                file_path=file_path)
                    check_api_error(hm)
                    # Merge the server-side info (media URL, ...) with ours
                    # and push the combined info back.
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                      info=helping_info)
                    check_api_error(response)
                # Check if for the data we have to auto-throttle task creation.
                # NOTE(review): '/api/helpinmaterial' looks like a typo for
                # '/api/helpingmaterial' -- confirm against the server routes.
                sleep, msg = enable_auto_throttling(config, data,
                                                    endpoint='/api/helpinmaterial')
                # If true, warn user
                if sleep:  # pragma: no cover
                    click.secho(msg, fg='yellow')
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" % (len(data),
                                                               config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _delete_tasks(config, task_id, limit=100, offset=0):
    """Delete tasks from a project.

    When *task_id* is given only that task is deleted; otherwise every
    task is deleted, fetching them *limit* at a time.
    :returns: a status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.delete_task(task_id)
            check_api_error(response)
            return "Task.id = %s and its associated task_runs have been deleted" % task_id
        else:
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
            while len(tasks) > 0:
                for t in tasks:
                    response = config.pbclient.delete_task(t.id)
                    check_api_error(response)
                # Re-fetch from the SAME offset: the rows just deleted have
                # shifted the remaining tasks down, so advancing the offset
                # (as the original did) skipped every other page.
                tasks = config.pbclient.get_tasks(project.id, limit, offset)
            return "All tasks and task_runs have been deleted"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def _update_tasks_redundancy(config, task_id, redundancy, limit=300, offset=0):
    """Update tasks redundancy (n_answers) for one task or all tasks.

    :param task_id: when truthy, only that task is updated.
    :returns: a status message string.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        if task_id:
            response = config.pbclient.find_tasks(project.id, id=task_id)
            check_api_error(response)
            task = response[0]
            task.n_answers = redundancy
            response = config.pbclient.update_task(task)
            check_api_error(response)
            return ("Task.id = %s redundancy has been updated to %s"
                    % (task_id, redundancy))
        else:
            tasks = config.pbclient.get_tasks(project.id, limit, offset)
            while len(tasks) > 0:
                # A fresh progress bar per page: the original created one
                # bar over the first page only, so tasks beyond the first
                # `limit` were fetched but never iterated (the exhausted
                # bar yielded nothing on later passes).
                with click.progressbar(tasks, label="Updating Tasks") as pgbar:
                    for t in pgbar:
                        t.n_answers = redundancy
                        response = config.pbclient.update_task(t)
                        check_api_error(response)
                        # Check if for the data we have to auto-throttle
                        # task update.
                        sleep, msg = enable_auto_throttling(config, tasks)
                        # If auto-throttling enabled, sleep for sleep seconds
                        if sleep:  # pragma: no cover
                            time.sleep(sleep)
                offset += limit
                tasks = config.pbclient.get_tasks(project.id, limit, offset)
            return "All tasks redundancy have been updated"
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
def find_project_by_short_name(short_name, pbclient, all=None):
    """Return the first project matching *short_name*.

    :param pbclient: PyBossa API client.
    :param all: when truthy, search across all users' projects.
    :raises ProjectNotFound: when no project matches.
    """
    response = pbclient.find_project(short_name=short_name, all=all)
    check_api_error(response)
    if len(response) == 0:
        # Interpolate the short name so the user sees which project is
        # missing (the original built the template but never filled the
        # %s placeholder).
        msg = ('%s not found! You can use the all=1 argument to '
               'search in all the server.' % short_name)
        error = 'Project Not Found'
        raise ProjectNotFound(msg, error)
    return response[0]
def check_api_error(api_response):
    """Check if the returned API response contains an error and raise the
    matching pbs exception.

    Error responses are dicts carrying 'code' or 'status'/'exception_cls'/
    'target'; anything else is considered successful.
    """
    # NOTE: the original printed every response unconditionally (leftover
    # debugging) and used the Python 2-only `<>` operator.
    if isinstance(api_response, dict) and 'code' in api_response and api_response['code'] != 200:
        print("Server response code: %s" % api_response['code'])
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError('Unexpected response', response=api_response)
    if isinstance(api_response, dict) and (api_response.get('status') == 'failed'):
        if 'ProgrammingError' in api_response.get('exception_cls'):
            raise DatabaseError(message='PyBossa database error.',
                                error=api_response)
        if ('DBIntegrityError' in api_response.get('exception_cls') and
                'project' in api_response.get('target')):
            msg = 'PyBossa project already exists.'
            raise ProjectAlreadyExists(message=msg, error=api_response)
        if 'project' in api_response.get('target'):
            raise ProjectNotFound(message='PyBossa Project not found',
                                  error=api_response)
        if 'task' in api_response.get('target'):
            raise TaskNotFound(message='PyBossa Task not found',
                               error=api_response)
        else:
            print("Server response: %s" % api_response)
            raise exceptions.HTTPError('Unexpected response', response=api_response)
def format_error(module, error):
    """Log the failing module, pretty-print the pbs error as JSON and
    exit with status 1."""
    logging.error(module)
    # Beautify JSON error. print() with a single argument behaves the
    # same on Python 2 and 3, unlike the original print statements.
    print(error.message)
    print(json.dumps(error.error, sort_keys=True, indent=4,
                     separators=(',', ': ')))
    exit(1)
def create_task_info(task):
    """Return the payload for a task: its truthy 'info' value when
    present, otherwise the whole task dict."""
    return task['info'] if task.get('info') else task
def enable_auto_throttling(config, data, limit=299, endpoint='/api/task'):
    """Return (sleep_seconds, warning_msg) when the API rate limit is
    nearly exhausted, otherwise (0, None).

    Issues a HEAD request to *endpoint* and reads the X-RateLimit-*
    headers the server sets.
    """
    # Get headers from server
    url = config.server + endpoint
    headers = requests.head(url).headers
    # Prefer the server-reported remaining quota. The original used
    # ``server_limit or limit``, which silently fell back to the default
    # when the server reported 0 remaining hits -- exactly when throttling
    # is most needed.
    remaining = headers.get('X-RateLimit-Remaining')
    if remaining is not None:
        limit = int(remaining)
    # Epoch at which the rate-limit window resets.
    reset_epoch = int(headers.get('X-RateLimit-Reset', 0))
    # Seconds to sleep until the window resets.
    sleep = (reset_epoch -
             calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
    # If we have 10 or fewer hits left on the endpoint, throttle.
    if limit <= 10:
        msg = ('Warning: %s remaining hits to the endpoint.'
               ' Auto-throttling enabled!' % limit)
        return (sleep, msg)
    return (0, None)
def format_json_task(task_info):
    """Return *task_info* parsed as JSON, or unchanged when it is not a
    valid JSON string."""
    try:
        return json.loads(task_info)
    except (ValueError, TypeError):
        # Not a JSON document (ValueError) or not a string at all
        # (TypeError): keep it as-is. The original bare `except` also
        # swallowed KeyboardInterrupt/SystemExit.
        return task_info
def row_empty(row):
    """Return True when no value in *row* is non-None."""
    return not any(value is not None for value in row)
class PbsHandler(PatternMatchingEventHandler):
    """Watchdog handler: pushes the whole project to the server whenever
    one of the watched project files changes on disk."""

    # Only the files that make up a pbs project trigger an update.
    patterns = ['*/template.html', '*/tutorial.html',
                '*/long_description.md', '*/results.html',
                '*/bundle.js', '*/bundle.min.js']

    def __init__(self, config, task_presenter, results,
                 long_description, tutorial):
        """Store the config and the paths of the project files to push."""
        super(PbsHandler, self).__init__()
        self.config = config
        self.task_presenter = task_presenter
        self.results = results
        self.long_description = long_description
        self.tutorial = tutorial

    def on_modified(self, event):
        """Re-run the full project update on any modification event."""
        what = 'directory' if event.is_directory else 'file'
        logging.info("Modified %s: %s", what, event.src_path)
        res = _update_project(self.config, self.task_presenter, self.results,
                              self.long_description, self.tutorial)
        logging.info(res)
|
cenkalti/kuyruk | kuyruk/worker.py | Worker.run | python | def run(self) -> None:
if self._logging_level:
logging.basicConfig(
level=getattr(logging, self._logging_level.upper()),
format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s")
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._handle_sigterm)
if platform.system() != 'Windows':
# These features will not be available on Windows, but that is OK.
# Read this issue for more details:
# https://github.com/cenkalti/kuyruk/issues/54
signal.signal(signal.SIGHUP, self._handle_sighup)
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
signal.signal(signal.SIGUSR2, self._handle_sigusr2)
self._started_at = os.times().elapsed
for t in self._threads:
t.start()
try:
signals.worker_start.send(self.kuyruk, worker=self)
self._consume_messages()
signals.worker_shutdown.send(self.kuyruk, worker=self)
finally:
self.shutdown_pending.set()
for t in self._threads:
t.join()
logger.debug("End run worker") | Runs the worker and consumes messages from RabbitMQ.
Returns only after `shutdown()` is called. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L81-L115 | [
"def _consume_messages(self) -> None:\n with self.kuyruk.channel() as ch:\n # Set prefetch count to 1. If we don't set this, RabbitMQ keeps\n # sending messages while we are already working on a message.\n ch.basic_qos(0, 1, True)\n\n self._declare_queues(ch)\n self._consume_queues(ch)\n logger.info('Consumer started')\n self._main_loop(ch)\n"
] | class Worker:
"""Consumes tasks from queues and runs them.
:param app: An instance of :class:`~kuyruk.Kuyruk`
:param args: Command line arguments
"""
def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:
    """Initialize the worker from the app config and CLI arguments."""
    self.kuyruk = app
    if not args.queues:
        args.queues = ['kuyruk']

    def add_host(queue: str) -> str:
        # "foo.localhost" is shorthand for a queue local to this host.
        if queue.endswith('.localhost'):
            queue = queue.rsplit('.localhost')[0]
            return "%s.%s" % (queue, self._hostname)
        else:
            return queue

    self._hostname = socket.gethostname()
    self.queues = [add_host(q) for q in args.queues]
    self._tasks = {}  # type: Dict[Tuple[str, str], Task]
    self.shutdown_pending = threading.Event()
    self.consuming = False
    self.current_task = None  # type: Task
    self.current_args = None  # type: Tuple
    self.current_kwargs = None  # type: Dict[str, Any]
    self._started_at = None  # type: float
    self._pid = os.getpid()
    # CLI arguments override the app config when given.
    self._logging_level = app.config.WORKER_LOGGING_LEVEL
    if args.logging_level is not None:
        self._logging_level = args.logging_level
    self._max_run_time = app.config.WORKER_MAX_RUN_TIME
    if args.max_run_time is not None:
        self._max_run_time = args.max_run_time
    self._max_load = app.config.WORKER_MAX_LOAD
    if args.max_load is not None:
        self._max_load = args.max_load
    if self._max_load == -1:
        # BUG FIX: the original used `==` here (a discarded comparison),
        # so -1 was never replaced by the CPU count as intended and the
        # consumer stayed paused forever (load > -1 is always true).
        self._max_load = multiprocessing.cpu_count()
    self._threads = []  # type: List[threading.Thread]
    if self._max_load:
        self._threads.append(threading.Thread(target=self._watch_load))
    if self._max_run_time:
        self._threads.append(threading.Thread(target=self._shutdown_timer))
    signals.worker_init.send(self.kuyruk, worker=self)
def _consume_messages(self) -> None:
    """Open a channel, start consuming from all queues and loop until
    shutdown."""
    with self.kuyruk.channel() as ch:
        # Set prefetch count to 1. If we don't set this, RabbitMQ keeps
        # sending messages while we are already working on a message.
        ch.basic_qos(0, 1, True)

        self._declare_queues(ch)
        self._consume_queues(ch)
        logger.info('Consumer started')
        self._main_loop(ch)
def _main_loop(self, ch: amqp.Channel) -> None:
    """Drive the AMQP connection until shutdown is requested.

    Delivered messages are dispatched to the consumer callbacks
    registered in _consume_queues.
    """
    while not self.shutdown_pending.is_set():
        self._pause_or_resume(ch)
        try:
            # Keep heartbeats flowing and wait at most 1s for a delivery
            # so the shutdown flag is re-checked regularly.
            ch.connection.heartbeat_tick()
            ch.connection.drain_events(timeout=1)
        except socket.timeout:
            pass
def _consumer_tag(self, queue: str) -> str:
    """Build a unique consumer tag of the form ``queue:pid@hostname``."""
    return "{0}:{1}@{2}".format(queue, self._pid, self._hostname)
def _declare_queues(self, ch: amqp.Channel) -> None:
    """Declare every queue this worker consumes as durable."""
    for queue in self.queues:
        logger.debug("queue_declare: %s", queue)
        ch.queue_declare(queue=queue, durable=True, auto_delete=False)
def _pause_or_resume(self, channel: amqp.Channel) -> None:
    """Pause or resume consuming depending on the current system load.

    No-op unless a max load is configured. ``_current_load`` is set by
    the _watch_load thread; until its first sample the AttributeError
    path treats the load as acceptable.
    """
    if not self._max_load:
        return
    try:
        load = self._current_load
    except AttributeError:
        should_pause = False
    else:
        should_pause = load > self._max_load
    if should_pause and self.consuming:
        logger.warning('Load is above the treshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)
        self._cancel_queues(channel)
    elif not should_pause and not self.consuming:
        logger.warning('Load is below the treshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)
        self._consume_queues(channel)
def _consume_queues(self, ch: amqp.Channel) -> None:
    """Register a consumer on every queue and mark the worker consuming."""
    self.consuming = True
    for queue in self.queues:
        logger.debug("basic_consume: %s", queue)
        ch.basic_consume(queue=queue, consumer_tag=self._consumer_tag(queue), callback=self._process_message)
def _cancel_queues(self, ch: amqp.Channel) -> None:
    """Cancel the consumer on every queue and mark the worker paused."""
    self.consuming = False
    for queue in self.queues:
        logger.debug("basic_cancel: %s", queue)
        ch.basic_cancel(self._consumer_tag(queue))
def _process_message(self, message: amqp.Message) -> None:
    """Processes the message received from the queue."""
    if self.shutdown_pending.is_set():
        # Shutdown in progress: leave the message unacked so the broker
        # redelivers it later.
        return
    try:
        if isinstance(message.body, bytes):
            message.body = message.body.decode()
        description = json.loads(message.body)
    except Exception:
        # Malformed payload: reject without requeue so it is not retried.
        logger.error("Cannot decode message. Dropping. Message: %r", message.body)
        traceback.print_exc()
        message.channel.basic_reject(message.delivery_tag, requeue=False)
    else:
        logger.info("Processing task: %r", description)
        self._process_description(message, description)
def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:
    """Import the task named in *description* and run it.

    Import failures are reported via the worker_failure signal and the
    message is rejected without requeue.
    """
    try:
        task = self._import_task(description['module'], description['function'])
        args, kwargs = description['args'], description['kwargs']
    except Exception:
        logger.error('Cannot import task')
        exc_info = sys.exc_info()
        signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)
        message.channel.basic_reject(message.delivery_tag, requeue=False)
    else:
        self._process_task(message, description, task, args, kwargs)
def _import_task(self, module: str, function: str) -> Task:
    """Return the task callable, importing and caching it on first use."""
    key = (module, function)
    try:
        return self._tasks[key]
    except KeyError:
        task = importer.import_object(module, function)
        self._tasks[key] = task
        return task
def _process_task(
        self,
        message: amqp.Message,
        description: Dict[str, Any],
        task: Task,
        args: Tuple,
        kwargs: Dict[str, Any],
) -> None:
    """Run the task and ack/reject the message according to the outcome.

    Outcomes:
      * success        -> ack; optionally publish the result to reply_to
      * Reject raised  -> reject with requeue (retried later)
      * Discard raised -> reject without requeue
      * HeartbeatError -> report failure and re-raise (connection is dead)
      * other errors   -> report failure, reject without requeue
    """
    queue = message.delivery_info['routing_key']
    reply_to = message.properties.get('reply_to')
    try:
        result = self._run_task(message.channel.connection, task, args, kwargs)
    except Reject:
        logger.warning('Task is rejected')
        message.channel.basic_reject(message.delivery_tag, requeue=True)
    except Discard:
        logger.warning('Task is discarded')
        message.channel.basic_reject(message.delivery_tag, requeue=False)
        if reply_to:
            exc_info = sys.exc_info()
            self._send_reply(reply_to, message.channel, None, exc_info)
    except HeartbeatError as e:
        # The heartbeat thread failed: the connection is unusable, so the
        # message is left unacked for redelivery and the error propagates.
        logger.error('Error while sending heartbeat')
        exc_info = e.exc_info
        logger.error(''.join(traceback.format_exception(*exc_info)))
        signals.worker_failure.send(
            self.kuyruk,
            description=description,
            task=task,
            args=args,
            kwargs=kwargs,
            exc_info=exc_info,
            worker=self,
            queue=queue)
        raise
    except Exception:
        logger.error('Task raised an exception')
        exc_info = sys.exc_info()
        logger.error(''.join(traceback.format_exception(*exc_info)))
        signals.worker_failure.send(
            self.kuyruk,
            description=description,
            task=task,
            args=args,
            kwargs=kwargs,
            exc_info=exc_info,
            worker=self,
            queue=queue)
        message.channel.basic_reject(message.delivery_tag, requeue=False)
        if reply_to:
            self._send_reply(reply_to, message.channel, None, exc_info)
    else:
        logger.info('Task is successful')
        message.channel.basic_ack(message.delivery_tag)
        if reply_to:
            self._send_reply(reply_to, message.channel, result, None)
    finally:
        logger.debug("Task is processed")
def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
    """Run the task while a Heartbeat thread keeps the connection alive.

    ``current_task``/``current_args``/``current_kwargs`` are exposed
    while the task runs (used by the signal handlers) and always cleared
    afterwards.
    """
    hb = Heartbeat(connection, self._on_heartbeat_error)
    hb.start()
    self.current_task = task
    self.current_args = args
    self.current_kwargs = kwargs
    try:
        return self._apply_task(task, args, kwargs)
    finally:
        self.current_task = None
        self.current_args = None
        self.current_kwargs = None
        hb.stop()
def _on_heartbeat_error(self, exc_info: ExcInfoType) -> None:
self._heartbeat_exc_info = exc_info
os.kill(os.getpid(), signal.SIGHUP)
@staticmethod
def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
"""Logs the time spent while running the task."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta))
def _send_reply(
self,
reply_to: str,
channel: amqp.Channel,
result: Any,
exc_info: ExcInfoType,
) -> None:
logger.debug("Sending reply result=%r", result)
reply = {'result': result}
if exc_info:
reply['exception'] = self._exc_info_dict(exc_info)
try:
body = json.dumps(reply)
except Exception as e:
logger.error('Cannot serialize result as JSON: %s', e)
exc_info = sys.exc_info()
reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}
body = json.dumps(reply)
msg = amqp.Message(body=body)
channel.basic_publish(msg, exchange="", routing_key=reply_to)
@staticmethod
def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:
type_, val, tb = exc_info
return {
'type': '%s.%s' % (type_.__module__, type_.__name__),
'value': str(val),
'traceback': ''.join(traceback.format_tb(tb)),
}
def _watch_load(self) -> None:
"""Pause consuming messages if lood goes above the allowed limit."""
while not self.shutdown_pending.wait(1):
self._current_load = os.getloadavg()[0]
@property
def uptime(self) -> float:
if not self._started_at:
return 0
return os.times().elapsed - self._started_at
def _shutdown_timer(self) -> None:
"""Counts down from MAX_WORKER_RUN_TIME. When it reaches zero sutdown
gracefully.
"""
remaining = self._max_run_time - self.uptime
if not self.shutdown_pending.wait(remaining):
logger.warning('Run time reached zero')
self.shutdown()
def shutdown(self) -> None:
"""Exits after the current task is finished."""
logger.warning("Shutdown requested")
self.shutdown_pending.set()
def _handle_sigint(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGINT")
self.shutdown()
def _handle_sigterm(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGTERM")
self.shutdown()
def _handle_sighup(self, signum: int, frame: Any) -> None:
"""Used internally to fail the task when connection to RabbitMQ is
lost during the execution of the task.
"""
logger.warning("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
formatted_exception = ''.join(traceback.format_exception(*exc_info)) # noqa
raise HeartbeatError(exc_info)
@staticmethod
def _handle_sigusr1(signum: int, frame: Any) -> None:
"""Print stacktrace."""
print('=' * 70)
print(''.join(traceback.format_stack()))
print('-' * 70)
def _handle_sigusr2(self, signum: int, frame: Any) -> None:
"""Drop current task."""
logger.warning("Catched SIGUSR2")
if self.current_task:
logger.warning("Dropping current task...")
raise Discard
def drop_task(self) -> None:
os.kill(os.getpid(), signal.SIGUSR2)
|
cenkalti/kuyruk | kuyruk/worker.py | Worker._process_message | python | def _process_message(self, message: amqp.Message) -> None:
if self.shutdown_pending.is_set():
return
try:
if isinstance(message.body, bytes):
message.body = message.body.decode()
description = json.loads(message.body)
except Exception:
logger.error("Cannot decode message. Dropping. Message: %r", message.body)
traceback.print_exc()
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
logger.info("Processing task: %r", description)
self._process_description(message, description) | Processes the message received from the queue. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L175-L190 | null | class Worker:
"""Consumes tasks from queues and runs them.
:param app: An instance of :class:`~kuyruk.Kuyruk`
:param args: Command line arguments
"""
def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:
    """Initialize the worker from the app config and CLI arguments.

    :param app: An instance of :class:`~kuyruk.Kuyruk`.
    :param args: Parsed command line arguments. CLI values override the
        corresponding ``app.config`` settings when given.
    """
    self.kuyruk = app
    if not args.queues:
        args.queues = ['kuyruk']

    def add_host(queue: str) -> str:
        # Queues named "<name>.localhost" are rewritten to "<name>.<hostname>"
        # so they are consumed only by workers on this machine.
        if queue.endswith('.localhost'):
            queue = queue.rsplit('.localhost')[0]
            return "%s.%s" % (queue, self._hostname)
        else:
            return queue

    self._hostname = socket.gethostname()
    self.queues = [add_host(q) for q in args.queues]
    self._tasks = {}  # type: Dict[Tuple[str, str], Task]
    self.shutdown_pending = threading.Event()
    self.consuming = False
    self.current_task = None  # type: Task
    self.current_args = None  # type: Tuple
    self.current_kwargs = None  # type: Dict[str, Any]
    self._started_at = None  # type: float
    self._pid = os.getpid()
    self._logging_level = app.config.WORKER_LOGGING_LEVEL
    if args.logging_level is not None:
        self._logging_level = args.logging_level
    self._max_run_time = app.config.WORKER_MAX_RUN_TIME
    if args.max_run_time is not None:
        self._max_run_time = args.max_run_time
    self._max_load = app.config.WORKER_MAX_LOAD
    if args.max_load is not None:
        self._max_load = args.max_load
    if self._max_load == -1:
        # BUG FIX: the original wrote "self._max_load == multiprocessing.cpu_count()",
        # a no-op comparison, so the -1 sentinel never resolved to the CPU count.
        self._max_load = multiprocessing.cpu_count()
    self._threads = []  # type: List[threading.Thread]
    if self._max_load:
        self._threads.append(threading.Thread(target=self._watch_load))
    if self._max_run_time:
        self._threads.append(threading.Thread(target=self._shutdown_timer))
    signals.worker_init.send(self.kuyruk, worker=self)
def run(self) -> None:
"""Runs the worker and consumes messages from RabbitMQ.
Returns only after `shutdown()` is called.
"""
if self._logging_level:
logging.basicConfig(
level=getattr(logging, self._logging_level.upper()),
format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s")
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._handle_sigterm)
if platform.system() != 'Windows':
# These features will not be available on Windows, but that is OK.
# Read this issue for more details:
# https://github.com/cenkalti/kuyruk/issues/54
signal.signal(signal.SIGHUP, self._handle_sighup)
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
signal.signal(signal.SIGUSR2, self._handle_sigusr2)
self._started_at = os.times().elapsed
for t in self._threads:
t.start()
try:
signals.worker_start.send(self.kuyruk, worker=self)
self._consume_messages()
signals.worker_shutdown.send(self.kuyruk, worker=self)
finally:
self.shutdown_pending.set()
for t in self._threads:
t.join()
logger.debug("End run worker")
def _consume_messages(self) -> None:
with self.kuyruk.channel() as ch:
# Set prefetch count to 1. If we don't set this, RabbitMQ keeps
# sending messages while we are already working on a message.
ch.basic_qos(0, 1, True)
self._declare_queues(ch)
self._consume_queues(ch)
logger.info('Consumer started')
self._main_loop(ch)
def _main_loop(self, ch: amqp.Channel) -> None:
while not self.shutdown_pending.is_set():
self._pause_or_resume(ch)
try:
ch.connection.heartbeat_tick()
ch.connection.drain_events(timeout=1)
except socket.timeout:
pass
def _consumer_tag(self, queue: str) -> str:
return "%s:%s@%s" % (queue, self._pid, self._hostname)
def _declare_queues(self, ch: amqp.Channel) -> None:
for queue in self.queues:
logger.debug("queue_declare: %s", queue)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
def _pause_or_resume(self, channel: amqp.Channel) -> None:
if not self._max_load:
return
try:
load = self._current_load
except AttributeError:
should_pause = False
else:
should_pause = load > self._max_load
if should_pause and self.consuming:
logger.warning('Load is above the treshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)
self._cancel_queues(channel)
elif not should_pause and not self.consuming:
logger.warning('Load is below the treshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)
self._consume_queues(channel)
def _consume_queues(self, ch: amqp.Channel) -> None:
self.consuming = True
for queue in self.queues:
logger.debug("basic_consume: %s", queue)
ch.basic_consume(queue=queue, consumer_tag=self._consumer_tag(queue), callback=self._process_message)
def _cancel_queues(self, ch: amqp.Channel) -> None:
self.consuming = False
for queue in self.queues:
logger.debug("basic_cancel: %s", queue)
ch.basic_cancel(self._consumer_tag(queue))
def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:
try:
task = self._import_task(description['module'], description['function'])
args, kwargs = description['args'], description['kwargs']
except Exception:
logger.error('Cannot import task')
exc_info = sys.exc_info()
signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
self._process_task(message, description, task, args, kwargs)
def _import_task(self, module: str, function: str) -> Task:
if (module, function) in self._tasks:
return self._tasks[(module, function)]
task = importer.import_object(module, function)
self._tasks[(module, function)] = task
return task
def _process_task(
self,
message: amqp.Message,
description: Dict[str, Any],
task: Task,
args: Tuple,
kwargs: Dict[str, Any],
) -> None:
queue = message.delivery_info['routing_key']
reply_to = message.properties.get('reply_to')
try:
result = self._run_task(message.channel.connection, task, args, kwargs)
except Reject:
logger.warning('Task is rejected')
message.channel.basic_reject(message.delivery_tag, requeue=True)
except Discard:
logger.warning('Task is discarded')
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
exc_info = sys.exc_info()
self._send_reply(reply_to, message.channel, None, exc_info)
except HeartbeatError as e:
logger.error('Error while sending heartbeat')
exc_info = e.exc_info
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
raise
except Exception:
logger.error('Task raised an exception')
exc_info = sys.exc_info()
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
self._send_reply(reply_to, message.channel, None, exc_info)
else:
logger.info('Task is successful')
message.channel.basic_ack(message.delivery_tag)
if reply_to:
self._send_reply(reply_to, message.channel, result, None)
finally:
logger.debug("Task is processed")
def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
hb = Heartbeat(connection, self._on_heartbeat_error)
hb.start()
self.current_task = task
self.current_args = args
self.current_kwargs = kwargs
try:
return self._apply_task(task, args, kwargs)
finally:
self.current_task = None
self.current_args = None
self.current_kwargs = None
hb.stop()
def _on_heartbeat_error(self, exc_info: ExcInfoType) -> None:
self._heartbeat_exc_info = exc_info
os.kill(os.getpid(), signal.SIGHUP)
@staticmethod
def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
"""Logs the time spent while running the task."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta))
def _send_reply(
self,
reply_to: str,
channel: amqp.Channel,
result: Any,
exc_info: ExcInfoType,
) -> None:
logger.debug("Sending reply result=%r", result)
reply = {'result': result}
if exc_info:
reply['exception'] = self._exc_info_dict(exc_info)
try:
body = json.dumps(reply)
except Exception as e:
logger.error('Cannot serialize result as JSON: %s', e)
exc_info = sys.exc_info()
reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}
body = json.dumps(reply)
msg = amqp.Message(body=body)
channel.basic_publish(msg, exchange="", routing_key=reply_to)
@staticmethod
def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:
type_, val, tb = exc_info
return {
'type': '%s.%s' % (type_.__module__, type_.__name__),
'value': str(val),
'traceback': ''.join(traceback.format_tb(tb)),
}
def _watch_load(self) -> None:
"""Pause consuming messages if lood goes above the allowed limit."""
while not self.shutdown_pending.wait(1):
self._current_load = os.getloadavg()[0]
@property
def uptime(self) -> float:
if not self._started_at:
return 0
return os.times().elapsed - self._started_at
def _shutdown_timer(self) -> None:
"""Counts down from MAX_WORKER_RUN_TIME. When it reaches zero sutdown
gracefully.
"""
remaining = self._max_run_time - self.uptime
if not self.shutdown_pending.wait(remaining):
logger.warning('Run time reached zero')
self.shutdown()
def shutdown(self) -> None:
"""Exits after the current task is finished."""
logger.warning("Shutdown requested")
self.shutdown_pending.set()
def _handle_sigint(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGINT")
self.shutdown()
def _handle_sigterm(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGTERM")
self.shutdown()
def _handle_sighup(self, signum: int, frame: Any) -> None:
"""Used internally to fail the task when connection to RabbitMQ is
lost during the execution of the task.
"""
logger.warning("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
formatted_exception = ''.join(traceback.format_exception(*exc_info)) # noqa
raise HeartbeatError(exc_info)
@staticmethod
def _handle_sigusr1(signum: int, frame: Any) -> None:
"""Print stacktrace."""
print('=' * 70)
print(''.join(traceback.format_stack()))
print('-' * 70)
def _handle_sigusr2(self, signum: int, frame: Any) -> None:
"""Drop current task."""
logger.warning("Catched SIGUSR2")
if self.current_task:
logger.warning("Dropping current task...")
raise Discard
def drop_task(self) -> None:
os.kill(os.getpid(), signal.SIGUSR2)
|
cenkalti/kuyruk | kuyruk/worker.py | Worker._apply_task | python | def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta)) | Logs the time spent while running the task. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L292-L304 | null | class Worker:
"""Consumes tasks from queues and runs them.
:param app: An instance of :class:`~kuyruk.Kuyruk`
:param args: Command line arguments
"""
def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:
    """Initialize the worker from the app config and CLI arguments.

    :param app: An instance of :class:`~kuyruk.Kuyruk`.
    :param args: Parsed command line arguments. CLI values override the
        corresponding ``app.config`` settings when given.
    """
    self.kuyruk = app
    if not args.queues:
        args.queues = ['kuyruk']

    def add_host(queue: str) -> str:
        # Queues named "<name>.localhost" are rewritten to "<name>.<hostname>"
        # so they are consumed only by workers on this machine.
        if queue.endswith('.localhost'):
            queue = queue.rsplit('.localhost')[0]
            return "%s.%s" % (queue, self._hostname)
        else:
            return queue

    self._hostname = socket.gethostname()
    self.queues = [add_host(q) for q in args.queues]
    self._tasks = {}  # type: Dict[Tuple[str, str], Task]
    self.shutdown_pending = threading.Event()
    self.consuming = False
    self.current_task = None  # type: Task
    self.current_args = None  # type: Tuple
    self.current_kwargs = None  # type: Dict[str, Any]
    self._started_at = None  # type: float
    self._pid = os.getpid()
    self._logging_level = app.config.WORKER_LOGGING_LEVEL
    if args.logging_level is not None:
        self._logging_level = args.logging_level
    self._max_run_time = app.config.WORKER_MAX_RUN_TIME
    if args.max_run_time is not None:
        self._max_run_time = args.max_run_time
    self._max_load = app.config.WORKER_MAX_LOAD
    if args.max_load is not None:
        self._max_load = args.max_load
    if self._max_load == -1:
        # BUG FIX: the original wrote "self._max_load == multiprocessing.cpu_count()",
        # a no-op comparison, so the -1 sentinel never resolved to the CPU count.
        self._max_load = multiprocessing.cpu_count()
    self._threads = []  # type: List[threading.Thread]
    if self._max_load:
        self._threads.append(threading.Thread(target=self._watch_load))
    if self._max_run_time:
        self._threads.append(threading.Thread(target=self._shutdown_timer))
    signals.worker_init.send(self.kuyruk, worker=self)
def run(self) -> None:
    """Run the worker and consume messages from RabbitMQ.

    Returns only after `shutdown()` is called.
    """
    if self._logging_level:
        level = getattr(logging, self._logging_level.upper())
        fmt = "%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s"
        logging.basicConfig(level=level, format=fmt)

    handlers = [
        (signal.SIGINT, self._handle_sigint),
        (signal.SIGTERM, self._handle_sigterm),
    ]
    if platform.system() != 'Windows':
        # These features will not be available on Windows, but that is OK.
        # Read this issue for more details:
        # https://github.com/cenkalti/kuyruk/issues/54
        handlers += [
            (signal.SIGHUP, self._handle_sighup),
            (signal.SIGUSR1, self._handle_sigusr1),
            (signal.SIGUSR2, self._handle_sigusr2),
        ]
    for signum, handler in handlers:
        signal.signal(signum, handler)

    self._started_at = os.times().elapsed
    for thread in self._threads:
        thread.start()
    try:
        signals.worker_start.send(self.kuyruk, worker=self)
        self._consume_messages()
        signals.worker_shutdown.send(self.kuyruk, worker=self)
    finally:
        # Always stop the helper threads, even if consuming raised.
        self.shutdown_pending.set()
        for thread in self._threads:
            thread.join()
        logger.debug("End run worker")
def _consume_messages(self) -> None:
    """Open a channel, start consuming from all queues and loop until shutdown."""
    with self.kuyruk.channel() as channel:
        # Prefetch count 1: otherwise RabbitMQ keeps pushing messages
        # while we are still busy with the current one.
        channel.basic_qos(0, 1, True)
        self._declare_queues(channel)
        self._consume_queues(channel)
        logger.info('Consumer started')
        self._main_loop(channel)
def _main_loop(self, ch: amqp.Channel) -> None:
    """Drain AMQP events until shutdown is requested, sending heartbeats."""
    while not self.shutdown_pending.is_set():
        self._pause_or_resume(ch)
        connection = ch.connection
        try:
            connection.heartbeat_tick()
            connection.drain_events(timeout=1)
        except socket.timeout:
            # No events within the timeout; loop again to re-check the
            # shutdown flag and the load-based pause state.
            pass
def _consumer_tag(self, queue: str) -> str:
return "%s:%s@%s" % (queue, self._pid, self._hostname)
def _declare_queues(self, ch: amqp.Channel) -> None:
    """Declare each consumed queue as durable and non-auto-delete."""
    for name in self.queues:
        logger.debug("queue_declare: %s", name)
        ch.queue_declare(queue=name, durable=True, auto_delete=False)
def _pause_or_resume(self, channel: amqp.Channel) -> None:
    """Pause or resume consuming based on the measured system load.

    Cancels the consumers when the load sample rises above
    ``self._max_load`` and re-registers them when it drops back below.
    Does nothing when no load limit is configured.
    """
    if not self._max_load:
        return
    try:
        load = self._current_load
    except AttributeError:
        # The _watch_load thread has not produced a sample yet.
        should_pause = False
    else:
        should_pause = load > self._max_load
    if should_pause and self.consuming:
        # Typo fix in the log message: "treshold" -> "threshold".
        logger.warning('Load is above the threshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)
        self._cancel_queues(channel)
    elif not should_pause and not self.consuming:
        logger.warning('Load is below the threshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)
        self._consume_queues(channel)
def _consume_queues(self, ch: amqp.Channel) -> None:
    """Register a consumer on every queue and mark the worker as consuming."""
    self.consuming = True
    for name in self.queues:
        logger.debug("basic_consume: %s", name)
        ch.basic_consume(queue=name, consumer_tag=self._consumer_tag(name), callback=self._process_message)
def _cancel_queues(self, ch: amqp.Channel) -> None:
    """Cancel the consumer on every queue and mark the worker as paused."""
    self.consuming = False
    for name in self.queues:
        logger.debug("basic_cancel: %s", name)
        ch.basic_cancel(self._consumer_tag(name))
def _process_message(self, message: amqp.Message) -> None:
    """Decode the queued message body as JSON and dispatch it.

    Messages that cannot be decoded are rejected without requeueing.
    """
    if self.shutdown_pending.is_set():
        # Shutting down; do not start new work.
        return
    body = message.body
    try:
        if isinstance(body, bytes):
            body = body.decode()
            message.body = body
        description = json.loads(body)
    except Exception:
        logger.error("Cannot decode message. Dropping. Message: %r", message.body)
        traceback.print_exc()
        message.channel.basic_reject(message.delivery_tag, requeue=False)
    else:
        logger.info("Processing task: %r", description)
        self._process_description(message, description)
def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:
    """Look up the task named in *description* and run it.

    If the task cannot be imported, the worker_failure signal is sent
    and the message is rejected without requeueing.
    """
    try:
        module, function = description['module'], description['function']
        task = self._import_task(module, function)
        args = description['args']
        kwargs = description['kwargs']
    except Exception:
        logger.error('Cannot import task')
        exc_info = sys.exc_info()
        signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)
        message.channel.basic_reject(message.delivery_tag, requeue=False)
        return
    self._process_task(message, description, task, args, kwargs)
def _import_task(self, module: str, function: str) -> Task:
    """Return the Task object for ``module.function``, caching lookups."""
    key = (module, function)
    try:
        return self._tasks[key]
    except KeyError:
        task = importer.import_object(module, function)
        self._tasks[key] = task
        return task
def _process_task(
        self,
        message: amqp.Message,
        description: Dict[str, Any],
        task: Task,
        args: Tuple,
        kwargs: Dict[str, Any],
) -> None:
    """Execute *task* and settle *message* according to the outcome.

    Success acks the message; Reject requeues it; Discard and ordinary
    exceptions reject it without requeue; HeartbeatError is re-raised
    after reporting the failure. When the message carries a ``reply_to``
    property, a reply (result or exception info) is published back.
    """
    queue = message.delivery_info['routing_key']
    reply_to = message.properties.get('reply_to')
    try:
        result = self._run_task(message.channel.connection, task, args, kwargs)
    except Reject:
        # The task asked to be retried: put the message back on the queue.
        logger.warning('Task is rejected')
        message.channel.basic_reject(message.delivery_tag, requeue=True)
    except Discard:
        # The task asked to be dropped permanently.
        logger.warning('Task is discarded')
        message.channel.basic_reject(message.delivery_tag, requeue=False)
        if reply_to:
            exc_info = sys.exc_info()
            self._send_reply(reply_to, message.channel, None, exc_info)
    except HeartbeatError as e:
        # Heartbeat failed while the task was running; report and re-raise.
        logger.error('Error while sending heartbeat')
        exc_info = e.exc_info
        logger.error(''.join(traceback.format_exception(*exc_info)))
        signals.worker_failure.send(
            self.kuyruk,
            description=description,
            task=task,
            args=args,
            kwargs=kwargs,
            exc_info=exc_info,
            worker=self,
            queue=queue)
        raise
    except Exception:
        # The task raised: log, emit the failure signal, drop the message.
        logger.error('Task raised an exception')
        exc_info = sys.exc_info()
        logger.error(''.join(traceback.format_exception(*exc_info)))
        signals.worker_failure.send(
            self.kuyruk,
            description=description,
            task=task,
            args=args,
            kwargs=kwargs,
            exc_info=exc_info,
            worker=self,
            queue=queue)
        message.channel.basic_reject(message.delivery_tag, requeue=False)
        if reply_to:
            self._send_reply(reply_to, message.channel, None, exc_info)
    else:
        logger.info('Task is successful')
        message.channel.basic_ack(message.delivery_tag)
        if reply_to:
            self._send_reply(reply_to, message.channel, result, None)
    finally:
        logger.debug("Task is processed")
def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
    """Apply *task* while a heartbeat thread keeps *connection* alive.

    The task being executed is exposed on the instance (current_task /
    current_args / current_kwargs) and cleared again in all cases.
    """
    heartbeat = Heartbeat(connection, self._on_heartbeat_error)
    heartbeat.start()
    self.current_task = task
    self.current_args = args
    self.current_kwargs = kwargs
    try:
        return self._apply_task(task, args, kwargs)
    finally:
        self.current_task = None
        self.current_args = None
        self.current_kwargs = None
        heartbeat.stop()
def _on_heartbeat_error(self, exc_info: ExcInfoType) -> None:
self._heartbeat_exc_info = exc_info
os.kill(os.getpid(), signal.SIGHUP)
@staticmethod
def _send_reply(
self,
reply_to: str,
channel: amqp.Channel,
result: Any,
exc_info: ExcInfoType,
) -> None:
logger.debug("Sending reply result=%r", result)
reply = {'result': result}
if exc_info:
reply['exception'] = self._exc_info_dict(exc_info)
try:
body = json.dumps(reply)
except Exception as e:
logger.error('Cannot serialize result as JSON: %s', e)
exc_info = sys.exc_info()
reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}
body = json.dumps(reply)
msg = amqp.Message(body=body)
channel.basic_publish(msg, exchange="", routing_key=reply_to)
@staticmethod
def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:
    """Convert an ``exc_info`` triple into a JSON-serializable dict."""
    exc_type, exc_value, exc_tb = exc_info
    qualified_name = '%s.%s' % (exc_type.__module__, exc_type.__name__)
    return {
        'type': qualified_name,
        'value': str(exc_value),
        'traceback': ''.join(traceback.format_tb(exc_tb)),
    }
def _watch_load(self) -> None:
"""Pause consuming messages if lood goes above the allowed limit."""
while not self.shutdown_pending.wait(1):
self._current_load = os.getloadavg()[0]
@property
def uptime(self) -> float:
if not self._started_at:
return 0
return os.times().elapsed - self._started_at
def _shutdown_timer(self) -> None:
    """Wait until the maximum run time is reached, then shut down gracefully.

    Wakes up early without shutting down if shutdown was requested
    through other means in the meantime.
    """
    remaining = self._max_run_time - self.uptime
    timed_out = not self.shutdown_pending.wait(remaining)
    if timed_out:
        logger.warning('Run time reached zero')
        self.shutdown()
def shutdown(self) -> None:
    """Request a graceful exit after the current task is finished."""
    logger.warning("Shutdown requested")
    # The main loop and helper threads poll this event and stop.
    self.shutdown_pending.set()
def _handle_sigint(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGINT")
self.shutdown()
def _handle_sigterm(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGTERM")
self.shutdown()
def _handle_sighup(self, signum: int, frame: Any) -> None:
"""Used internally to fail the task when connection to RabbitMQ is
lost during the execution of the task.
"""
logger.warning("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
formatted_exception = ''.join(traceback.format_exception(*exc_info)) # noqa
raise HeartbeatError(exc_info)
@staticmethod
def _handle_sigusr1(signum: int, frame: Any) -> None:
"""Print stacktrace."""
print('=' * 70)
print(''.join(traceback.format_stack()))
print('-' * 70)
def _handle_sigusr2(self, signum: int, frame: Any) -> None:
"""Drop current task."""
logger.warning("Catched SIGUSR2")
if self.current_task:
logger.warning("Dropping current task...")
raise Discard
def drop_task(self) -> None:
os.kill(os.getpid(), signal.SIGUSR2)
|
cenkalti/kuyruk | kuyruk/worker.py | Worker._shutdown_timer | python | def _shutdown_timer(self) -> None:
remaining = self._max_run_time - self.uptime
if not self.shutdown_pending.wait(remaining):
logger.warning('Run time reached zero')
self.shutdown() | Counts down from MAX_WORKER_RUN_TIME. When it reaches zero shutdown
gracefully. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L351-L359 | null | class Worker:
"""Consumes tasks from queues and runs them.
:param app: An instance of :class:`~kuyruk.Kuyruk`
:param args: Command line arguments
"""
def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:
    """Initialize the worker from the app config and CLI arguments.

    :param app: An instance of :class:`~kuyruk.Kuyruk`.
    :param args: Parsed command line arguments. CLI values override the
        corresponding ``app.config`` settings when given.
    """
    self.kuyruk = app
    if not args.queues:
        args.queues = ['kuyruk']

    def add_host(queue: str) -> str:
        # Queues named "<name>.localhost" are rewritten to "<name>.<hostname>"
        # so they are consumed only by workers on this machine.
        if queue.endswith('.localhost'):
            queue = queue.rsplit('.localhost')[0]
            return "%s.%s" % (queue, self._hostname)
        else:
            return queue

    self._hostname = socket.gethostname()
    self.queues = [add_host(q) for q in args.queues]
    self._tasks = {}  # type: Dict[Tuple[str, str], Task]
    self.shutdown_pending = threading.Event()
    self.consuming = False
    self.current_task = None  # type: Task
    self.current_args = None  # type: Tuple
    self.current_kwargs = None  # type: Dict[str, Any]
    self._started_at = None  # type: float
    self._pid = os.getpid()
    self._logging_level = app.config.WORKER_LOGGING_LEVEL
    if args.logging_level is not None:
        self._logging_level = args.logging_level
    self._max_run_time = app.config.WORKER_MAX_RUN_TIME
    if args.max_run_time is not None:
        self._max_run_time = args.max_run_time
    self._max_load = app.config.WORKER_MAX_LOAD
    if args.max_load is not None:
        self._max_load = args.max_load
    if self._max_load == -1:
        # BUG FIX: the original wrote "self._max_load == multiprocessing.cpu_count()",
        # a no-op comparison, so the -1 sentinel never resolved to the CPU count.
        self._max_load = multiprocessing.cpu_count()
    self._threads = []  # type: List[threading.Thread]
    if self._max_load:
        self._threads.append(threading.Thread(target=self._watch_load))
    if self._max_run_time:
        self._threads.append(threading.Thread(target=self._shutdown_timer))
    signals.worker_init.send(self.kuyruk, worker=self)
def run(self) -> None:
"""Runs the worker and consumes messages from RabbitMQ.
Returns only after `shutdown()` is called.
"""
if self._logging_level:
logging.basicConfig(
level=getattr(logging, self._logging_level.upper()),
format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s")
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._handle_sigterm)
if platform.system() != 'Windows':
# These features will not be available on Windows, but that is OK.
# Read this issue for more details:
# https://github.com/cenkalti/kuyruk/issues/54
signal.signal(signal.SIGHUP, self._handle_sighup)
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
signal.signal(signal.SIGUSR2, self._handle_sigusr2)
self._started_at = os.times().elapsed
for t in self._threads:
t.start()
try:
signals.worker_start.send(self.kuyruk, worker=self)
self._consume_messages()
signals.worker_shutdown.send(self.kuyruk, worker=self)
finally:
self.shutdown_pending.set()
for t in self._threads:
t.join()
logger.debug("End run worker")
def _consume_messages(self) -> None:
with self.kuyruk.channel() as ch:
# Set prefetch count to 1. If we don't set this, RabbitMQ keeps
# sending messages while we are already working on a message.
ch.basic_qos(0, 1, True)
self._declare_queues(ch)
self._consume_queues(ch)
logger.info('Consumer started')
self._main_loop(ch)
def _main_loop(self, ch: amqp.Channel) -> None:
while not self.shutdown_pending.is_set():
self._pause_or_resume(ch)
try:
ch.connection.heartbeat_tick()
ch.connection.drain_events(timeout=1)
except socket.timeout:
pass
def _consumer_tag(self, queue: str) -> str:
return "%s:%s@%s" % (queue, self._pid, self._hostname)
def _declare_queues(self, ch: amqp.Channel) -> None:
for queue in self.queues:
logger.debug("queue_declare: %s", queue)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
def _pause_or_resume(self, channel: amqp.Channel) -> None:
if not self._max_load:
return
try:
load = self._current_load
except AttributeError:
should_pause = False
else:
should_pause = load > self._max_load
if should_pause and self.consuming:
logger.warning('Load is above the treshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)
self._cancel_queues(channel)
elif not should_pause and not self.consuming:
logger.warning('Load is below the treshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)
self._consume_queues(channel)
def _consume_queues(self, ch: amqp.Channel) -> None:
self.consuming = True
for queue in self.queues:
logger.debug("basic_consume: %s", queue)
ch.basic_consume(queue=queue, consumer_tag=self._consumer_tag(queue), callback=self._process_message)
def _cancel_queues(self, ch: amqp.Channel) -> None:
self.consuming = False
for queue in self.queues:
logger.debug("basic_cancel: %s", queue)
ch.basic_cancel(self._consumer_tag(queue))
def _process_message(self, message: amqp.Message) -> None:
"""Processes the message received from the queue."""
if self.shutdown_pending.is_set():
return
try:
if isinstance(message.body, bytes):
message.body = message.body.decode()
description = json.loads(message.body)
except Exception:
logger.error("Cannot decode message. Dropping. Message: %r", message.body)
traceback.print_exc()
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
logger.info("Processing task: %r", description)
self._process_description(message, description)
def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:
try:
task = self._import_task(description['module'], description['function'])
args, kwargs = description['args'], description['kwargs']
except Exception:
logger.error('Cannot import task')
exc_info = sys.exc_info()
signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
self._process_task(message, description, task, args, kwargs)
def _import_task(self, module: str, function: str) -> Task:
if (module, function) in self._tasks:
return self._tasks[(module, function)]
task = importer.import_object(module, function)
self._tasks[(module, function)] = task
return task
def _process_task(
self,
message: amqp.Message,
description: Dict[str, Any],
task: Task,
args: Tuple,
kwargs: Dict[str, Any],
) -> None:
queue = message.delivery_info['routing_key']
reply_to = message.properties.get('reply_to')
try:
result = self._run_task(message.channel.connection, task, args, kwargs)
except Reject:
logger.warning('Task is rejected')
message.channel.basic_reject(message.delivery_tag, requeue=True)
except Discard:
logger.warning('Task is discarded')
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
exc_info = sys.exc_info()
self._send_reply(reply_to, message.channel, None, exc_info)
except HeartbeatError as e:
logger.error('Error while sending heartbeat')
exc_info = e.exc_info
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
raise
except Exception:
logger.error('Task raised an exception')
exc_info = sys.exc_info()
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
self._send_reply(reply_to, message.channel, None, exc_info)
else:
logger.info('Task is successful')
message.channel.basic_ack(message.delivery_tag)
if reply_to:
self._send_reply(reply_to, message.channel, result, None)
finally:
logger.debug("Task is processed")
def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
hb = Heartbeat(connection, self._on_heartbeat_error)
hb.start()
self.current_task = task
self.current_args = args
self.current_kwargs = kwargs
try:
return self._apply_task(task, args, kwargs)
finally:
self.current_task = None
self.current_args = None
self.current_kwargs = None
hb.stop()
def _on_heartbeat_error(self, exc_info: ExcInfoType) -> None:
self._heartbeat_exc_info = exc_info
os.kill(os.getpid(), signal.SIGHUP)
@staticmethod
def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
"""Logs the time spent while running the task."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta))
def _send_reply(
self,
reply_to: str,
channel: amqp.Channel,
result: Any,
exc_info: ExcInfoType,
) -> None:
logger.debug("Sending reply result=%r", result)
reply = {'result': result}
if exc_info:
reply['exception'] = self._exc_info_dict(exc_info)
try:
body = json.dumps(reply)
except Exception as e:
logger.error('Cannot serialize result as JSON: %s', e)
exc_info = sys.exc_info()
reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}
body = json.dumps(reply)
msg = amqp.Message(body=body)
channel.basic_publish(msg, exchange="", routing_key=reply_to)
@staticmethod
def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:
type_, val, tb = exc_info
return {
'type': '%s.%s' % (type_.__module__, type_.__name__),
'value': str(val),
'traceback': ''.join(traceback.format_tb(tb)),
}
def _watch_load(self) -> None:
"""Pause consuming messages if lood goes above the allowed limit."""
while not self.shutdown_pending.wait(1):
self._current_load = os.getloadavg()[0]
@property
def uptime(self) -> float:
if not self._started_at:
return 0
return os.times().elapsed - self._started_at
def shutdown(self) -> None:
"""Exits after the current task is finished."""
logger.warning("Shutdown requested")
self.shutdown_pending.set()
def _handle_sigint(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGINT")
self.shutdown()
def _handle_sigterm(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGTERM")
self.shutdown()
def _handle_sighup(self, signum: int, frame: Any) -> None:
"""Used internally to fail the task when connection to RabbitMQ is
lost during the execution of the task.
"""
logger.warning("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
formatted_exception = ''.join(traceback.format_exception(*exc_info)) # noqa
raise HeartbeatError(exc_info)
@staticmethod
def _handle_sigusr1(signum: int, frame: Any) -> None:
"""Print stacktrace."""
print('=' * 70)
print(''.join(traceback.format_stack()))
print('-' * 70)
def _handle_sigusr2(self, signum: int, frame: Any) -> None:
"""Drop current task."""
logger.warning("Catched SIGUSR2")
if self.current_task:
logger.warning("Dropping current task...")
raise Discard
def drop_task(self) -> None:
os.kill(os.getpid(), signal.SIGUSR2)
|
cenkalti/kuyruk | kuyruk/worker.py | Worker._handle_sigint | python | def _handle_sigint(self, signum: int, frame: Any) -> None:
logger.warning("Catched SIGINT")
self.shutdown() | Shutdown after processing current task. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L366-L369 | null | class Worker:
"""Consumes tasks from queues and runs them.
:param app: An instance of :class:`~kuyruk.Kuyruk`
:param args: Command line arguments
"""
def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:
self.kuyruk = app
if not args.queues:
args.queues = ['kuyruk']
def add_host(queue: str) -> str:
if queue.endswith('.localhost'):
queue = queue.rsplit('.localhost')[0]
return "%s.%s" % (queue, self._hostname)
else:
return queue
self._hostname = socket.gethostname()
self.queues = [add_host(q) for q in args.queues]
self._tasks = {} # type: Dict[Tuple[str, str], Task]
self.shutdown_pending = threading.Event()
self.consuming = False
self.current_task = None # type: Task
self.current_args = None # type: Tuple
self.current_kwargs = None # type: Dict[str, Any]
self._started_at = None # type: float
self._pid = os.getpid()
self._logging_level = app.config.WORKER_LOGGING_LEVEL
if args.logging_level is not None:
self._logging_level = args.logging_level
self._max_run_time = app.config.WORKER_MAX_RUN_TIME
if args.max_run_time is not None:
self._max_run_time = args.max_run_time
self._max_load = app.config.WORKER_MAX_LOAD
if args.max_load is not None:
self._max_load = args.max_load
if self._max_load == -1:
self._max_load == multiprocessing.cpu_count()
self._threads = [] # type: List[threading.Thread]
if self._max_load:
self._threads.append(threading.Thread(target=self._watch_load))
if self._max_run_time:
self._threads.append(threading.Thread(target=self._shutdown_timer))
signals.worker_init.send(self.kuyruk, worker=self)
def run(self) -> None:
"""Runs the worker and consumes messages from RabbitMQ.
Returns only after `shutdown()` is called.
"""
if self._logging_level:
logging.basicConfig(
level=getattr(logging, self._logging_level.upper()),
format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s")
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._handle_sigterm)
if platform.system() != 'Windows':
# These features will not be available on Windows, but that is OK.
# Read this issue for more details:
# https://github.com/cenkalti/kuyruk/issues/54
signal.signal(signal.SIGHUP, self._handle_sighup)
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
signal.signal(signal.SIGUSR2, self._handle_sigusr2)
self._started_at = os.times().elapsed
for t in self._threads:
t.start()
try:
signals.worker_start.send(self.kuyruk, worker=self)
self._consume_messages()
signals.worker_shutdown.send(self.kuyruk, worker=self)
finally:
self.shutdown_pending.set()
for t in self._threads:
t.join()
logger.debug("End run worker")
def _consume_messages(self) -> None:
with self.kuyruk.channel() as ch:
# Set prefetch count to 1. If we don't set this, RabbitMQ keeps
# sending messages while we are already working on a message.
ch.basic_qos(0, 1, True)
self._declare_queues(ch)
self._consume_queues(ch)
logger.info('Consumer started')
self._main_loop(ch)
def _main_loop(self, ch: amqp.Channel) -> None:
while not self.shutdown_pending.is_set():
self._pause_or_resume(ch)
try:
ch.connection.heartbeat_tick()
ch.connection.drain_events(timeout=1)
except socket.timeout:
pass
def _consumer_tag(self, queue: str) -> str:
return "%s:%s@%s" % (queue, self._pid, self._hostname)
def _declare_queues(self, ch: amqp.Channel) -> None:
for queue in self.queues:
logger.debug("queue_declare: %s", queue)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
def _pause_or_resume(self, channel: amqp.Channel) -> None:
if not self._max_load:
return
try:
load = self._current_load
except AttributeError:
should_pause = False
else:
should_pause = load > self._max_load
if should_pause and self.consuming:
logger.warning('Load is above the treshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)
self._cancel_queues(channel)
elif not should_pause and not self.consuming:
logger.warning('Load is below the treshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)
self._consume_queues(channel)
def _consume_queues(self, ch: amqp.Channel) -> None:
self.consuming = True
for queue in self.queues:
logger.debug("basic_consume: %s", queue)
ch.basic_consume(queue=queue, consumer_tag=self._consumer_tag(queue), callback=self._process_message)
def _cancel_queues(self, ch: amqp.Channel) -> None:
self.consuming = False
for queue in self.queues:
logger.debug("basic_cancel: %s", queue)
ch.basic_cancel(self._consumer_tag(queue))
def _process_message(self, message: amqp.Message) -> None:
"""Processes the message received from the queue."""
if self.shutdown_pending.is_set():
return
try:
if isinstance(message.body, bytes):
message.body = message.body.decode()
description = json.loads(message.body)
except Exception:
logger.error("Cannot decode message. Dropping. Message: %r", message.body)
traceback.print_exc()
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
logger.info("Processing task: %r", description)
self._process_description(message, description)
def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:
try:
task = self._import_task(description['module'], description['function'])
args, kwargs = description['args'], description['kwargs']
except Exception:
logger.error('Cannot import task')
exc_info = sys.exc_info()
signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
self._process_task(message, description, task, args, kwargs)
def _import_task(self, module: str, function: str) -> Task:
if (module, function) in self._tasks:
return self._tasks[(module, function)]
task = importer.import_object(module, function)
self._tasks[(module, function)] = task
return task
def _process_task(
self,
message: amqp.Message,
description: Dict[str, Any],
task: Task,
args: Tuple,
kwargs: Dict[str, Any],
) -> None:
queue = message.delivery_info['routing_key']
reply_to = message.properties.get('reply_to')
try:
result = self._run_task(message.channel.connection, task, args, kwargs)
except Reject:
logger.warning('Task is rejected')
message.channel.basic_reject(message.delivery_tag, requeue=True)
except Discard:
logger.warning('Task is discarded')
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
exc_info = sys.exc_info()
self._send_reply(reply_to, message.channel, None, exc_info)
except HeartbeatError as e:
logger.error('Error while sending heartbeat')
exc_info = e.exc_info
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
raise
except Exception:
logger.error('Task raised an exception')
exc_info = sys.exc_info()
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
self._send_reply(reply_to, message.channel, None, exc_info)
else:
logger.info('Task is successful')
message.channel.basic_ack(message.delivery_tag)
if reply_to:
self._send_reply(reply_to, message.channel, result, None)
finally:
logger.debug("Task is processed")
def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
hb = Heartbeat(connection, self._on_heartbeat_error)
hb.start()
self.current_task = task
self.current_args = args
self.current_kwargs = kwargs
try:
return self._apply_task(task, args, kwargs)
finally:
self.current_task = None
self.current_args = None
self.current_kwargs = None
hb.stop()
def _on_heartbeat_error(self, exc_info: ExcInfoType) -> None:
self._heartbeat_exc_info = exc_info
os.kill(os.getpid(), signal.SIGHUP)
@staticmethod
def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
"""Logs the time spent while running the task."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta))
def _send_reply(
self,
reply_to: str,
channel: amqp.Channel,
result: Any,
exc_info: ExcInfoType,
) -> None:
logger.debug("Sending reply result=%r", result)
reply = {'result': result}
if exc_info:
reply['exception'] = self._exc_info_dict(exc_info)
try:
body = json.dumps(reply)
except Exception as e:
logger.error('Cannot serialize result as JSON: %s', e)
exc_info = sys.exc_info()
reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}
body = json.dumps(reply)
msg = amqp.Message(body=body)
channel.basic_publish(msg, exchange="", routing_key=reply_to)
@staticmethod
def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:
type_, val, tb = exc_info
return {
'type': '%s.%s' % (type_.__module__, type_.__name__),
'value': str(val),
'traceback': ''.join(traceback.format_tb(tb)),
}
def _watch_load(self) -> None:
"""Pause consuming messages if lood goes above the allowed limit."""
while not self.shutdown_pending.wait(1):
self._current_load = os.getloadavg()[0]
@property
def uptime(self) -> float:
if not self._started_at:
return 0
return os.times().elapsed - self._started_at
def _shutdown_timer(self) -> None:
"""Counts down from MAX_WORKER_RUN_TIME. When it reaches zero sutdown
gracefully.
"""
remaining = self._max_run_time - self.uptime
if not self.shutdown_pending.wait(remaining):
logger.warning('Run time reached zero')
self.shutdown()
def shutdown(self) -> None:
"""Exits after the current task is finished."""
logger.warning("Shutdown requested")
self.shutdown_pending.set()
def _handle_sigterm(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGTERM")
self.shutdown()
def _handle_sighup(self, signum: int, frame: Any) -> None:
"""Used internally to fail the task when connection to RabbitMQ is
lost during the execution of the task.
"""
logger.warning("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
formatted_exception = ''.join(traceback.format_exception(*exc_info)) # noqa
raise HeartbeatError(exc_info)
@staticmethod
def _handle_sigusr1(signum: int, frame: Any) -> None:
"""Print stacktrace."""
print('=' * 70)
print(''.join(traceback.format_stack()))
print('-' * 70)
def _handle_sigusr2(self, signum: int, frame: Any) -> None:
"""Drop current task."""
logger.warning("Catched SIGUSR2")
if self.current_task:
logger.warning("Dropping current task...")
raise Discard
def drop_task(self) -> None:
os.kill(os.getpid(), signal.SIGUSR2)
|
cenkalti/kuyruk | kuyruk/worker.py | Worker._handle_sighup | python | def _handle_sighup(self, signum: int, frame: Any) -> None:
logger.warning("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
formatted_exception = ''.join(traceback.format_exception(*exc_info)) # noqa
raise HeartbeatError(exc_info) | Used internally to fail the task when connection to RabbitMQ is
lost during the execution of the task. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L376-L386 | null | class Worker:
"""Consumes tasks from queues and runs them.
:param app: An instance of :class:`~kuyruk.Kuyruk`
:param args: Command line arguments
"""
def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:
self.kuyruk = app
if not args.queues:
args.queues = ['kuyruk']
def add_host(queue: str) -> str:
if queue.endswith('.localhost'):
queue = queue.rsplit('.localhost')[0]
return "%s.%s" % (queue, self._hostname)
else:
return queue
self._hostname = socket.gethostname()
self.queues = [add_host(q) for q in args.queues]
self._tasks = {} # type: Dict[Tuple[str, str], Task]
self.shutdown_pending = threading.Event()
self.consuming = False
self.current_task = None # type: Task
self.current_args = None # type: Tuple
self.current_kwargs = None # type: Dict[str, Any]
self._started_at = None # type: float
self._pid = os.getpid()
self._logging_level = app.config.WORKER_LOGGING_LEVEL
if args.logging_level is not None:
self._logging_level = args.logging_level
self._max_run_time = app.config.WORKER_MAX_RUN_TIME
if args.max_run_time is not None:
self._max_run_time = args.max_run_time
self._max_load = app.config.WORKER_MAX_LOAD
if args.max_load is not None:
self._max_load = args.max_load
if self._max_load == -1:
self._max_load == multiprocessing.cpu_count()
self._threads = [] # type: List[threading.Thread]
if self._max_load:
self._threads.append(threading.Thread(target=self._watch_load))
if self._max_run_time:
self._threads.append(threading.Thread(target=self._shutdown_timer))
signals.worker_init.send(self.kuyruk, worker=self)
def run(self) -> None:
"""Runs the worker and consumes messages from RabbitMQ.
Returns only after `shutdown()` is called.
"""
if self._logging_level:
logging.basicConfig(
level=getattr(logging, self._logging_level.upper()),
format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s")
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._handle_sigterm)
if platform.system() != 'Windows':
# These features will not be available on Windows, but that is OK.
# Read this issue for more details:
# https://github.com/cenkalti/kuyruk/issues/54
signal.signal(signal.SIGHUP, self._handle_sighup)
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
signal.signal(signal.SIGUSR2, self._handle_sigusr2)
self._started_at = os.times().elapsed
for t in self._threads:
t.start()
try:
signals.worker_start.send(self.kuyruk, worker=self)
self._consume_messages()
signals.worker_shutdown.send(self.kuyruk, worker=self)
finally:
self.shutdown_pending.set()
for t in self._threads:
t.join()
logger.debug("End run worker")
def _consume_messages(self) -> None:
with self.kuyruk.channel() as ch:
# Set prefetch count to 1. If we don't set this, RabbitMQ keeps
# sending messages while we are already working on a message.
ch.basic_qos(0, 1, True)
self._declare_queues(ch)
self._consume_queues(ch)
logger.info('Consumer started')
self._main_loop(ch)
def _main_loop(self, ch: amqp.Channel) -> None:
while not self.shutdown_pending.is_set():
self._pause_or_resume(ch)
try:
ch.connection.heartbeat_tick()
ch.connection.drain_events(timeout=1)
except socket.timeout:
pass
def _consumer_tag(self, queue: str) -> str:
return "%s:%s@%s" % (queue, self._pid, self._hostname)
def _declare_queues(self, ch: amqp.Channel) -> None:
for queue in self.queues:
logger.debug("queue_declare: %s", queue)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
def _pause_or_resume(self, channel: amqp.Channel) -> None:
if not self._max_load:
return
try:
load = self._current_load
except AttributeError:
should_pause = False
else:
should_pause = load > self._max_load
if should_pause and self.consuming:
logger.warning('Load is above the treshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)
self._cancel_queues(channel)
elif not should_pause and not self.consuming:
logger.warning('Load is below the treshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)
self._consume_queues(channel)
def _consume_queues(self, ch: amqp.Channel) -> None:
self.consuming = True
for queue in self.queues:
logger.debug("basic_consume: %s", queue)
ch.basic_consume(queue=queue, consumer_tag=self._consumer_tag(queue), callback=self._process_message)
def _cancel_queues(self, ch: amqp.Channel) -> None:
self.consuming = False
for queue in self.queues:
logger.debug("basic_cancel: %s", queue)
ch.basic_cancel(self._consumer_tag(queue))
def _process_message(self, message: amqp.Message) -> None:
"""Processes the message received from the queue."""
if self.shutdown_pending.is_set():
return
try:
if isinstance(message.body, bytes):
message.body = message.body.decode()
description = json.loads(message.body)
except Exception:
logger.error("Cannot decode message. Dropping. Message: %r", message.body)
traceback.print_exc()
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
logger.info("Processing task: %r", description)
self._process_description(message, description)
def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:
try:
task = self._import_task(description['module'], description['function'])
args, kwargs = description['args'], description['kwargs']
except Exception:
logger.error('Cannot import task')
exc_info = sys.exc_info()
signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
self._process_task(message, description, task, args, kwargs)
def _import_task(self, module: str, function: str) -> Task:
if (module, function) in self._tasks:
return self._tasks[(module, function)]
task = importer.import_object(module, function)
self._tasks[(module, function)] = task
return task
def _process_task(
self,
message: amqp.Message,
description: Dict[str, Any],
task: Task,
args: Tuple,
kwargs: Dict[str, Any],
) -> None:
queue = message.delivery_info['routing_key']
reply_to = message.properties.get('reply_to')
try:
result = self._run_task(message.channel.connection, task, args, kwargs)
except Reject:
logger.warning('Task is rejected')
message.channel.basic_reject(message.delivery_tag, requeue=True)
except Discard:
logger.warning('Task is discarded')
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
exc_info = sys.exc_info()
self._send_reply(reply_to, message.channel, None, exc_info)
except HeartbeatError as e:
logger.error('Error while sending heartbeat')
exc_info = e.exc_info
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
raise
except Exception:
logger.error('Task raised an exception')
exc_info = sys.exc_info()
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
self._send_reply(reply_to, message.channel, None, exc_info)
else:
logger.info('Task is successful')
message.channel.basic_ack(message.delivery_tag)
if reply_to:
self._send_reply(reply_to, message.channel, result, None)
finally:
logger.debug("Task is processed")
def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
hb = Heartbeat(connection, self._on_heartbeat_error)
hb.start()
self.current_task = task
self.current_args = args
self.current_kwargs = kwargs
try:
return self._apply_task(task, args, kwargs)
finally:
self.current_task = None
self.current_args = None
self.current_kwargs = None
hb.stop()
def _on_heartbeat_error(self, exc_info: ExcInfoType) -> None:
self._heartbeat_exc_info = exc_info
os.kill(os.getpid(), signal.SIGHUP)
@staticmethod
def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
"""Logs the time spent while running the task."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta))
def _send_reply(
self,
reply_to: str,
channel: amqp.Channel,
result: Any,
exc_info: ExcInfoType,
) -> None:
logger.debug("Sending reply result=%r", result)
reply = {'result': result}
if exc_info:
reply['exception'] = self._exc_info_dict(exc_info)
try:
body = json.dumps(reply)
except Exception as e:
logger.error('Cannot serialize result as JSON: %s', e)
exc_info = sys.exc_info()
reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}
body = json.dumps(reply)
msg = amqp.Message(body=body)
channel.basic_publish(msg, exchange="", routing_key=reply_to)
@staticmethod
def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:
type_, val, tb = exc_info
return {
'type': '%s.%s' % (type_.__module__, type_.__name__),
'value': str(val),
'traceback': ''.join(traceback.format_tb(tb)),
}
def _watch_load(self) -> None:
"""Pause consuming messages if lood goes above the allowed limit."""
while not self.shutdown_pending.wait(1):
self._current_load = os.getloadavg()[0]
@property
def uptime(self) -> float:
if not self._started_at:
return 0
return os.times().elapsed - self._started_at
def _shutdown_timer(self) -> None:
"""Counts down from MAX_WORKER_RUN_TIME. When it reaches zero sutdown
gracefully.
"""
remaining = self._max_run_time - self.uptime
if not self.shutdown_pending.wait(remaining):
logger.warning('Run time reached zero')
self.shutdown()
def shutdown(self) -> None:
"""Exits after the current task is finished."""
logger.warning("Shutdown requested")
self.shutdown_pending.set()
def _handle_sigint(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGINT")
self.shutdown()
def _handle_sigterm(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGTERM")
self.shutdown()
@staticmethod
def _handle_sigusr1(signum: int, frame: Any) -> None:
"""Print stacktrace."""
print('=' * 70)
print(''.join(traceback.format_stack()))
print('-' * 70)
def _handle_sigusr2(self, signum: int, frame: Any) -> None:
"""Drop current task."""
logger.warning("Catched SIGUSR2")
if self.current_task:
logger.warning("Dropping current task...")
raise Discard
def drop_task(self) -> None:
os.kill(os.getpid(), signal.SIGUSR2)
|
cenkalti/kuyruk | kuyruk/worker.py | Worker._handle_sigusr1 | python | def _handle_sigusr1(signum: int, frame: Any) -> None:
print('=' * 70)
print(''.join(traceback.format_stack()))
print('-' * 70) | Print stacktrace. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L389-L393 | null | class Worker:
"""Consumes tasks from queues and runs them.
:param app: An instance of :class:`~kuyruk.Kuyruk`
:param args: Command line arguments
"""
def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:
self.kuyruk = app
if not args.queues:
args.queues = ['kuyruk']
def add_host(queue: str) -> str:
if queue.endswith('.localhost'):
queue = queue.rsplit('.localhost')[0]
return "%s.%s" % (queue, self._hostname)
else:
return queue
self._hostname = socket.gethostname()
self.queues = [add_host(q) for q in args.queues]
self._tasks = {} # type: Dict[Tuple[str, str], Task]
self.shutdown_pending = threading.Event()
self.consuming = False
self.current_task = None # type: Task
self.current_args = None # type: Tuple
self.current_kwargs = None # type: Dict[str, Any]
self._started_at = None # type: float
self._pid = os.getpid()
self._logging_level = app.config.WORKER_LOGGING_LEVEL
if args.logging_level is not None:
self._logging_level = args.logging_level
self._max_run_time = app.config.WORKER_MAX_RUN_TIME
if args.max_run_time is not None:
self._max_run_time = args.max_run_time
self._max_load = app.config.WORKER_MAX_LOAD
if args.max_load is not None:
self._max_load = args.max_load
if self._max_load == -1:
self._max_load == multiprocessing.cpu_count()
self._threads = [] # type: List[threading.Thread]
if self._max_load:
self._threads.append(threading.Thread(target=self._watch_load))
if self._max_run_time:
self._threads.append(threading.Thread(target=self._shutdown_timer))
signals.worker_init.send(self.kuyruk, worker=self)
def run(self) -> None:
"""Runs the worker and consumes messages from RabbitMQ.
Returns only after `shutdown()` is called.
"""
if self._logging_level:
logging.basicConfig(
level=getattr(logging, self._logging_level.upper()),
format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s")
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._handle_sigterm)
if platform.system() != 'Windows':
# These features will not be available on Windows, but that is OK.
# Read this issue for more details:
# https://github.com/cenkalti/kuyruk/issues/54
signal.signal(signal.SIGHUP, self._handle_sighup)
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
signal.signal(signal.SIGUSR2, self._handle_sigusr2)
self._started_at = os.times().elapsed
for t in self._threads:
t.start()
try:
signals.worker_start.send(self.kuyruk, worker=self)
self._consume_messages()
signals.worker_shutdown.send(self.kuyruk, worker=self)
finally:
self.shutdown_pending.set()
for t in self._threads:
t.join()
logger.debug("End run worker")
def _consume_messages(self) -> None:
with self.kuyruk.channel() as ch:
# Set prefetch count to 1. If we don't set this, RabbitMQ keeps
# sending messages while we are already working on a message.
ch.basic_qos(0, 1, True)
self._declare_queues(ch)
self._consume_queues(ch)
logger.info('Consumer started')
self._main_loop(ch)
def _main_loop(self, ch: amqp.Channel) -> None:
while not self.shutdown_pending.is_set():
self._pause_or_resume(ch)
try:
ch.connection.heartbeat_tick()
ch.connection.drain_events(timeout=1)
except socket.timeout:
pass
def _consumer_tag(self, queue: str) -> str:
return "%s:%s@%s" % (queue, self._pid, self._hostname)
def _declare_queues(self, ch: amqp.Channel) -> None:
for queue in self.queues:
logger.debug("queue_declare: %s", queue)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
def _pause_or_resume(self, channel: amqp.Channel) -> None:
if not self._max_load:
return
try:
load = self._current_load
except AttributeError:
should_pause = False
else:
should_pause = load > self._max_load
if should_pause and self.consuming:
logger.warning('Load is above the treshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)
self._cancel_queues(channel)
elif not should_pause and not self.consuming:
logger.warning('Load is below the treshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)
self._consume_queues(channel)
def _consume_queues(self, ch: amqp.Channel) -> None:
self.consuming = True
for queue in self.queues:
logger.debug("basic_consume: %s", queue)
ch.basic_consume(queue=queue, consumer_tag=self._consumer_tag(queue), callback=self._process_message)
def _cancel_queues(self, ch: amqp.Channel) -> None:
self.consuming = False
for queue in self.queues:
logger.debug("basic_cancel: %s", queue)
ch.basic_cancel(self._consumer_tag(queue))
def _process_message(self, message: amqp.Message) -> None:
"""Processes the message received from the queue."""
if self.shutdown_pending.is_set():
return
try:
if isinstance(message.body, bytes):
message.body = message.body.decode()
description = json.loads(message.body)
except Exception:
logger.error("Cannot decode message. Dropping. Message: %r", message.body)
traceback.print_exc()
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
logger.info("Processing task: %r", description)
self._process_description(message, description)
def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:
try:
task = self._import_task(description['module'], description['function'])
args, kwargs = description['args'], description['kwargs']
except Exception:
logger.error('Cannot import task')
exc_info = sys.exc_info()
signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
self._process_task(message, description, task, args, kwargs)
def _import_task(self, module: str, function: str) -> Task:
if (module, function) in self._tasks:
return self._tasks[(module, function)]
task = importer.import_object(module, function)
self._tasks[(module, function)] = task
return task
def _process_task(
self,
message: amqp.Message,
description: Dict[str, Any],
task: Task,
args: Tuple,
kwargs: Dict[str, Any],
) -> None:
queue = message.delivery_info['routing_key']
reply_to = message.properties.get('reply_to')
try:
result = self._run_task(message.channel.connection, task, args, kwargs)
except Reject:
logger.warning('Task is rejected')
message.channel.basic_reject(message.delivery_tag, requeue=True)
except Discard:
logger.warning('Task is discarded')
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
exc_info = sys.exc_info()
self._send_reply(reply_to, message.channel, None, exc_info)
except HeartbeatError as e:
logger.error('Error while sending heartbeat')
exc_info = e.exc_info
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
raise
except Exception:
logger.error('Task raised an exception')
exc_info = sys.exc_info()
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
self._send_reply(reply_to, message.channel, None, exc_info)
else:
logger.info('Task is successful')
message.channel.basic_ack(message.delivery_tag)
if reply_to:
self._send_reply(reply_to, message.channel, result, None)
finally:
logger.debug("Task is processed")
def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
hb = Heartbeat(connection, self._on_heartbeat_error)
hb.start()
self.current_task = task
self.current_args = args
self.current_kwargs = kwargs
try:
return self._apply_task(task, args, kwargs)
finally:
self.current_task = None
self.current_args = None
self.current_kwargs = None
hb.stop()
def _on_heartbeat_error(self, exc_info: ExcInfoType) -> None:
self._heartbeat_exc_info = exc_info
os.kill(os.getpid(), signal.SIGHUP)
@staticmethod
def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
"""Logs the time spent while running the task."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta))
def _send_reply(
self,
reply_to: str,
channel: amqp.Channel,
result: Any,
exc_info: ExcInfoType,
) -> None:
logger.debug("Sending reply result=%r", result)
reply = {'result': result}
if exc_info:
reply['exception'] = self._exc_info_dict(exc_info)
try:
body = json.dumps(reply)
except Exception as e:
logger.error('Cannot serialize result as JSON: %s', e)
exc_info = sys.exc_info()
reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}
body = json.dumps(reply)
msg = amqp.Message(body=body)
channel.basic_publish(msg, exchange="", routing_key=reply_to)
@staticmethod
def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:
type_, val, tb = exc_info
return {
'type': '%s.%s' % (type_.__module__, type_.__name__),
'value': str(val),
'traceback': ''.join(traceback.format_tb(tb)),
}
def _watch_load(self) -> None:
"""Pause consuming messages if lood goes above the allowed limit."""
while not self.shutdown_pending.wait(1):
self._current_load = os.getloadavg()[0]
@property
def uptime(self) -> float:
if not self._started_at:
return 0
return os.times().elapsed - self._started_at
def _shutdown_timer(self) -> None:
"""Counts down from MAX_WORKER_RUN_TIME. When it reaches zero sutdown
gracefully.
"""
remaining = self._max_run_time - self.uptime
if not self.shutdown_pending.wait(remaining):
logger.warning('Run time reached zero')
self.shutdown()
def shutdown(self) -> None:
"""Exits after the current task is finished."""
logger.warning("Shutdown requested")
self.shutdown_pending.set()
def _handle_sigint(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGINT")
self.shutdown()
def _handle_sigterm(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGTERM")
self.shutdown()
def _handle_sighup(self, signum: int, frame: Any) -> None:
"""Used internally to fail the task when connection to RabbitMQ is
lost during the execution of the task.
"""
logger.warning("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
formatted_exception = ''.join(traceback.format_exception(*exc_info)) # noqa
raise HeartbeatError(exc_info)
@staticmethod
def _handle_sigusr2(self, signum: int, frame: Any) -> None:
"""Drop current task."""
logger.warning("Catched SIGUSR2")
if self.current_task:
logger.warning("Dropping current task...")
raise Discard
def drop_task(self) -> None:
os.kill(os.getpid(), signal.SIGUSR2)
|
cenkalti/kuyruk | kuyruk/worker.py | Worker._handle_sigusr2 | python | def _handle_sigusr2(self, signum: int, frame: Any) -> None:
logger.warning("Catched SIGUSR2")
if self.current_task:
logger.warning("Dropping current task...")
raise Discard | Drop current task. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/worker.py#L395-L400 | null | class Worker:
"""Consumes tasks from queues and runs them.
:param app: An instance of :class:`~kuyruk.Kuyruk`
:param args: Command line arguments
"""
def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:
self.kuyruk = app
if not args.queues:
args.queues = ['kuyruk']
def add_host(queue: str) -> str:
if queue.endswith('.localhost'):
queue = queue.rsplit('.localhost')[0]
return "%s.%s" % (queue, self._hostname)
else:
return queue
self._hostname = socket.gethostname()
self.queues = [add_host(q) for q in args.queues]
self._tasks = {} # type: Dict[Tuple[str, str], Task]
self.shutdown_pending = threading.Event()
self.consuming = False
self.current_task = None # type: Task
self.current_args = None # type: Tuple
self.current_kwargs = None # type: Dict[str, Any]
self._started_at = None # type: float
self._pid = os.getpid()
self._logging_level = app.config.WORKER_LOGGING_LEVEL
if args.logging_level is not None:
self._logging_level = args.logging_level
self._max_run_time = app.config.WORKER_MAX_RUN_TIME
if args.max_run_time is not None:
self._max_run_time = args.max_run_time
self._max_load = app.config.WORKER_MAX_LOAD
if args.max_load is not None:
self._max_load = args.max_load
if self._max_load == -1:
self._max_load == multiprocessing.cpu_count()
self._threads = [] # type: List[threading.Thread]
if self._max_load:
self._threads.append(threading.Thread(target=self._watch_load))
if self._max_run_time:
self._threads.append(threading.Thread(target=self._shutdown_timer))
signals.worker_init.send(self.kuyruk, worker=self)
def run(self) -> None:
"""Runs the worker and consumes messages from RabbitMQ.
Returns only after `shutdown()` is called.
"""
if self._logging_level:
logging.basicConfig(
level=getattr(logging, self._logging_level.upper()),
format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s")
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._handle_sigterm)
if platform.system() != 'Windows':
# These features will not be available on Windows, but that is OK.
# Read this issue for more details:
# https://github.com/cenkalti/kuyruk/issues/54
signal.signal(signal.SIGHUP, self._handle_sighup)
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
signal.signal(signal.SIGUSR2, self._handle_sigusr2)
self._started_at = os.times().elapsed
for t in self._threads:
t.start()
try:
signals.worker_start.send(self.kuyruk, worker=self)
self._consume_messages()
signals.worker_shutdown.send(self.kuyruk, worker=self)
finally:
self.shutdown_pending.set()
for t in self._threads:
t.join()
logger.debug("End run worker")
def _consume_messages(self) -> None:
with self.kuyruk.channel() as ch:
# Set prefetch count to 1. If we don't set this, RabbitMQ keeps
# sending messages while we are already working on a message.
ch.basic_qos(0, 1, True)
self._declare_queues(ch)
self._consume_queues(ch)
logger.info('Consumer started')
self._main_loop(ch)
def _main_loop(self, ch: amqp.Channel) -> None:
while not self.shutdown_pending.is_set():
self._pause_or_resume(ch)
try:
ch.connection.heartbeat_tick()
ch.connection.drain_events(timeout=1)
except socket.timeout:
pass
def _consumer_tag(self, queue: str) -> str:
return "%s:%s@%s" % (queue, self._pid, self._hostname)
def _declare_queues(self, ch: amqp.Channel) -> None:
for queue in self.queues:
logger.debug("queue_declare: %s", queue)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
def _pause_or_resume(self, channel: amqp.Channel) -> None:
if not self._max_load:
return
try:
load = self._current_load
except AttributeError:
should_pause = False
else:
should_pause = load > self._max_load
if should_pause and self.consuming:
logger.warning('Load is above the treshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)
self._cancel_queues(channel)
elif not should_pause and not self.consuming:
logger.warning('Load is below the treshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)
self._consume_queues(channel)
def _consume_queues(self, ch: amqp.Channel) -> None:
self.consuming = True
for queue in self.queues:
logger.debug("basic_consume: %s", queue)
ch.basic_consume(queue=queue, consumer_tag=self._consumer_tag(queue), callback=self._process_message)
def _cancel_queues(self, ch: amqp.Channel) -> None:
self.consuming = False
for queue in self.queues:
logger.debug("basic_cancel: %s", queue)
ch.basic_cancel(self._consumer_tag(queue))
def _process_message(self, message: amqp.Message) -> None:
"""Processes the message received from the queue."""
if self.shutdown_pending.is_set():
return
try:
if isinstance(message.body, bytes):
message.body = message.body.decode()
description = json.loads(message.body)
except Exception:
logger.error("Cannot decode message. Dropping. Message: %r", message.body)
traceback.print_exc()
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
logger.info("Processing task: %r", description)
self._process_description(message, description)
def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:
try:
task = self._import_task(description['module'], description['function'])
args, kwargs = description['args'], description['kwargs']
except Exception:
logger.error('Cannot import task')
exc_info = sys.exc_info()
signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
self._process_task(message, description, task, args, kwargs)
def _import_task(self, module: str, function: str) -> Task:
if (module, function) in self._tasks:
return self._tasks[(module, function)]
task = importer.import_object(module, function)
self._tasks[(module, function)] = task
return task
def _process_task(
self,
message: amqp.Message,
description: Dict[str, Any],
task: Task,
args: Tuple,
kwargs: Dict[str, Any],
) -> None:
queue = message.delivery_info['routing_key']
reply_to = message.properties.get('reply_to')
try:
result = self._run_task(message.channel.connection, task, args, kwargs)
except Reject:
logger.warning('Task is rejected')
message.channel.basic_reject(message.delivery_tag, requeue=True)
except Discard:
logger.warning('Task is discarded')
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
exc_info = sys.exc_info()
self._send_reply(reply_to, message.channel, None, exc_info)
except HeartbeatError as e:
logger.error('Error while sending heartbeat')
exc_info = e.exc_info
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
raise
except Exception:
logger.error('Task raised an exception')
exc_info = sys.exc_info()
logger.error(''.join(traceback.format_exception(*exc_info)))
signals.worker_failure.send(
self.kuyruk,
description=description,
task=task,
args=args,
kwargs=kwargs,
exc_info=exc_info,
worker=self,
queue=queue)
message.channel.basic_reject(message.delivery_tag, requeue=False)
if reply_to:
self._send_reply(reply_to, message.channel, None, exc_info)
else:
logger.info('Task is successful')
message.channel.basic_ack(message.delivery_tag)
if reply_to:
self._send_reply(reply_to, message.channel, result, None)
finally:
logger.debug("Task is processed")
def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
hb = Heartbeat(connection, self._on_heartbeat_error)
hb.start()
self.current_task = task
self.current_args = args
self.current_kwargs = kwargs
try:
return self._apply_task(task, args, kwargs)
finally:
self.current_task = None
self.current_args = None
self.current_kwargs = None
hb.stop()
def _on_heartbeat_error(self, exc_info: ExcInfoType) -> None:
self._heartbeat_exc_info = exc_info
os.kill(os.getpid(), signal.SIGHUP)
@staticmethod
def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
"""Logs the time spent while running the task."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta))
def _send_reply(
self,
reply_to: str,
channel: amqp.Channel,
result: Any,
exc_info: ExcInfoType,
) -> None:
logger.debug("Sending reply result=%r", result)
reply = {'result': result}
if exc_info:
reply['exception'] = self._exc_info_dict(exc_info)
try:
body = json.dumps(reply)
except Exception as e:
logger.error('Cannot serialize result as JSON: %s', e)
exc_info = sys.exc_info()
reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}
body = json.dumps(reply)
msg = amqp.Message(body=body)
channel.basic_publish(msg, exchange="", routing_key=reply_to)
@staticmethod
def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:
type_, val, tb = exc_info
return {
'type': '%s.%s' % (type_.__module__, type_.__name__),
'value': str(val),
'traceback': ''.join(traceback.format_tb(tb)),
}
def _watch_load(self) -> None:
"""Pause consuming messages if lood goes above the allowed limit."""
while not self.shutdown_pending.wait(1):
self._current_load = os.getloadavg()[0]
@property
def uptime(self) -> float:
if not self._started_at:
return 0
return os.times().elapsed - self._started_at
def _shutdown_timer(self) -> None:
"""Counts down from MAX_WORKER_RUN_TIME. When it reaches zero sutdown
gracefully.
"""
remaining = self._max_run_time - self.uptime
if not self.shutdown_pending.wait(remaining):
logger.warning('Run time reached zero')
self.shutdown()
def shutdown(self) -> None:
"""Exits after the current task is finished."""
logger.warning("Shutdown requested")
self.shutdown_pending.set()
def _handle_sigint(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGINT")
self.shutdown()
def _handle_sigterm(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning("Catched SIGTERM")
self.shutdown()
def _handle_sighup(self, signum: int, frame: Any) -> None:
"""Used internally to fail the task when connection to RabbitMQ is
lost during the execution of the task.
"""
logger.warning("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
formatted_exception = ''.join(traceback.format_exception(*exc_info)) # noqa
raise HeartbeatError(exc_info)
@staticmethod
def _handle_sigusr1(signum: int, frame: Any) -> None:
"""Print stacktrace."""
print('=' * 70)
print(''.join(traceback.format_stack()))
print('-' * 70)
def drop_task(self) -> None:
os.kill(os.getpid(), signal.SIGUSR2)
|
cenkalti/kuyruk | kuyruk/task.py | Task.send_to_queue | python | def send_to_queue(
self,
args: Tuple=(),
kwargs: Dict[str, Any]={},
host: str=None,
wait_result: Union[int, float]=None,
message_ttl: Union[int, float]=None,
) -> Any:
if self.kuyruk.config.EAGER:
# Run the task in current process
result = self.apply(*args, **kwargs)
return result if wait_result else None
logger.debug("Task.send_to_queue args=%r, kwargs=%r", args, kwargs)
queue = self._queue_for_host(host)
description = self._get_description(args, kwargs)
self._send_signal(signals.task_presend, args=args, kwargs=kwargs, description=description)
body = json.dumps(description)
msg = amqp.Message(body=body)
if wait_result:
# Use direct reply-to feature from RabbitMQ:
# https://www.rabbitmq.com/direct-reply-to.html
msg.properties['reply_to'] = 'amq.rabbitmq.reply-to'
if message_ttl:
msg.properties['expiration'] = str(int(message_ttl * 1000))
with self.kuyruk.channel() as ch:
if wait_result:
result = Result(ch.connection)
ch.basic_consume(queue='amq.rabbitmq.reply-to', no_ack=True, callback=result.process_message)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
ch.basic_publish(msg, exchange="", routing_key=queue)
self._send_signal(signals.task_postsend, args=args, kwargs=kwargs, description=description)
if wait_result:
return result.wait(wait_result) | Sends a message to the queue.
A worker will run the task's function when it receives the message.
:param args: Arguments that will be passed to task on execution.
:param kwargs: Keyword arguments that will be passed to task
on execution.
:param host: Send this task to specific host. ``host`` will be
appended to the queue name. If ``host`` is "localhost", hostname
of the server will be appended to the queue name.
:param wait_result:
Wait for result from worker for ``wait_result`` seconds.
If timeout occurs,
:class:`~kuyruk.exceptions.ResultTimeout` is raised.
If excecption occurs in worker,
:class:`~kuyruk.exceptions.RemoteException` is raised.
:param message_ttl:
If set, message will be destroyed in queue after ``message_ttl``
seconds.
:return: Result from worker if ``wait_result`` is set,
else :const:`None`. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/task.py#L69-L130 | [
"def wait(self, timeout: Union[float, int]) -> None:\n logger.debug(\"Waiting for task result\")\n\n start = monotonic()\n while True:\n if self.exception:\n raise RemoteException(self.exception['type'],\n self.exception['value'],\n self.exception['traceback'])\n if self.result:\n return self.result\n\n if monotonic() - start > timeout:\n raise ResultTimeout\n\n try:\n self._connection.heartbeat_tick()\n self._connection.drain_events(timeout=1)\n except socket.timeout:\n pass\n",
"def _queue_for_host(self, host: str) -> str:\n if not host:\n return self.queue\n if host == 'localhost':\n host = socket.gethostname()\n return \"%s.%s\" % (self.queue, host)\n",
"def _get_description(self, args: Tuple, kwargs: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Return the dictionary to be sent to the queue.\"\"\"\n return {\n 'id': uuid1().hex,\n 'args': args,\n 'kwargs': kwargs,\n 'module': self._module_name,\n 'function': self.f.__name__,\n 'sender_hostname': socket.gethostname(),\n 'sender_pid': os.getpid(),\n 'sender_cmd': ' '.join(sys.argv),\n 'sender_timestamp': datetime.utcnow().isoformat()[:19],\n }\n",
"def _send_signal(self, sig: Signal, **data: Any) -> None:\n sig.send(self.kuyruk, task=self, **data)\n",
"def apply(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Called by workers to run the wrapped function.\n You may call it yourself if you want to run the task in current process\n without sending to the queue.\n\n If task has a `retry` property it will be retried on failure.\n\n If task has a `max_run_time` property the task will not be allowed to\n run more than that.\n \"\"\"\n def send_signal(sig: Signal, **extra: Any) -> None:\n self._send_signal(sig, args=args, kwargs=kwargs, **extra)\n\n logger.debug(\"Applying %r, args=%r, kwargs=%r\", self, args, kwargs)\n\n send_signal(signals.task_preapply)\n try:\n tries = 1 + self.retry\n while 1:\n tries -= 1\n send_signal(signals.task_prerun)\n try:\n with time_limit(self.max_run_time or 0):\n return self.f(*args, **kwargs)\n except Exception:\n send_signal(signals.task_error, exc_info=sys.exc_info())\n if tries <= 0:\n raise\n else:\n break\n finally:\n send_signal(signals.task_postrun)\n except Exception:\n send_signal(signals.task_failure, exc_info=sys.exc_info())\n raise\n else:\n send_signal(signals.task_success)\n finally:\n send_signal(signals.task_postapply)\n"
] | class Task:
"""Calling a :class:`~kuyruk.Task` object serializes the task to JSON
and sends it to the queue.
:param retry: Retry this times before give up.
The failed task will be retried in the same worker.
:param max_run_time: Maximum allowed time in seconds for task to
complete.
"""
def __init__(self, f: Callable, kuyruk: 'Kuyruk', queue: str, retry: int=0, max_run_time: int=None) -> None:
self.f = f
self.kuyruk = kuyruk
self.queue = queue
self.retry = retry
self.max_run_time = max_run_time
self._send_signal(signals.task_init)
def __repr__(self) -> str:
return "<Task of %r>" % self.name
def __call__(self, *args: Tuple, **kwargs: Any) -> None:
"""When a function is wrapped with a task decorator it will be
converted to a Task object. By overriding __call__ method we are
sending this task to queue instead of invoking the function
without changing the client code.
"""
logger.debug("Task.__call__ args=%r, kwargs=%r", args, kwargs)
self.send_to_queue(args, kwargs)
def subtask(self, args: Tuple=(), kwargs: Dict[str, Any]={}, host: str=None) -> SubTask:
return SubTask(self, args, kwargs, host)
def _queue_for_host(self, host: str) -> str:
if not host:
return self.queue
if host == 'localhost':
host = socket.gethostname()
return "%s.%s" % (self.queue, host)
def _get_description(self, args: Tuple, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""Return the dictionary to be sent to the queue."""
return {
'id': uuid1().hex,
'args': args,
'kwargs': kwargs,
'module': self._module_name,
'function': self.f.__name__,
'sender_hostname': socket.gethostname(),
'sender_pid': os.getpid(),
'sender_cmd': ' '.join(sys.argv),
'sender_timestamp': datetime.utcnow().isoformat()[:19],
}
def _send_signal(self, sig: Signal, **data: Any) -> None:
sig.send(self.kuyruk, task=self, **data)
def apply(self, *args: Any, **kwargs: Any) -> Any:
"""Called by workers to run the wrapped function.
You may call it yourself if you want to run the task in current process
without sending to the queue.
If task has a `retry` property it will be retried on failure.
If task has a `max_run_time` property the task will not be allowed to
run more than that.
"""
def send_signal(sig: Signal, **extra: Any) -> None:
self._send_signal(sig, args=args, kwargs=kwargs, **extra)
logger.debug("Applying %r, args=%r, kwargs=%r", self, args, kwargs)
send_signal(signals.task_preapply)
try:
tries = 1 + self.retry
while 1:
tries -= 1
send_signal(signals.task_prerun)
try:
with time_limit(self.max_run_time or 0):
return self.f(*args, **kwargs)
except Exception:
send_signal(signals.task_error, exc_info=sys.exc_info())
if tries <= 0:
raise
else:
break
finally:
send_signal(signals.task_postrun)
except Exception:
send_signal(signals.task_failure, exc_info=sys.exc_info())
raise
else:
send_signal(signals.task_success)
finally:
send_signal(signals.task_postapply)
@property
def name(self) -> str:
"""Full path to the task in the form of `<module>.<function>`.
Workers find and import tasks by this path.
"""
return "%s:%s" % (self._module_name, self.f.__name__)
@property
def _module_name(self) -> str:
"""Module name of the wrapped function."""
name = self.f.__module__
if name == '__main__':
return importer.main_module_name()
return name
|
cenkalti/kuyruk | kuyruk/task.py | Task._get_description | python | def _get_description(self, args: Tuple, kwargs: Dict[str, Any]) -> Dict[str, Any]:
return {
'id': uuid1().hex,
'args': args,
'kwargs': kwargs,
'module': self._module_name,
'function': self.f.__name__,
'sender_hostname': socket.gethostname(),
'sender_pid': os.getpid(),
'sender_cmd': ' '.join(sys.argv),
'sender_timestamp': datetime.utcnow().isoformat()[:19],
} | Return the dictionary to be sent to the queue. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/task.py#L139-L151 | null | class Task:
"""Calling a :class:`~kuyruk.Task` object serializes the task to JSON
and sends it to the queue.
:param retry: Retry this times before give up.
The failed task will be retried in the same worker.
:param max_run_time: Maximum allowed time in seconds for task to
complete.
"""
def __init__(self, f: Callable, kuyruk: 'Kuyruk', queue: str, retry: int=0, max_run_time: int=None) -> None:
self.f = f
self.kuyruk = kuyruk
self.queue = queue
self.retry = retry
self.max_run_time = max_run_time
self._send_signal(signals.task_init)
def __repr__(self) -> str:
return "<Task of %r>" % self.name
def __call__(self, *args: Tuple, **kwargs: Any) -> None:
"""When a function is wrapped with a task decorator it will be
converted to a Task object. By overriding __call__ method we are
sending this task to queue instead of invoking the function
without changing the client code.
"""
logger.debug("Task.__call__ args=%r, kwargs=%r", args, kwargs)
self.send_to_queue(args, kwargs)
def subtask(self, args: Tuple=(), kwargs: Dict[str, Any]={}, host: str=None) -> SubTask:
return SubTask(self, args, kwargs, host)
def send_to_queue(
self,
args: Tuple=(),
kwargs: Dict[str, Any]={},
host: str=None,
wait_result: Union[int, float]=None,
message_ttl: Union[int, float]=None,
) -> Any:
"""
Sends a message to the queue.
A worker will run the task's function when it receives the message.
:param args: Arguments that will be passed to task on execution.
:param kwargs: Keyword arguments that will be passed to task
on execution.
:param host: Send this task to specific host. ``host`` will be
appended to the queue name. If ``host`` is "localhost", hostname
of the server will be appended to the queue name.
:param wait_result:
Wait for result from worker for ``wait_result`` seconds.
If timeout occurs,
:class:`~kuyruk.exceptions.ResultTimeout` is raised.
If excecption occurs in worker,
:class:`~kuyruk.exceptions.RemoteException` is raised.
:param message_ttl:
If set, message will be destroyed in queue after ``message_ttl``
seconds.
:return: Result from worker if ``wait_result`` is set,
else :const:`None`.
"""
if self.kuyruk.config.EAGER:
# Run the task in current process
result = self.apply(*args, **kwargs)
return result if wait_result else None
logger.debug("Task.send_to_queue args=%r, kwargs=%r", args, kwargs)
queue = self._queue_for_host(host)
description = self._get_description(args, kwargs)
self._send_signal(signals.task_presend, args=args, kwargs=kwargs, description=description)
body = json.dumps(description)
msg = amqp.Message(body=body)
if wait_result:
# Use direct reply-to feature from RabbitMQ:
# https://www.rabbitmq.com/direct-reply-to.html
msg.properties['reply_to'] = 'amq.rabbitmq.reply-to'
if message_ttl:
msg.properties['expiration'] = str(int(message_ttl * 1000))
with self.kuyruk.channel() as ch:
if wait_result:
result = Result(ch.connection)
ch.basic_consume(queue='amq.rabbitmq.reply-to', no_ack=True, callback=result.process_message)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
ch.basic_publish(msg, exchange="", routing_key=queue)
self._send_signal(signals.task_postsend, args=args, kwargs=kwargs, description=description)
if wait_result:
return result.wait(wait_result)
def _queue_for_host(self, host: str) -> str:
if not host:
return self.queue
if host == 'localhost':
host = socket.gethostname()
return "%s.%s" % (self.queue, host)
def _send_signal(self, sig: Signal, **data: Any) -> None:
sig.send(self.kuyruk, task=self, **data)
def apply(self, *args: Any, **kwargs: Any) -> Any:
"""Called by workers to run the wrapped function.
You may call it yourself if you want to run the task in current process
without sending to the queue.
If task has a `retry` property it will be retried on failure.
If task has a `max_run_time` property the task will not be allowed to
run more than that.
"""
def send_signal(sig: Signal, **extra: Any) -> None:
self._send_signal(sig, args=args, kwargs=kwargs, **extra)
logger.debug("Applying %r, args=%r, kwargs=%r", self, args, kwargs)
send_signal(signals.task_preapply)
try:
tries = 1 + self.retry
while 1:
tries -= 1
send_signal(signals.task_prerun)
try:
with time_limit(self.max_run_time or 0):
return self.f(*args, **kwargs)
except Exception:
send_signal(signals.task_error, exc_info=sys.exc_info())
if tries <= 0:
raise
else:
break
finally:
send_signal(signals.task_postrun)
except Exception:
send_signal(signals.task_failure, exc_info=sys.exc_info())
raise
else:
send_signal(signals.task_success)
finally:
send_signal(signals.task_postapply)
@property
def name(self) -> str:
"""Full path to the task in the form of `<module>.<function>`.
Workers find and import tasks by this path.
"""
return "%s:%s" % (self._module_name, self.f.__name__)
@property
def _module_name(self) -> str:
"""Module name of the wrapped function."""
name = self.f.__module__
if name == '__main__':
return importer.main_module_name()
return name
|
cenkalti/kuyruk | kuyruk/task.py | Task.apply | python | def apply(self, *args: Any, **kwargs: Any) -> Any:
def send_signal(sig: Signal, **extra: Any) -> None:
self._send_signal(sig, args=args, kwargs=kwargs, **extra)
logger.debug("Applying %r, args=%r, kwargs=%r", self, args, kwargs)
send_signal(signals.task_preapply)
try:
tries = 1 + self.retry
while 1:
tries -= 1
send_signal(signals.task_prerun)
try:
with time_limit(self.max_run_time or 0):
return self.f(*args, **kwargs)
except Exception:
send_signal(signals.task_error, exc_info=sys.exc_info())
if tries <= 0:
raise
else:
break
finally:
send_signal(signals.task_postrun)
except Exception:
send_signal(signals.task_failure, exc_info=sys.exc_info())
raise
else:
send_signal(signals.task_success)
finally:
send_signal(signals.task_postapply) | Called by workers to run the wrapped function.
You may call it yourself if you want to run the task in current process
without sending to the queue.
If task has a `retry` property it will be retried on failure.
If task has a `max_run_time` property the task will not be allowed to
run more than that. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/task.py#L156-L194 | [
"def send_signal(sig: Signal, **extra: Any) -> None:\n self._send_signal(sig, args=args, kwargs=kwargs, **extra)\n"
] | class Task:
"""Calling a :class:`~kuyruk.Task` object serializes the task to JSON
and sends it to the queue.
:param retry: Retry this times before give up.
The failed task will be retried in the same worker.
:param max_run_time: Maximum allowed time in seconds for task to
complete.
"""
def __init__(self, f: Callable, kuyruk: 'Kuyruk', queue: str, retry: int=0, max_run_time: int=None) -> None:
self.f = f
self.kuyruk = kuyruk
self.queue = queue
self.retry = retry
self.max_run_time = max_run_time
self._send_signal(signals.task_init)
def __repr__(self) -> str:
return "<Task of %r>" % self.name
def __call__(self, *args: Tuple, **kwargs: Any) -> None:
"""When a function is wrapped with a task decorator it will be
converted to a Task object. By overriding __call__ method we are
sending this task to queue instead of invoking the function
without changing the client code.
"""
logger.debug("Task.__call__ args=%r, kwargs=%r", args, kwargs)
self.send_to_queue(args, kwargs)
def subtask(self, args: Tuple=(), kwargs: Dict[str, Any]={}, host: str=None) -> SubTask:
return SubTask(self, args, kwargs, host)
def send_to_queue(
self,
args: Tuple=(),
kwargs: Dict[str, Any]={},
host: str=None,
wait_result: Union[int, float]=None,
message_ttl: Union[int, float]=None,
) -> Any:
"""
Sends a message to the queue.
A worker will run the task's function when it receives the message.
:param args: Arguments that will be passed to task on execution.
:param kwargs: Keyword arguments that will be passed to task
on execution.
:param host: Send this task to specific host. ``host`` will be
appended to the queue name. If ``host`` is "localhost", hostname
of the server will be appended to the queue name.
:param wait_result:
Wait for result from worker for ``wait_result`` seconds.
If timeout occurs,
:class:`~kuyruk.exceptions.ResultTimeout` is raised.
If excecption occurs in worker,
:class:`~kuyruk.exceptions.RemoteException` is raised.
:param message_ttl:
If set, message will be destroyed in queue after ``message_ttl``
seconds.
:return: Result from worker if ``wait_result`` is set,
else :const:`None`.
"""
if self.kuyruk.config.EAGER:
# Run the task in current process
result = self.apply(*args, **kwargs)
return result if wait_result else None
logger.debug("Task.send_to_queue args=%r, kwargs=%r", args, kwargs)
queue = self._queue_for_host(host)
description = self._get_description(args, kwargs)
self._send_signal(signals.task_presend, args=args, kwargs=kwargs, description=description)
body = json.dumps(description)
msg = amqp.Message(body=body)
if wait_result:
# Use direct reply-to feature from RabbitMQ:
# https://www.rabbitmq.com/direct-reply-to.html
msg.properties['reply_to'] = 'amq.rabbitmq.reply-to'
if message_ttl:
msg.properties['expiration'] = str(int(message_ttl * 1000))
with self.kuyruk.channel() as ch:
if wait_result:
result = Result(ch.connection)
ch.basic_consume(queue='amq.rabbitmq.reply-to', no_ack=True, callback=result.process_message)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
ch.basic_publish(msg, exchange="", routing_key=queue)
self._send_signal(signals.task_postsend, args=args, kwargs=kwargs, description=description)
if wait_result:
return result.wait(wait_result)
def _queue_for_host(self, host: str) -> str:
if not host:
return self.queue
if host == 'localhost':
host = socket.gethostname()
return "%s.%s" % (self.queue, host)
def _get_description(self, args: Tuple, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""Return the dictionary to be sent to the queue."""
return {
'id': uuid1().hex,
'args': args,
'kwargs': kwargs,
'module': self._module_name,
'function': self.f.__name__,
'sender_hostname': socket.gethostname(),
'sender_pid': os.getpid(),
'sender_cmd': ' '.join(sys.argv),
'sender_timestamp': datetime.utcnow().isoformat()[:19],
}
def _send_signal(self, sig: Signal, **data: Any) -> None:
sig.send(self.kuyruk, task=self, **data)
@property
def name(self) -> str:
"""Full path to the task in the form of `<module>.<function>`.
Workers find and import tasks by this path.
"""
return "%s:%s" % (self._module_name, self.f.__name__)
@property
def _module_name(self) -> str:
"""Module name of the wrapped function."""
name = self.f.__module__
if name == '__main__':
return importer.main_module_name()
return name
|
cenkalti/kuyruk | kuyruk/task.py | Task._module_name | python | def _module_name(self) -> str:
name = self.f.__module__
if name == '__main__':
return importer.main_module_name()
return name | Module name of the wrapped function. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/task.py#L205-L210 | null | class Task:
"""Calling a :class:`~kuyruk.Task` object serializes the task to JSON
and sends it to the queue.
:param retry: Retry this times before give up.
The failed task will be retried in the same worker.
:param max_run_time: Maximum allowed time in seconds for task to
complete.
"""
def __init__(self, f: Callable, kuyruk: 'Kuyruk', queue: str, retry: int=0, max_run_time: int=None) -> None:
self.f = f
self.kuyruk = kuyruk
self.queue = queue
self.retry = retry
self.max_run_time = max_run_time
self._send_signal(signals.task_init)
def __repr__(self) -> str:
return "<Task of %r>" % self.name
def __call__(self, *args: Tuple, **kwargs: Any) -> None:
"""When a function is wrapped with a task decorator it will be
converted to a Task object. By overriding __call__ method we are
sending this task to queue instead of invoking the function
without changing the client code.
"""
logger.debug("Task.__call__ args=%r, kwargs=%r", args, kwargs)
self.send_to_queue(args, kwargs)
def subtask(self, args: Tuple=(), kwargs: Dict[str, Any]={}, host: str=None) -> SubTask:
return SubTask(self, args, kwargs, host)
def send_to_queue(
self,
args: Tuple=(),
kwargs: Dict[str, Any]={},
host: str=None,
wait_result: Union[int, float]=None,
message_ttl: Union[int, float]=None,
) -> Any:
"""
Sends a message to the queue.
A worker will run the task's function when it receives the message.
:param args: Arguments that will be passed to task on execution.
:param kwargs: Keyword arguments that will be passed to task
on execution.
:param host: Send this task to specific host. ``host`` will be
appended to the queue name. If ``host`` is "localhost", hostname
of the server will be appended to the queue name.
:param wait_result:
Wait for result from worker for ``wait_result`` seconds.
If timeout occurs,
:class:`~kuyruk.exceptions.ResultTimeout` is raised.
If excecption occurs in worker,
:class:`~kuyruk.exceptions.RemoteException` is raised.
:param message_ttl:
If set, message will be destroyed in queue after ``message_ttl``
seconds.
:return: Result from worker if ``wait_result`` is set,
else :const:`None`.
"""
if self.kuyruk.config.EAGER:
# Run the task in current process
result = self.apply(*args, **kwargs)
return result if wait_result else None
logger.debug("Task.send_to_queue args=%r, kwargs=%r", args, kwargs)
queue = self._queue_for_host(host)
description = self._get_description(args, kwargs)
self._send_signal(signals.task_presend, args=args, kwargs=kwargs, description=description)
body = json.dumps(description)
msg = amqp.Message(body=body)
if wait_result:
# Use direct reply-to feature from RabbitMQ:
# https://www.rabbitmq.com/direct-reply-to.html
msg.properties['reply_to'] = 'amq.rabbitmq.reply-to'
if message_ttl:
msg.properties['expiration'] = str(int(message_ttl * 1000))
with self.kuyruk.channel() as ch:
if wait_result:
result = Result(ch.connection)
ch.basic_consume(queue='amq.rabbitmq.reply-to', no_ack=True, callback=result.process_message)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
ch.basic_publish(msg, exchange="", routing_key=queue)
self._send_signal(signals.task_postsend, args=args, kwargs=kwargs, description=description)
if wait_result:
return result.wait(wait_result)
def _queue_for_host(self, host: str) -> str:
if not host:
return self.queue
if host == 'localhost':
host = socket.gethostname()
return "%s.%s" % (self.queue, host)
def _get_description(self, args: Tuple, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""Return the dictionary to be sent to the queue."""
return {
'id': uuid1().hex,
'args': args,
'kwargs': kwargs,
'module': self._module_name,
'function': self.f.__name__,
'sender_hostname': socket.gethostname(),
'sender_pid': os.getpid(),
'sender_cmd': ' '.join(sys.argv),
'sender_timestamp': datetime.utcnow().isoformat()[:19],
}
def _send_signal(self, sig: Signal, **data: Any) -> None:
sig.send(self.kuyruk, task=self, **data)
def apply(self, *args: Any, **kwargs: Any) -> Any:
"""Called by workers to run the wrapped function.
You may call it yourself if you want to run the task in current process
without sending to the queue.
If task has a `retry` property it will be retried on failure.
If task has a `max_run_time` property the task will not be allowed to
run more than that.
"""
def send_signal(sig: Signal, **extra: Any) -> None:
self._send_signal(sig, args=args, kwargs=kwargs, **extra)
logger.debug("Applying %r, args=%r, kwargs=%r", self, args, kwargs)
send_signal(signals.task_preapply)
try:
tries = 1 + self.retry
while 1:
tries -= 1
send_signal(signals.task_prerun)
try:
with time_limit(self.max_run_time or 0):
return self.f(*args, **kwargs)
except Exception:
send_signal(signals.task_error, exc_info=sys.exc_info())
if tries <= 0:
raise
else:
break
finally:
send_signal(signals.task_postrun)
except Exception:
send_signal(signals.task_failure, exc_info=sys.exc_info())
raise
else:
send_signal(signals.task_success)
finally:
send_signal(signals.task_postapply)
@property
def name(self) -> str:
"""Full path to the task in the form of `<module>.<function>`.
Workers find and import tasks by this path.
"""
return "%s:%s" % (self._module_name, self.f.__name__)
@property
|
cenkalti/kuyruk | kuyruk/config.py | Config.from_object | python | def from_object(self, obj: Union[str, Any]) -> None:
if isinstance(obj, str):
obj = importer.import_object_str(obj)
for key in dir(obj):
if key.isupper():
value = getattr(obj, key)
self._setattr(key, value)
logger.info("Config is loaded from object: %r", obj) | Load values from an object. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/config.py#L62-L72 | [
"def import_object_str(s: str) -> Any:\n module, obj = s.rsplit('.', 1)\n return import_object(module, obj)\n",
"def _setattr(self, key: str, value: Any) -> None:\n if not hasattr(self.__class__, key):\n raise ValueError(\"Unknown config key: %s\" % key)\n\n setattr(self, key, value)\n"
] | class Config:
"""Kuyruk configuration object. Default values are defined as
class attributes. Additional attributes may be added by extensions.
"""
# Connection Options
####################
RABBIT_HOST = 'localhost'
"""RabbitMQ host."""
RABBIT_PORT = 5672
"""RabbitMQ port."""
RABBIT_VIRTUAL_HOST = '/'
"""RabbitMQ virtual host."""
RABBIT_USER = 'guest'
"""RabbitMQ user."""
RABBIT_PASSWORD = 'guest'
"""RabbitMQ password."""
RABBIT_HEARTBEAT = 60
RABBIT_CONNECT_TIMEOUT = 5
RABBIT_READ_TIMEOUT = 5
RABBIT_WRITE_TIMEOUT = 5
TCP_USER_TIMEOUT = 60
# Instance Options
##################
EAGER = False
"""Run tasks in the process without sending to queue. Useful in tests."""
# Worker Options
################
WORKER_MAX_LOAD = None
"""Pause consuming queue when the load goes above this level."""
WORKER_MAX_RUN_TIME = None
"""Gracefully shutdown worker after running this seconds."""
WORKER_LOGGING_LEVEL = 'INFO'
"""Logging level of root logger."""
def from_dict(self, d: Dict[str, Any]) -> None:
"""Load values from a dict."""
for key, value in d.items():
if key.isupper():
self._setattr(key, value)
logger.info("Config is loaded from dict: %r", d)
def from_pymodule(self, name: str) -> None:
module = importer.import_module(name)
for key, value in module.__dict__.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from module: %s", name)
def from_pyfile(self, filename: str) -> None:
"""Load values from a Python file."""
globals_ = {} # type: Dict[str, Any]
locals_ = {} # type: Dict[str, Any]
with open(filename, "rb") as f:
exec(compile(f.read(), filename, 'exec'), globals_, locals_)
for key, value in locals_.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from file: %s", filename)
def from_env_vars(self) -> None:
"""Load values from environment variables.
Keys must start with `KUYRUK_`."""
for key, value in os.environ.items():
if key.startswith('KUYRUK_'):
key = key[7:]
if hasattr(Config, key):
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
self._setattr(key, value)
def _setattr(self, key: str, value: Any) -> None:
if not hasattr(self.__class__, key):
raise ValueError("Unknown config key: %s" % key)
setattr(self, key, value)
|
cenkalti/kuyruk | kuyruk/config.py | Config.from_dict | python | def from_dict(self, d: Dict[str, Any]) -> None:
for key, value in d.items():
if key.isupper():
self._setattr(key, value)
logger.info("Config is loaded from dict: %r", d) | Load values from a dict. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/config.py#L74-L80 | [
"def _setattr(self, key: str, value: Any) -> None:\n if not hasattr(self.__class__, key):\n raise ValueError(\"Unknown config key: %s\" % key)\n\n setattr(self, key, value)\n"
] | class Config:
"""Kuyruk configuration object. Default values are defined as
class attributes. Additional attributes may be added by extensions.
"""
# Connection Options
####################
RABBIT_HOST = 'localhost'
"""RabbitMQ host."""
RABBIT_PORT = 5672
"""RabbitMQ port."""
RABBIT_VIRTUAL_HOST = '/'
"""RabbitMQ virtual host."""
RABBIT_USER = 'guest'
"""RabbitMQ user."""
RABBIT_PASSWORD = 'guest'
"""RabbitMQ password."""
RABBIT_HEARTBEAT = 60
RABBIT_CONNECT_TIMEOUT = 5
RABBIT_READ_TIMEOUT = 5
RABBIT_WRITE_TIMEOUT = 5
TCP_USER_TIMEOUT = 60
# Instance Options
##################
EAGER = False
"""Run tasks in the process without sending to queue. Useful in tests."""
# Worker Options
################
WORKER_MAX_LOAD = None
"""Pause consuming queue when the load goes above this level."""
WORKER_MAX_RUN_TIME = None
"""Gracefully shutdown worker after running this seconds."""
WORKER_LOGGING_LEVEL = 'INFO'
"""Logging level of root logger."""
def from_object(self, obj: Union[str, Any]) -> None:
"""Load values from an object."""
if isinstance(obj, str):
obj = importer.import_object_str(obj)
for key in dir(obj):
if key.isupper():
value = getattr(obj, key)
self._setattr(key, value)
logger.info("Config is loaded from object: %r", obj)
def from_pymodule(self, name: str) -> None:
module = importer.import_module(name)
for key, value in module.__dict__.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from module: %s", name)
def from_pyfile(self, filename: str) -> None:
"""Load values from a Python file."""
globals_ = {} # type: Dict[str, Any]
locals_ = {} # type: Dict[str, Any]
with open(filename, "rb") as f:
exec(compile(f.read(), filename, 'exec'), globals_, locals_)
for key, value in locals_.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from file: %s", filename)
def from_env_vars(self) -> None:
"""Load values from environment variables.
Keys must start with `KUYRUK_`."""
for key, value in os.environ.items():
if key.startswith('KUYRUK_'):
key = key[7:]
if hasattr(Config, key):
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
self._setattr(key, value)
def _setattr(self, key: str, value: Any) -> None:
if not hasattr(self.__class__, key):
raise ValueError("Unknown config key: %s" % key)
setattr(self, key, value)
|
cenkalti/kuyruk | kuyruk/config.py | Config.from_pyfile | python | def from_pyfile(self, filename: str) -> None:
globals_ = {} # type: Dict[str, Any]
locals_ = {} # type: Dict[str, Any]
with open(filename, "rb") as f:
exec(compile(f.read(), filename, 'exec'), globals_, locals_)
for key, value in locals_.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from file: %s", filename) | Load values from a Python file. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/config.py#L90-L101 | [
"def _setattr(self, key: str, value: Any) -> None:\n if not hasattr(self.__class__, key):\n raise ValueError(\"Unknown config key: %s\" % key)\n\n setattr(self, key, value)\n"
] | class Config:
"""Kuyruk configuration object. Default values are defined as
class attributes. Additional attributes may be added by extensions.
"""
# Connection Options
####################
RABBIT_HOST = 'localhost'
"""RabbitMQ host."""
RABBIT_PORT = 5672
"""RabbitMQ port."""
RABBIT_VIRTUAL_HOST = '/'
"""RabbitMQ virtual host."""
RABBIT_USER = 'guest'
"""RabbitMQ user."""
RABBIT_PASSWORD = 'guest'
"""RabbitMQ password."""
RABBIT_HEARTBEAT = 60
RABBIT_CONNECT_TIMEOUT = 5
RABBIT_READ_TIMEOUT = 5
RABBIT_WRITE_TIMEOUT = 5
TCP_USER_TIMEOUT = 60
# Instance Options
##################
EAGER = False
"""Run tasks in the process without sending to queue. Useful in tests."""
# Worker Options
################
WORKER_MAX_LOAD = None
"""Pause consuming queue when the load goes above this level."""
WORKER_MAX_RUN_TIME = None
"""Gracefully shutdown worker after running this seconds."""
WORKER_LOGGING_LEVEL = 'INFO'
"""Logging level of root logger."""
def from_object(self, obj: Union[str, Any]) -> None:
"""Load values from an object."""
if isinstance(obj, str):
obj = importer.import_object_str(obj)
for key in dir(obj):
if key.isupper():
value = getattr(obj, key)
self._setattr(key, value)
logger.info("Config is loaded from object: %r", obj)
def from_dict(self, d: Dict[str, Any]) -> None:
"""Load values from a dict."""
for key, value in d.items():
if key.isupper():
self._setattr(key, value)
logger.info("Config is loaded from dict: %r", d)
def from_pymodule(self, name: str) -> None:
module = importer.import_module(name)
for key, value in module.__dict__.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from module: %s", name)
def from_env_vars(self) -> None:
"""Load values from environment variables.
Keys must start with `KUYRUK_`."""
for key, value in os.environ.items():
if key.startswith('KUYRUK_'):
key = key[7:]
if hasattr(Config, key):
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
self._setattr(key, value)
def _setattr(self, key: str, value: Any) -> None:
if not hasattr(self.__class__, key):
raise ValueError("Unknown config key: %s" % key)
setattr(self, key, value)
|
cenkalti/kuyruk | kuyruk/config.py | Config.from_env_vars | python | def from_env_vars(self) -> None:
for key, value in os.environ.items():
if key.startswith('KUYRUK_'):
key = key[7:]
if hasattr(Config, key):
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
self._setattr(key, value) | Load values from environment variables.
Keys must start with `KUYRUK_`. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/config.py#L103-L115 | [
"def _setattr(self, key: str, value: Any) -> None:\n if not hasattr(self.__class__, key):\n raise ValueError(\"Unknown config key: %s\" % key)\n\n setattr(self, key, value)\n"
] | class Config:
"""Kuyruk configuration object. Default values are defined as
class attributes. Additional attributes may be added by extensions.
"""
# Connection Options
####################
RABBIT_HOST = 'localhost'
"""RabbitMQ host."""
RABBIT_PORT = 5672
"""RabbitMQ port."""
RABBIT_VIRTUAL_HOST = '/'
"""RabbitMQ virtual host."""
RABBIT_USER = 'guest'
"""RabbitMQ user."""
RABBIT_PASSWORD = 'guest'
"""RabbitMQ password."""
RABBIT_HEARTBEAT = 60
RABBIT_CONNECT_TIMEOUT = 5
RABBIT_READ_TIMEOUT = 5
RABBIT_WRITE_TIMEOUT = 5
TCP_USER_TIMEOUT = 60
# Instance Options
##################
EAGER = False
"""Run tasks in the process without sending to queue. Useful in tests."""
# Worker Options
################
WORKER_MAX_LOAD = None
"""Pause consuming queue when the load goes above this level."""
WORKER_MAX_RUN_TIME = None
"""Gracefully shutdown worker after running this seconds."""
WORKER_LOGGING_LEVEL = 'INFO'
"""Logging level of root logger."""
def from_object(self, obj: Union[str, Any]) -> None:
"""Load values from an object."""
if isinstance(obj, str):
obj = importer.import_object_str(obj)
for key in dir(obj):
if key.isupper():
value = getattr(obj, key)
self._setattr(key, value)
logger.info("Config is loaded from object: %r", obj)
def from_dict(self, d: Dict[str, Any]) -> None:
"""Load values from a dict."""
for key, value in d.items():
if key.isupper():
self._setattr(key, value)
logger.info("Config is loaded from dict: %r", d)
def from_pymodule(self, name: str) -> None:
module = importer.import_module(name)
for key, value in module.__dict__.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from module: %s", name)
def from_pyfile(self, filename: str) -> None:
"""Load values from a Python file."""
globals_ = {} # type: Dict[str, Any]
locals_ = {} # type: Dict[str, Any]
with open(filename, "rb") as f:
exec(compile(f.read(), filename, 'exec'), globals_, locals_)
for key, value in locals_.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from file: %s", filename)
def _setattr(self, key: str, value: Any) -> None:
if not hasattr(self.__class__, key):
raise ValueError("Unknown config key: %s" % key)
setattr(self, key, value)
|
cenkalti/kuyruk | kuyruk/kuyruk.py | Kuyruk.task | python | def task(self, queue: str = 'kuyruk', **kwargs: Any) -> Callable:
def wrapper(f: Callable) -> Task:
return Task(f, self, queue, **kwargs)
return wrapper | Wrap functions with this decorator to convert them to *tasks*.
After wrapping, calling the function will send a message to
a queue instead of running the function.
:param queue: Queue name for the tasks.
:param kwargs: Keyword arguments will be passed to
:class:`~kuyruk.Task` constructor.
:return: Callable :class:`~kuyruk.Task` object wrapping the original
function. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/kuyruk.py#L37-L53 | null | class Kuyruk:
"""
Provides :func:`~kuyruk.Kuyruk.task` decorator to convert a function
into a :class:`~kuyruk.Task`.
Provides :func:`~kuyruk.Kuyruk.channel` context manager for opening a
new channel on the connection.
Connection is opened when the first channel is created.
:param config: Must be an instance of :class:`~kuyruk.Config`.
If ``None``, default config is used.
See :class:`~kuyruk.Config` for default values.
"""
def __init__(self, config: Config = None) -> None:
if config is None:
config = Config()
self.config = config
self.extensions = {} # type: Dict[str, Any]
@contextmanager
def channel(self) -> Iterator[amqp.Channel]:
"""Returns a new channel from a new connection as a context manager."""
with self.connection() as conn:
ch = conn.channel()
logger.info('Opened new channel')
with _safe_close(ch):
yield ch
@contextmanager
def connection(self) -> Iterator[amqp.Connection]:
"""Returns a new connection as a context manager."""
TCP_USER_TIMEOUT = 18 # constant is available on Python 3.6+.
socket_settings = {TCP_USER_TIMEOUT: self.config.TCP_USER_TIMEOUT}
if sys.platform.startswith('darwin'):
del socket_settings[TCP_USER_TIMEOUT]
conn = amqp.Connection(
host="%s:%s" % (self.config.RABBIT_HOST, self.config.RABBIT_PORT),
userid=self.config.RABBIT_USER,
password=self.config.RABBIT_PASSWORD,
virtual_host=self.config.RABBIT_VIRTUAL_HOST,
connect_timeout=self.config.RABBIT_CONNECT_TIMEOUT,
read_timeout=self.config.RABBIT_READ_TIMEOUT,
write_timeout=self.config.RABBIT_WRITE_TIMEOUT,
socket_settings=socket_settings,
heartbeat=self.config.RABBIT_HEARTBEAT,
)
conn.connect()
logger.info('Connected to RabbitMQ')
with _safe_close(conn):
yield conn
def send_tasks_to_queue(self, subtasks: List[SubTask]) -> None:
if self.config.EAGER:
for subtask in subtasks:
subtask.task.apply(*subtask.args, **subtask.kwargs)
return
declared_queues = set() # type: Set[str]
with self.channel() as ch:
for subtask in subtasks:
queue = subtask.task._queue_for_host(subtask.host)
if queue not in declared_queues:
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
declared_queues.add(queue)
description = subtask.task._get_description(subtask.args,
subtask.kwargs)
subtask.task._send_signal(signals.task_presend,
args=subtask.args,
kwargs=subtask.kwargs,
description=description)
body = json.dumps(description)
msg = amqp.Message(body=body)
ch.basic_publish(msg, exchange="", routing_key=queue)
subtask.task._send_signal(signals.task_postsend,
args=subtask.args,
kwargs=subtask.kwargs,
description=description)
|
cenkalti/kuyruk | kuyruk/kuyruk.py | Kuyruk.channel | python | def channel(self) -> Iterator[amqp.Channel]:
with self.connection() as conn:
ch = conn.channel()
logger.info('Opened new channel')
with _safe_close(ch):
yield ch | Returns a new channel from a new connection as a context manager. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/kuyruk.py#L56-L62 | null | class Kuyruk:
"""
Provides :func:`~kuyruk.Kuyruk.task` decorator to convert a function
into a :class:`~kuyruk.Task`.
Provides :func:`~kuyruk.Kuyruk.channel` context manager for opening a
new channel on the connection.
Connection is opened when the first channel is created.
:param config: Must be an instance of :class:`~kuyruk.Config`.
If ``None``, default config is used.
See :class:`~kuyruk.Config` for default values.
"""
def __init__(self, config: Config = None) -> None:
if config is None:
config = Config()
self.config = config
self.extensions = {} # type: Dict[str, Any]
def task(self, queue: str = 'kuyruk', **kwargs: Any) -> Callable:
"""
Wrap functions with this decorator to convert them to *tasks*.
After wrapping, calling the function will send a message to
a queue instead of running the function.
:param queue: Queue name for the tasks.
:param kwargs: Keyword arguments will be passed to
:class:`~kuyruk.Task` constructor.
:return: Callable :class:`~kuyruk.Task` object wrapping the original
function.
"""
def wrapper(f: Callable) -> Task:
return Task(f, self, queue, **kwargs)
return wrapper
@contextmanager
@contextmanager
def connection(self) -> Iterator[amqp.Connection]:
"""Returns a new connection as a context manager."""
TCP_USER_TIMEOUT = 18 # constant is available on Python 3.6+.
socket_settings = {TCP_USER_TIMEOUT: self.config.TCP_USER_TIMEOUT}
if sys.platform.startswith('darwin'):
del socket_settings[TCP_USER_TIMEOUT]
conn = amqp.Connection(
host="%s:%s" % (self.config.RABBIT_HOST, self.config.RABBIT_PORT),
userid=self.config.RABBIT_USER,
password=self.config.RABBIT_PASSWORD,
virtual_host=self.config.RABBIT_VIRTUAL_HOST,
connect_timeout=self.config.RABBIT_CONNECT_TIMEOUT,
read_timeout=self.config.RABBIT_READ_TIMEOUT,
write_timeout=self.config.RABBIT_WRITE_TIMEOUT,
socket_settings=socket_settings,
heartbeat=self.config.RABBIT_HEARTBEAT,
)
conn.connect()
logger.info('Connected to RabbitMQ')
with _safe_close(conn):
yield conn
def send_tasks_to_queue(self, subtasks: List[SubTask]) -> None:
if self.config.EAGER:
for subtask in subtasks:
subtask.task.apply(*subtask.args, **subtask.kwargs)
return
declared_queues = set() # type: Set[str]
with self.channel() as ch:
for subtask in subtasks:
queue = subtask.task._queue_for_host(subtask.host)
if queue not in declared_queues:
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
declared_queues.add(queue)
description = subtask.task._get_description(subtask.args,
subtask.kwargs)
subtask.task._send_signal(signals.task_presend,
args=subtask.args,
kwargs=subtask.kwargs,
description=description)
body = json.dumps(description)
msg = amqp.Message(body=body)
ch.basic_publish(msg, exchange="", routing_key=queue)
subtask.task._send_signal(signals.task_postsend,
args=subtask.args,
kwargs=subtask.kwargs,
description=description)
|
cenkalti/kuyruk | kuyruk/kuyruk.py | Kuyruk.connection | python | def connection(self) -> Iterator[amqp.Connection]:
TCP_USER_TIMEOUT = 18 # constant is available on Python 3.6+.
socket_settings = {TCP_USER_TIMEOUT: self.config.TCP_USER_TIMEOUT}
if sys.platform.startswith('darwin'):
del socket_settings[TCP_USER_TIMEOUT]
conn = amqp.Connection(
host="%s:%s" % (self.config.RABBIT_HOST, self.config.RABBIT_PORT),
userid=self.config.RABBIT_USER,
password=self.config.RABBIT_PASSWORD,
virtual_host=self.config.RABBIT_VIRTUAL_HOST,
connect_timeout=self.config.RABBIT_CONNECT_TIMEOUT,
read_timeout=self.config.RABBIT_READ_TIMEOUT,
write_timeout=self.config.RABBIT_WRITE_TIMEOUT,
socket_settings=socket_settings,
heartbeat=self.config.RABBIT_HEARTBEAT,
)
conn.connect()
logger.info('Connected to RabbitMQ')
with _safe_close(conn):
yield conn | Returns a new connection as a context manager. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/kuyruk.py#L65-L87 | null | class Kuyruk:
"""
Provides :func:`~kuyruk.Kuyruk.task` decorator to convert a function
into a :class:`~kuyruk.Task`.
Provides :func:`~kuyruk.Kuyruk.channel` context manager for opening a
new channel on the connection.
Connection is opened when the first channel is created.
:param config: Must be an instance of :class:`~kuyruk.Config`.
If ``None``, default config is used.
See :class:`~kuyruk.Config` for default values.
"""
def __init__(self, config: Config = None) -> None:
if config is None:
config = Config()
self.config = config
self.extensions = {} # type: Dict[str, Any]
def task(self, queue: str = 'kuyruk', **kwargs: Any) -> Callable:
"""
Wrap functions with this decorator to convert them to *tasks*.
After wrapping, calling the function will send a message to
a queue instead of running the function.
:param queue: Queue name for the tasks.
:param kwargs: Keyword arguments will be passed to
:class:`~kuyruk.Task` constructor.
:return: Callable :class:`~kuyruk.Task` object wrapping the original
function.
"""
def wrapper(f: Callable) -> Task:
return Task(f, self, queue, **kwargs)
return wrapper
@contextmanager
def channel(self) -> Iterator[amqp.Channel]:
"""Returns a new channel from a new connection as a context manager."""
with self.connection() as conn:
ch = conn.channel()
logger.info('Opened new channel')
with _safe_close(ch):
yield ch
@contextmanager
def send_tasks_to_queue(self, subtasks: List[SubTask]) -> None:
if self.config.EAGER:
for subtask in subtasks:
subtask.task.apply(*subtask.args, **subtask.kwargs)
return
declared_queues = set() # type: Set[str]
with self.channel() as ch:
for subtask in subtasks:
queue = subtask.task._queue_for_host(subtask.host)
if queue not in declared_queues:
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
declared_queues.add(queue)
description = subtask.task._get_description(subtask.args,
subtask.kwargs)
subtask.task._send_signal(signals.task_presend,
args=subtask.args,
kwargs=subtask.kwargs,
description=description)
body = json.dumps(description)
msg = amqp.Message(body=body)
ch.basic_publish(msg, exchange="", routing_key=queue)
subtask.task._send_signal(signals.task_postsend,
args=subtask.args,
kwargs=subtask.kwargs,
description=description)
|
cenkalti/kuyruk | kuyruk/importer.py | import_module | python | def import_module(name: str) -> ModuleType:
logger.debug("Importing module: %s", name)
if name == main_module_name():
return main_module
return importlib.import_module(name) | Import module by it's name from following places in order:
- main module
- current working directory
- Python path | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/importer.py#L13-L24 | [
"def main_module_name() -> str:\n \"\"\"Returns main module and module name pair.\"\"\"\n if not hasattr(main_module, '__file__'):\n # running from interactive shell\n return None\n\n main_filename = os.path.basename(main_module.__file__)\n module_name, ext = os.path.splitext(main_filename)\n return module_name\n"
] | import os
import sys
import logging
import importlib
from typing import Any
from types import ModuleType
logger = logging.getLogger(__name__)
main_module = sys.modules['__main__']
def import_object(module_name: str, object_name: str) -> Any:
    """Import ``module_name`` and return its attribute ``object_name``.

    Raises ``ImportError`` when the module defines no such attribute.
    """
    target = import_module(module_name)
    try:
        return getattr(target, object_name)
    except AttributeError as exc:
        # Surface a missing attribute as an import failure, like a bad name.
        raise ImportError(exc)
def import_object_str(s: str) -> Any:
    """Resolve a dotted path such as ``"package.module.attr"``."""
    module_name, attr_name = s.rsplit('.', 1)
    return import_object(module_name, attr_name)
def main_module_name() -> str:
    """Return the name of the program's main module.

    The name is the main script's filename with its directory and extension
    stripped, e.g. ``/path/to/worker.py`` yields ``"worker"``.

    Returns ``None`` when ``__main__`` has no backing file (for example when
    running inside an interactive shell).
    """
    if not hasattr(main_module, '__file__'):
        # Interactive interpreters have no file behind the main module.
        return None

    main_filename = os.path.basename(main_module.__file__)
    module_name, _ = os.path.splitext(main_filename)
    return module_name
|
cenkalti/kuyruk | kuyruk/importer.py | main_module_name | python | def main_module_name() -> str:
if not hasattr(main_module, '__file__'):
# running from interactive shell
return None
main_filename = os.path.basename(main_module.__file__)
module_name, ext = os.path.splitext(main_filename)
return module_name | Returns main module and module name pair. | train | https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/importer.py#L40-L48 | null | import os
import sys
import logging
import importlib
from typing import Any
from types import ModuleType
logger = logging.getLogger(__name__)
main_module = sys.modules['__main__']
def import_module(name: str) -> ModuleType:
    """Import a module by its name.

    Modules are looked up in the following places, in order:

    - the main module
    - the current working directory
    - the Python path

    :param name: Dotted name of the module to import.
    :return: The imported module object.
    """
    logger.debug("Importing module: %s", name)
    if name == main_module_name():
        # The main script is already loaded; reuse it instead of importing
        # it again.
        return main_module
    return importlib.import_module(name)
def import_object(module_name: str, object_name: str) -> Any:
    """Import ``module_name`` and return its attribute ``object_name``.

    Raises ``ImportError`` when the module defines no such attribute.
    """
    target = import_module(module_name)
    try:
        return getattr(target, object_name)
    except AttributeError as exc:
        # Surface a missing attribute as an import failure, like a bad name.
        raise ImportError(exc)
def import_object_str(s: str) -> Any:
    """Resolve a dotted path such as ``"package.module.attr"``."""
    module_name, attr_name = s.rsplit('.', 1)
    return import_object(module_name, attr_name)
|
lmjohns3/downhill | examples/rosenbrock.py | build | python | def build(algo, init):
'''Build and return an optimizer for the rosenbrock function.
In downhill, an optimizer can be constructed using the build() top-level
function. This function requires several Theano quantities such as the loss
being optimized and the parameters to update during optimization.
'''
x = theano.shared(np.array(init, FLOAT), name='x')
n = 0.1 * RandomStreams().normal((len(init) - 1, ))
monitors = []
if len(init) == 2:
# this gives us access to the x and y locations during optimization.
monitors.extend([('x', x[:-1].sum()), ('y', x[1:].sum())])
return downhill.build(
algo,
loss=(n + 100 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2).sum(),
params=[x],
monitors=monitors,
monitor_gradients=True) | Build and return an optimizer for the rosenbrock function.
In downhill, an optimizer can be constructed using the build() top-level
function. This function requires several Theano quantities such as the loss
being optimized and the parameters to update during optimization. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/examples/rosenbrock.py#L15-L33 | [
"def build(algo, loss, params=None, inputs=None, updates=(), monitors=(),\n monitor_gradients=False):\n '''Construct an optimizer by name.\n\n Parameters\n ----------\n algo : str\n The name of the optimization algorithm to build.\n loss : Theano expression\n Loss function to minimize. This must be a scalar-valued expression.\n params : list of Theano variables, optional\n Symbolic variables to adjust to minimize the loss. If not given, these\n will be computed automatically by walking the computation graph.\n inputs : list of Theano variables, optional\n Symbolic variables required to compute the loss. If not given, these\n will be computed automatically by walking the computation graph.\n updates : list of update pairs, optional\n A list of pairs providing updates for the internal of the loss\n computation. Normally this is empty, but it can be provided if the loss,\n for example, requires an update to an internal random number generator.\n monitors : dict or sequence of (str, Theano expression) tuples, optional\n Additional values to monitor during optimization. These must be provided\n as either a sequence of (name, expression) tuples, or as a dictionary\n mapping string names to Theano expressions.\n monitor_gradients : bool, optional\n If True, add monitors to log the norms of the parameter gradients during\n optimization. Defaults to False.\n\n Returns\n -------\n optimizer : :class:`Optimizer`\n An optimizer instance.\n '''\n return Optimizer.build(algo, loss, params, inputs,\n updates=updates, monitors=monitors,\n monitor_gradients=monitor_gradients)\n"
] | '''Helper functions for rosenbrock optimization examples.'''
import downhill
import numpy as np
import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
COLORS = ('#d62728 #1f77b4 #2ca02c #9467bd #ff7f0e '
'#e377c2 #8c564b #bcbd22 #7f7f7f #17becf').split()
FLOAT = 'df'[theano.config.floatX == 'float32']
def build_and_trace(algo, init, limit=100, **kwargs):
    '''Optimize the rosenbrock function, recording the path taken.

    Optimizers in downhill are iterable: each optimization step yields a
    dictionary of monitored quantities. This helper constructs an optimizer
    and consumes at most ``limit`` of those steps, collecting the monitored
    x/y coordinates (for 2D problems) and the loss at every step.

    Returns the x coordinates, y coordinates, and the loss, trimmed to the
    portion of the run before patience was exhausted.
    '''
    settings = dict(min_improvement=0, patience=0, max_gradient_norm=100)
    settings.update(kwargs)
    trace_x, trace_y, losses = [], [], []
    for monitors, _ in build(algo, init).iterate([[]], **settings):
        if len(init) == 2:
            trace_x.append(monitors['x'])
            trace_y.append(monitors['y'])
        losses.append(monitors['loss'])
        if len(losses) == limit:
            break
    # Trim off the trailing steps, where patience ran out.
    return trace_x[:-9], trace_y[:-9], losses[-9]
def test(algos, n=10, init=(-1.1, 0), limit=100):
    '''Run several optimizers for comparison.

    Each optimizer is run a fixed number of times with random hyperparameter
    values, and the results are yielded back to the caller (often stored in a
    dictionary).

    Parameters
    ----------
    algos : sequence of str
        Names of the optimization algorithms to exercise.
    n : int, optional
        Number of random hyperparameter draws per algorithm. Defaults to 10.
    init : sequence of float, optional
        Starting point for the optimization. Defaults to ``(-1.1, 0)``.
    limit : int, optional
        Maximum number of optimization steps per run. Defaults to 100.

    Returns
    -------
    results : sequence of (key, value) pairs
        A sequence of results from running tests. Each result contains a "key"
        that describes the test run and a "value" that contains the results from
        the run. The key is a tuple containing (a) the algorithm, (b) the
        learning rate, (c) the momentum, (d) the RMS halflife, and (e) the RMS
        regularizer. The value is a tuple containing the (a) x-values and (b)
        y-values during the optimization, and (c) the loss value. (The x- and
        y-value are only non-empty for 2D experiments.)
    '''
    # NOTE: the default for ``init`` is a tuple rather than a list to avoid
    # the shared-mutable-default-argument pitfall; it is only ever read.
    for algo in algos:
        for _ in range(n):
            # Random hyperparameters: momentum in [0, 1) (half the draws
            # collapse to 0), log-uniform rate/halflife/regularizer.
            mu = max(0, np.random.uniform(0, 2) - 1)
            rate = np.exp(np.random.uniform(-8, -1))
            half = int(np.exp(np.random.uniform(0, 4)))
            reg = np.exp(np.random.uniform(-12, 0))
            yield (algo, rate, mu, half, reg), build_and_trace(
                algo, init, limit, momentum=mu, learning_rate=rate,
                rms_halflife=half, rms_regularizer=reg)
|
lmjohns3/downhill | examples/rosenbrock.py | build_and_trace | python | def build_and_trace(algo, init, limit=100, **kwargs):
'''Run an optimizer on the rosenbrock function. Return xs, ys, and losses.
In downhill, optimization algorithms can be iterated over to progressively
minimize the loss. At each iteration, the optimizer yields a dictionary of
monitor values that were computed during that iteration. Here we build an
optimizer and then run it for a fixed number of iterations.
'''
kw = dict(min_improvement=0, patience=0, max_gradient_norm=100)
kw.update(kwargs)
xs, ys, loss = [], [], []
for tm, _ in build(algo, init).iterate([[]], **kw):
if len(init) == 2:
xs.append(tm['x'])
ys.append(tm['y'])
loss.append(tm['loss'])
if len(loss) == limit:
break
# Return the optimization up to any failure of patience.
return xs[:-9], ys[:-9], loss[-9] | Run an optimizer on the rosenbrock function. Return xs, ys, and losses.
In downhill, optimization algorithms can be iterated over to progressively
minimize the loss. At each iteration, the optimizer yields a dictionary of
monitor values that were computed during that iteration. Here we build an
optimizer and then run it for a fixed number of iterations. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/examples/rosenbrock.py#L36-L55 | [
"def build(algo, init):\n '''Build and return an optimizer for the rosenbrock function.\n\n In downhill, an optimizer can be constructed using the build() top-level\n function. This function requires several Theano quantities such as the loss\n being optimized and the parameters to update during optimization.\n '''\n x = theano.shared(np.array(init, FLOAT), name='x')\n n = 0.1 * RandomStreams().normal((len(init) - 1, ))\n monitors = []\n if len(init) == 2:\n # this gives us access to the x and y locations during optimization.\n monitors.extend([('x', x[:-1].sum()), ('y', x[1:].sum())])\n return downhill.build(\n algo,\n loss=(n + 100 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2).sum(),\n params=[x],\n monitors=monitors,\n monitor_gradients=True)\n"
] | '''Helper functions for rosenbrock optimization examples.'''
import downhill
import numpy as np
import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
COLORS = ('#d62728 #1f77b4 #2ca02c #9467bd #ff7f0e '
'#e377c2 #8c564b #bcbd22 #7f7f7f #17becf').split()
FLOAT = 'df'[theano.config.floatX == 'float32']
def build(algo, init):
    '''Construct a downhill optimizer for the (noisy) rosenbrock loss.

    The loss is the classic rosenbrock "banana" function over a shared Theano
    vector initialized at ``init``, perturbed by a small amount of Gaussian
    noise. For two-dimensional problems the current x and y coordinates are
    exposed as monitors so the optimization path can be traced.
    '''
    params = theano.shared(np.array(init, FLOAT), name='x')
    noise = 0.1 * RandomStreams().normal((len(init) - 1, ))
    head, tail = params[:-1], params[1:]
    loss = (noise + 100 * (tail - head ** 2) ** 2 + (1 - head) ** 2).sum()
    monitors = []
    if len(init) == 2:
        # Expose the current (x, y) location during optimization.
        monitors = [('x', head.sum()), ('y', tail.sum())]
    return downhill.build(algo,
                          loss=loss,
                          params=[params],
                          monitors=monitors,
                          monitor_gradients=True)
def test(algos, n=10, init=(-1.1, 0), limit=100):
    '''Run several optimizers for comparison.

    Each optimizer is run a fixed number of times with random hyperparameter
    values, and the results are yielded back to the caller (often stored in a
    dictionary).

    Parameters
    ----------
    algos : sequence of str
        Names of the optimization algorithms to exercise.
    n : int, optional
        Number of random hyperparameter draws per algorithm. Defaults to 10.
    init : sequence of float, optional
        Starting point for the optimization. Defaults to ``(-1.1, 0)``.
    limit : int, optional
        Maximum number of optimization steps per run. Defaults to 100.

    Returns
    -------
    results : sequence of (key, value) pairs
        A sequence of results from running tests. Each result contains a "key"
        that describes the test run and a "value" that contains the results from
        the run. The key is a tuple containing (a) the algorithm, (b) the
        learning rate, (c) the momentum, (d) the RMS halflife, and (e) the RMS
        regularizer. The value is a tuple containing the (a) x-values and (b)
        y-values during the optimization, and (c) the loss value. (The x- and
        y-value are only non-empty for 2D experiments.)
    '''
    # NOTE: the default for ``init`` is a tuple rather than a list to avoid
    # the shared-mutable-default-argument pitfall; it is only ever read.
    for algo in algos:
        for _ in range(n):
            # Random hyperparameters: momentum in [0, 1) (half the draws
            # collapse to 0), log-uniform rate/halflife/regularizer.
            mu = max(0, np.random.uniform(0, 2) - 1)
            rate = np.exp(np.random.uniform(-8, -1))
            half = int(np.exp(np.random.uniform(0, 4)))
            reg = np.exp(np.random.uniform(-12, 0))
            yield (algo, rate, mu, half, reg), build_and_trace(
                algo, init, limit, momentum=mu, learning_rate=rate,
                rms_halflife=half, rms_regularizer=reg)
|
lmjohns3/downhill | downhill/__init__.py | minimize | python | def minimize(loss, train, valid=None, params=None, inputs=None, algo='rmsprop',
updates=(), monitors=(), monitor_gradients=False, batch_size=32,
train_batches=None, valid_batches=None, **kwargs):
'''Minimize a loss function with respect to some symbolic parameters.
Additional keyword arguments are passed to the underlying :class:`Optimizer
<downhill.base.Optimizer>` instance.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
train : :class:`Dataset <downhill.dataset.Dataset>`, ndarray, or callable
Dataset to use for computing gradient updates.
valid : :class:`Dataset <downhill.dataset.Dataset>`, ndarray, or callable, optional
Dataset to use for validating the minimization process. The training
dataset is used if this is not provided.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
algo : str, optional
Name of the minimization algorithm to use. Must be one of the strings
that can be passed to :func:`build`. Defaults to ``'rmsprop'``.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : dict or sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as either a sequence of (name, expression) tuples, or as a dictionary
mapping string names to Theano expressions.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
batch_size : int, optional
Size of batches provided by datasets. Defaults to 32.
train_batches : int, optional
Number of batches of training data to iterate over during one pass of
optimization. Defaults to None, which uses the entire training dataset.
valid_batches : int, optional
Number of batches of validation data to iterate over during one pass of
validation. Defaults to None, which uses the entire validation dataset.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to monitor values. This dictionary
will always contain the ``'loss'`` key, giving the value of the loss
evaluated on the training dataset.
valid_monitors : dict
A dictionary mapping monitor names to monitor values, evaluated on the
validation dataset. This dictionary will always contain the ``'loss'``
key, giving the value of the loss function. Because validation is not
always computed after every optimization update, these monitor values
may be "stale"; however, they will always contain the most recently
computed values.
'''
if not isinstance(train, Dataset):
train = Dataset(
train,
name='train',
batch_size=batch_size,
iteration_size=train_batches,
)
if valid is not None and not isinstance(valid, Dataset):
valid = Dataset(
valid,
name='valid',
batch_size=batch_size,
iteration_size=valid_batches,
)
return build(
algo,
loss=loss,
params=params,
inputs=inputs,
updates=updates,
monitors=monitors,
monitor_gradients=monitor_gradients,
).minimize(train, valid, **kwargs) | Minimize a loss function with respect to some symbolic parameters.
Additional keyword arguments are passed to the underlying :class:`Optimizer
<downhill.base.Optimizer>` instance.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
train : :class:`Dataset <downhill.dataset.Dataset>`, ndarray, or callable
Dataset to use for computing gradient updates.
valid : :class:`Dataset <downhill.dataset.Dataset>`, ndarray, or callable, optional
Dataset to use for validating the minimization process. The training
dataset is used if this is not provided.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
algo : str, optional
Name of the minimization algorithm to use. Must be one of the strings
that can be passed to :func:`build`. Defaults to ``'rmsprop'``.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : dict or sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as either a sequence of (name, expression) tuples, or as a dictionary
mapping string names to Theano expressions.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
batch_size : int, optional
Size of batches provided by datasets. Defaults to 32.
train_batches : int, optional
Number of batches of training data to iterate over during one pass of
optimization. Defaults to None, which uses the entire training dataset.
valid_batches : int, optional
Number of batches of validation data to iterate over during one pass of
validation. Defaults to None, which uses the entire validation dataset.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to monitor values. This dictionary
will always contain the ``'loss'`` key, giving the value of the loss
evaluated on the training dataset.
valid_monitors : dict
A dictionary mapping monitor names to monitor values, evaluated on the
validation dataset. This dictionary will always contain the ``'loss'``
key, giving the value of the loss function. Because validation is not
always computed after every optimization update, these monitor values
may be "stale"; however, they will always contain the most recently
computed values. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/__init__.py#L9-L91 | [
"def build(algo, loss, params=None, inputs=None, updates=(), monitors=(),\n monitor_gradients=False):\n '''Construct an optimizer by name.\n\n Parameters\n ----------\n algo : str\n The name of the optimization algorithm to build.\n loss : Theano expression\n Loss function to minimize. This must be a scalar-valued expression.\n params : list of Theano variables, optional\n Symbolic variables to adjust to minimize the loss. If not given, these\n will be computed automatically by walking the computation graph.\n inputs : list of Theano variables, optional\n Symbolic variables required to compute the loss. If not given, these\n will be computed automatically by walking the computation graph.\n updates : list of update pairs, optional\n A list of pairs providing updates for the internal of the loss\n computation. Normally this is empty, but it can be provided if the loss,\n for example, requires an update to an internal random number generator.\n monitors : dict or sequence of (str, Theano expression) tuples, optional\n Additional values to monitor during optimization. These must be provided\n as either a sequence of (name, expression) tuples, or as a dictionary\n mapping string names to Theano expressions.\n monitor_gradients : bool, optional\n If True, add monitors to log the norms of the parameter gradients during\n optimization. Defaults to False.\n\n Returns\n -------\n optimizer : :class:`Optimizer`\n An optimizer instance.\n '''\n return Optimizer.build(algo, loss, params, inputs,\n updates=updates, monitors=monitors,\n monitor_gradients=monitor_gradients)\n"
] | from .adaptive import *
from .base import build, Optimizer
from .dataset import Dataset
from .first_order import *
__version__ = '0.5.0pre'
|
lmjohns3/downhill | examples/rosenbrock-2d.py | make_label | python | def make_label(loss, key):
'''Create a legend label for an optimization run.'''
algo, rate, mu, half, reg = key
slots, args = ['{:.3f}', '{}', 'm={:.3f}'], [loss, algo, mu]
if algo in 'SGD NAG RMSProp Adam ESGD'.split():
slots.append('lr={:.2e}')
args.append(rate)
if algo in 'RMSProp ADADELTA ESGD'.split():
slots.append('rmsh={}')
args.append(half)
slots.append('rmsr={:.2e}')
args.append(reg)
return ' '.join(slots).format(*args) | Create a legend label for an optimization run. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/examples/rosenbrock-2d.py#L25-L37 | null | '''Optimization example using the two-dimensional Rosenbrock "banana" function.
This example trains up several optimization algorithms and displays the
performance of each algorithm across several different (randomly-chosen)
hyperparameter settings.
This example is meant to show how different optimization algorithms perform when
given the same optimization problem. Many of the algorithms' performances are
strongly dependent on the values of various hyperparameters, such as the
learning rate and momentum values.
'''
import matplotlib.pyplot as plt
import numpy as np
import rosenbrock
def by_loss(item):
    '''Sort key returning the final loss of one optimization run.'''
    _label, (_xs, _ys, loss) = item
    return loss
# Here we run a number of rosenbrock optimization algorithms and measure their
# performance. Below we plot the results.
algos = 'SGD NAG RMSProp RProp Adam ADADELTA ESGD'.split()
results = ((make_label(loss, key), xs, ys)
for key, (xs, ys, loss)
in sorted(rosenbrock.test(algos), key=by_loss))
_, ax = plt.subplots(1, 1)
for color, (label, xs, ys) in zip(rosenbrock.COLORS, results):
ax.plot(xs, ys, 'o-', color=color, label=label,
alpha=0.8, lw=2, markersize=5,
mew=1, mec=color, mfc='none')
# make a contour plot of the rosenbrock function surface.
X, Y = np.meshgrid(np.linspace(-1.3, 1.3, 31), np.linspace(-0.9, 1.7, 31))
Z = 100 * (Y - X ** 2) ** 2 + (1 - X) ** 2
ax.plot([1], [1], 'x', mew=3, markersize=10, color='#111111')
ax.contourf(X, Y, Z, np.logspace(-1, 3, 31), cmap='gray_r')
ax.set_xlim(-1.3, 1.3)
ax.set_ylim(-0.9, 1.7)
plt.legend(loc='lower right')
plt.show()
|
lmjohns3/downhill | downhill/dataset.py | Dataset.iterate | python | def iterate(self, shuffle=True):
'''Iterate over batches in the dataset.
This method generates ``iteration_size`` batches from the dataset and
then returns.
Parameters
----------
shuffle : bool, optional
Shuffle the batches in this dataset if the iteration reaches the end
of the batch list. Defaults to True.
Yields
------
batches : data batches
A sequence of batches---often from a training, validation, or test
dataset.
'''
for _ in range(self.iteration_size):
if self._callable is not None:
yield self._callable()
else:
yield self._next_batch(shuffle) | Iterate over batches in the dataset.
This method generates ``iteration_size`` batches from the dataset and
then returns.
Parameters
----------
shuffle : bool, optional
Shuffle the batches in this dataset if the iteration reaches the end
of the batch list. Defaults to True.
Yields
------
batches : data batches
A sequence of batches---often from a training, validation, or test
dataset. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/dataset.py#L183-L205 | [
"def _next_batch(self, shuffle=True):\n batch = [x.iloc[i] if hasattr(x, 'iloc') else x[i]\n for x, i in zip(self._inputs, self._slices[self._index])]\n self._index += 1\n if self._index >= len(self._slices):\n if shuffle:\n self.shuffle()\n self._index = 0\n return batch\n"
] | class Dataset:
'''This class handles batching and shuffling a dataset.
In ``downhill``, losses are optimized using sets of data collected from the
problem that generated the loss.
During optimization, data are grouped into "mini-batches"---that is, chunks
that are larger than 1 sample and smaller than the entire set of samples;
typically the size of a mini-batch is between 10 and 100, but the specific
setting can be varied depending on your model, hardware, dataset, and so
forth. These mini-batches must be presented to the optimization algorithm in
pseudo-random order to match the underlying stochasticity assumptions of
many optimization algorithms. This class handles the process of grouping
data into mini-batches as well as iterating and shuffling these mini-batches
dynamically as the dataset is consumed by the optimization algorithm.
For many tasks, a dataset is obtained as a large block of sample data, which
in Python is normally assembled as a ``numpy`` ndarray. To use this class on
such a dataset, just pass in a list or tuple containing ``numpy`` arrays;
the number of these arrays must match the number of inputs that your loss
computation requires.
There are some cases when a suitable set of training data would be
prohibitively expensive to assemble in memory as a single ``numpy`` array.
To handle these cases, this class can also handle a dataset that is provided
via a Python callable. For more information on using callables to provide
data to your model, see :ref:`data-using-callables`.
Parameters
----------
inputs : callable or list of ndarray/sparse matrix/DataFrame/theano shared var
One or more sets of data.
If this parameter is callable, then mini-batches will be obtained by
calling the callable with no arguments; the callable is expected to
return a tuple of ndarray-like objects that will be suitable for
optimizing the loss at hand.
If this parameter is a list (or a tuple), it must contain array-like
objects: ``numpy.ndarray``, ``scipy.sparse.csc_matrix``,
``scipy.sparse.csr_matrix``, ``pandas.DataFrame`` or ``theano.shared``.
These are assumed to contain data for computing the loss, so the length
of this tuple or list should match the number of inputs required by the
loss computation. If multiple arrays are provided, their lengths along
the axis given by the ``axis`` parameter (defaults to 0) must match.
name : str, optional
A string that is used to describe this dataset. Usually something like
'test' or 'train'.
batch_size : int, optional
The size of the mini-batches to create from the data sequences. If this
is negative or zero, all data in the dataset will be used in one batch.
Defaults to 32. This parameter has no effect if ``inputs`` is callable.
iteration_size : int, optional
The number of batches to yield for each call to iterate(). Defaults to
the length of the data divided by batch_size. If the dataset is a
callable, then the number is len(callable). If callable has no length,
then the number is set to 100.
axis : int, optional
The axis along which to split the data arrays, if the first parameter is
given as one or more ndarrays. If not provided, defaults to 0.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
'''
_count = 0
def __init__(self, inputs, name=None, batch_size=32, iteration_size=None,
axis=0, rng=None):
self.name = name or 'dataset{}'.format(Dataset._count)
Dataset._count += 1
self.batch_size = batch_size
self.iteration_size = iteration_size
self.rng = rng
if rng is None or isinstance(rng, int):
self.rng = np.random.RandomState(rng)
self._inputs = None
self._slices = None
self._callable = None
if isinstance(inputs, collections.Callable):
self._init_callable(inputs)
else:
self._init_arrays(inputs, axis)
def _init_callable(self, inputs):
self._callable = inputs
if not self.iteration_size:
try:
self.iteration_size = len(inputs)
except (TypeError, AttributeError): # has no len
self.iteration_size = 100
util.log('{0.name}: {0.iteration_size} mini-batches from callable', self)
def _init_arrays(self, inputs, axis=0):
if not isinstance(inputs, (tuple, list)):
inputs = (inputs, )
shapes = []
self._inputs = []
for i, x in enumerate(inputs):
self._inputs.append(x)
if isinstance(x, np.ndarray):
shapes.append(x.shape)
continue
if isinstance(x, theano.compile.SharedVariable):
shapes.append(x.get_value(borrow=True).shape)
continue
if 'pandas.' in str(type(x)): # hacky but prevents a global import
import pandas as pd
if isinstance(x, (pd.Series, pd.DataFrame)):
shapes.append(x.shape)
continue
if 'scipy.sparse.' in str(type(x)): # same here
import scipy.sparse as ss
if isinstance(x, (ss.csr.csr_matrix, ss.csc.csc_matrix)):
shapes.append(x.shape)
continue
raise ValueError(
'input {} (type {}) must be numpy.array, theano.shared, '
'or pandas.{{Series,DataFrame}}'.format(i, type(x)))
L = shapes[0][axis]
assert all(L == s[axis] for s in shapes), \
'shapes do not match along axis {}: {}'.format(
axis, '; '.join(str(s) for s in shapes))
B = L if self.batch_size <= 0 else self.batch_size
self._index = 0
self._slices = []
for i in range(0, L, B):
where = []
for shape in shapes:
slices = [slice(None) for _ in shape]
slices[axis] = slice(i, min(L, i + B))
where.append(tuple(slices))
self._slices.append(where)
self.shuffle()
if not self.iteration_size:
self.iteration_size = len(self._slices)
util.log('{0.name}: {0.iteration_size} of {1} mini-batches from {2}',
self, len(self._slices), '; '.join(str(s) for s in shapes))
def __iter__(self):
return self.iterate(True)
def shuffle(self):
'''Shuffle the batches in the dataset.
If this dataset was constructed using a callable, this method has no
effect.
'''
if self._slices is not None:
self.rng.shuffle(self._slices)
def _next_batch(self, shuffle=True):
batch = [x.iloc[i] if hasattr(x, 'iloc') else x[i]
for x, i in zip(self._inputs, self._slices[self._index])]
self._index += 1
if self._index >= len(self._slices):
if shuffle:
self.shuffle()
self._index = 0
return batch
|
lmjohns3/downhill | downhill/util.py | shared_like | python | def shared_like(param, suffix, init=0):
'''Create a Theano shared variable like an existing parameter.
Parameters
----------
param : Theano variable
Theano variable to use for shape information.
suffix : str
Suffix to append to the parameter's name for the new variable.
init : float or ndarray, optional
Initial value of the shared variable. Defaults to 0.
Returns
-------
shared : Theano shared variable
A new shared variable with the same shape and data type as ``param``.
'''
return theano.shared(np.zeros_like(param.get_value()) + init,
name='{}_{}'.format(param.name, suffix),
broadcastable=param.broadcastable) | Create a Theano shared variable like an existing parameter.
Parameters
----------
param : Theano variable
Theano variable to use for shape information.
suffix : str
Suffix to append to the parameter's name for the new variable.
init : float or ndarray, optional
Initial value of the shared variable. Defaults to 0.
Returns
-------
shared : Theano shared variable
A new shared variable with the same shape and data type as ``param``. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/util.py#L30-L49 | null | # -*- coding: utf-8 -*-
'''A module of utility functions and other goodies.'''
import click
import datetime
import inspect
import numpy as np
import theano
import theano.tensor as TT
class Registrar(type):
    '''A metaclass that keeps a registry of the classes it creates.

    Every subclass of a Registrar-rooted base class is recorded under its
    lowercased name, enabling lookup and construction by string key.
    '''

    def __init__(cls, name, bases, dct):
        super(Registrar, cls).__init__(name, bases, dct)
        if hasattr(cls, '_registry'):
            cls._registry[name.lower()] = cls
        else:
            # The root class is not registered; it only creates the registry.
            cls._registry = {}

    def build(cls, key, *args, **kwargs):
        '''Instantiate the class registered under ``key`` (case-insensitive).'''
        return cls._registry[key.lower()](*args, **kwargs)

    def is_registered(cls, key):
        '''Return True iff a class is registered under ``key`` (case-insensitive).'''
        return key.lower() in cls._registry
def as_float(x):
    '''Cast a quantity to Theano's configured floating point type.

    Parameters
    ----------
    x : float, ndarray, or Theano expression
        The quantity to cast.

    Returns
    -------
    Theano expression
        ``x`` as a symbolic value with dtype ``floatX``.
    '''
    dtype = theano.config.floatX
    return TT.cast(x, dtype)
def find_inputs_and_params(node):
    '''Walk a computation graph, collecting its root variables.

    Parentless nodes are classified as either parameters (Theano shared
    variables) or inputs (any other non-constant variable).

    Parameters
    ----------
    node : Theano expression
        The symbolic expression whose graph is traversed.

    Returns
    -------
    inputs : list of Theano variables
        Candidate inputs: parentless nodes that are neither shared nor
        constant.
    params : list of Theano shared variables
        Candidate parameters: parentless shared variables.
    '''
    inputs, params = set(), set()
    stack, visited = [node], set()
    while stack:
        current = stack.pop()
        visited.add(current)
        parents = current.get_parents()
        stack.extend(p for p in parents if p not in visited)
        if not parents:
            if isinstance(current, theano.compile.SharedVariable):
                params.add(current)
            elif not isinstance(current, TT.Constant):
                inputs.add(current)
    return list(inputs), list(params)
# Module-level switch: when True, log() includes the caller's file and line.
_detailed_callsite = False


def enable_detailed_callsite_logging():
    '''Turn on detailed callsite information in log messages.'''
    global _detailed_callsite
    _detailed_callsite = True
def log(msg, *args, **kwargs):
'''Log a message to the console.
Parameters
----------
msg : str
A string to display on the console. This can contain {}-style
formatting commands; the remaining positional and keyword arguments
will be used to fill them in.
'''
now = datetime.datetime.now()
module = 'downhill'
if _detailed_callsite:
caller = inspect.stack()[1]
parts = caller.filename.replace('.py', '').split('/')
module = '{}:{}'.format(
'.'.join(parts[parts.index('downhill')+1:]), caller.lineno)
click.echo(' '.join((
click.style(now.strftime('%Y%m%d'), fg='blue'),
click.style(now.strftime('%H%M%S'), fg='cyan'),
click.style(module, fg='magenta'),
msg.format(*args, **kwargs),
)))
def log_param(name, value):
'''Log a parameter value to the console.
Parameters
----------
name : str
Name of the parameter being logged.
value : any
Value of the parameter being logged.
'''
log('setting {} = {}', click.style(str(name)),
click.style(str(value), fg='yellow'))
|
lmjohns3/downhill | downhill/util.py | find_inputs_and_params | python | def find_inputs_and_params(node):
'''Walk a computation graph and extract root variables.
Parameters
----------
node : Theano expression
A symbolic Theano expression to walk.
Returns
-------
inputs : list Theano variables
A list of candidate inputs for this graph. Inputs are nodes in the graph
with no parents that are not shared and are not constants.
params : list of Theano shared variables
A list of candidate parameters for this graph. Parameters are nodes in
the graph that are shared variables.
'''
queue, seen, inputs, params = [node], set(), set(), set()
while queue:
node = queue.pop()
seen.add(node)
queue.extend(p for p in node.get_parents() if p not in seen)
if not node.get_parents():
if isinstance(node, theano.compile.SharedVariable):
params.add(node)
elif not isinstance(node, TT.Constant):
inputs.add(node)
return list(inputs), list(params) | Walk a computation graph and extract root variables.
Parameters
----------
node : Theano expression
A symbolic Theano expression to walk.
Returns
-------
inputs : list Theano variables
A list of candidate inputs for this graph. Inputs are nodes in the graph
with no parents that are not shared and are not constants.
params : list of Theano shared variables
A list of candidate parameters for this graph. Parameters are nodes in
the graph that are shared variables. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/util.py#L68-L95 | null | # -*- coding: utf-8 -*-
'''A module of utility functions and other goodies.'''
import click
import datetime
import inspect
import numpy as np
import theano
import theano.tensor as TT
class Registrar(type):
'''A metaclass that builds a registry of its subclasses.'''
def __init__(cls, name, bases, dct):
if not hasattr(cls, '_registry'):
cls._registry = {}
else:
cls._registry[name.lower()] = cls
super(Registrar, cls).__init__(name, bases, dct)
def build(cls, key, *args, **kwargs):
return cls._registry[key.lower()](*args, **kwargs)
def is_registered(cls, key):
return key.lower() in cls._registry
def shared_like(param, suffix, init=0):
'''Create a Theano shared variable like an existing parameter.
Parameters
----------
param : Theano variable
Theano variable to use for shape information.
suffix : str
Suffix to append to the parameter's name for the new variable.
init : float or ndarray, optional
Initial value of the shared variable. Defaults to 0.
Returns
-------
shared : Theano shared variable
A new shared variable with the same shape and data type as ``param``.
'''
return theano.shared(np.zeros_like(param.get_value()) + init,
name='{}_{}'.format(param.name, suffix),
broadcastable=param.broadcastable)
def as_float(x):
'''Cast a floating point value to a Theano ``floatX`` symbol.
Parameters
----------
x : float, ndarray, or Theano expression
Some quantity to cast to floating point.
Returns
-------
x : Theano expression
A symbolic variable cast as a ``floatX`` value.
'''
return TT.cast(x, theano.config.floatX)
_detailed_callsite = False
def enable_detailed_callsite_logging():
'''Enable detailed callsite logging.'''
global _detailed_callsite
_detailed_callsite = True
def log(msg, *args, **kwargs):
'''Log a message to the console.
Parameters
----------
msg : str
A string to display on the console. This can contain {}-style
formatting commands; the remaining positional and keyword arguments
will be used to fill them in.
'''
now = datetime.datetime.now()
module = 'downhill'
if _detailed_callsite:
caller = inspect.stack()[1]
parts = caller.filename.replace('.py', '').split('/')
module = '{}:{}'.format(
'.'.join(parts[parts.index('downhill')+1:]), caller.lineno)
click.echo(' '.join((
click.style(now.strftime('%Y%m%d'), fg='blue'),
click.style(now.strftime('%H%M%S'), fg='cyan'),
click.style(module, fg='magenta'),
msg.format(*args, **kwargs),
)))
def log_param(name, value):
'''Log a parameter value to the console.
Parameters
----------
name : str
Name of the parameter being logged.
value : any
Value of the parameter being logged.
'''
log('setting {} = {}', click.style(str(name)),
click.style(str(value), fg='yellow'))
|
lmjohns3/downhill | downhill/util.py | log | python | def log(msg, *args, **kwargs):
'''Log a message to the console.
Parameters
----------
msg : str
A string to display on the console. This can contain {}-style
formatting commands; the remaining positional and keyword arguments
will be used to fill them in.
'''
now = datetime.datetime.now()
module = 'downhill'
if _detailed_callsite:
caller = inspect.stack()[1]
parts = caller.filename.replace('.py', '').split('/')
module = '{}:{}'.format(
'.'.join(parts[parts.index('downhill')+1:]), caller.lineno)
click.echo(' '.join((
click.style(now.strftime('%Y%m%d'), fg='blue'),
click.style(now.strftime('%H%M%S'), fg='cyan'),
click.style(module, fg='magenta'),
msg.format(*args, **kwargs),
))) | Log a message to the console.
Parameters
----------
msg : str
A string to display on the console. This can contain {}-style
formatting commands; the remaining positional and keyword arguments
will be used to fill them in. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/util.py#L107-L129 | null | # -*- coding: utf-8 -*-
'''A module of utility functions and other goodies.'''
import click
import datetime
import inspect
import numpy as np
import theano
import theano.tensor as TT
class Registrar(type):
'''A metaclass that builds a registry of its subclasses.'''
def __init__(cls, name, bases, dct):
if not hasattr(cls, '_registry'):
cls._registry = {}
else:
cls._registry[name.lower()] = cls
super(Registrar, cls).__init__(name, bases, dct)
def build(cls, key, *args, **kwargs):
return cls._registry[key.lower()](*args, **kwargs)
def is_registered(cls, key):
return key.lower() in cls._registry
def shared_like(param, suffix, init=0):
'''Create a Theano shared variable like an existing parameter.
Parameters
----------
param : Theano variable
Theano variable to use for shape information.
suffix : str
Suffix to append to the parameter's name for the new variable.
init : float or ndarray, optional
Initial value of the shared variable. Defaults to 0.
Returns
-------
shared : Theano shared variable
A new shared variable with the same shape and data type as ``param``.
'''
return theano.shared(np.zeros_like(param.get_value()) + init,
name='{}_{}'.format(param.name, suffix),
broadcastable=param.broadcastable)
def as_float(x):
'''Cast a floating point value to a Theano ``floatX`` symbol.
Parameters
----------
x : float, ndarray, or Theano expression
Some quantity to cast to floating point.
Returns
-------
x : Theano expression
A symbolic variable cast as a ``floatX`` value.
'''
return TT.cast(x, theano.config.floatX)
def find_inputs_and_params(node):
'''Walk a computation graph and extract root variables.
Parameters
----------
node : Theano expression
A symbolic Theano expression to walk.
Returns
-------
inputs : list Theano variables
A list of candidate inputs for this graph. Inputs are nodes in the graph
with no parents that are not shared and are not constants.
params : list of Theano shared variables
A list of candidate parameters for this graph. Parameters are nodes in
the graph that are shared variables.
'''
queue, seen, inputs, params = [node], set(), set(), set()
while queue:
node = queue.pop()
seen.add(node)
queue.extend(p for p in node.get_parents() if p not in seen)
if not node.get_parents():
if isinstance(node, theano.compile.SharedVariable):
params.add(node)
elif not isinstance(node, TT.Constant):
inputs.add(node)
return list(inputs), list(params)
_detailed_callsite = False
def enable_detailed_callsite_logging():
'''Enable detailed callsite logging.'''
global _detailed_callsite
_detailed_callsite = True
def log_param(name, value):
'''Log a parameter value to the console.
Parameters
----------
name : str
Name of the parameter being logged.
value : any
Value of the parameter being logged.
'''
log('setting {} = {}', click.style(str(name)),
click.style(str(value), fg='yellow'))
|
lmjohns3/downhill | downhill/util.py | log_param | python | def log_param(name, value):
'''Log a parameter value to the console.
Parameters
----------
name : str
Name of the parameter being logged.
value : any
Value of the parameter being logged.
'''
log('setting {} = {}', click.style(str(name)),
click.style(str(value), fg='yellow')) | Log a parameter value to the console.
Parameters
----------
name : str
Name of the parameter being logged.
value : any
Value of the parameter being logged. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/util.py#L132-L143 | [
"def log(msg, *args, **kwargs):\n '''Log a message to the console.\n\n Parameters\n ----------\n msg : str\n A string to display on the console. This can contain {}-style\n formatting commands; the remaining positional and keyword arguments\n will be used to fill them in.\n '''\n now = datetime.datetime.now()\n module = 'downhill'\n if _detailed_callsite:\n caller = inspect.stack()[1]\n parts = caller.filename.replace('.py', '').split('/')\n module = '{}:{}'.format(\n '.'.join(parts[parts.index('downhill')+1:]), caller.lineno)\n click.echo(' '.join((\n click.style(now.strftime('%Y%m%d'), fg='blue'),\n click.style(now.strftime('%H%M%S'), fg='cyan'),\n click.style(module, fg='magenta'),\n msg.format(*args, **kwargs),\n )))\n"
] | # -*- coding: utf-8 -*-
'''A module of utility functions and other goodies.'''
import click
import datetime
import inspect
import numpy as np
import theano
import theano.tensor as TT
class Registrar(type):
'''A metaclass that builds a registry of its subclasses.'''
def __init__(cls, name, bases, dct):
if not hasattr(cls, '_registry'):
cls._registry = {}
else:
cls._registry[name.lower()] = cls
super(Registrar, cls).__init__(name, bases, dct)
def build(cls, key, *args, **kwargs):
return cls._registry[key.lower()](*args, **kwargs)
def is_registered(cls, key):
return key.lower() in cls._registry
def shared_like(param, suffix, init=0):
'''Create a Theano shared variable like an existing parameter.
Parameters
----------
param : Theano variable
Theano variable to use for shape information.
suffix : str
Suffix to append to the parameter's name for the new variable.
init : float or ndarray, optional
Initial value of the shared variable. Defaults to 0.
Returns
-------
shared : Theano shared variable
A new shared variable with the same shape and data type as ``param``.
'''
return theano.shared(np.zeros_like(param.get_value()) + init,
name='{}_{}'.format(param.name, suffix),
broadcastable=param.broadcastable)
def as_float(x):
'''Cast a floating point value to a Theano ``floatX`` symbol.
Parameters
----------
x : float, ndarray, or Theano expression
Some quantity to cast to floating point.
Returns
-------
x : Theano expression
A symbolic variable cast as a ``floatX`` value.
'''
return TT.cast(x, theano.config.floatX)
def find_inputs_and_params(node):
'''Walk a computation graph and extract root variables.
Parameters
----------
node : Theano expression
A symbolic Theano expression to walk.
Returns
-------
inputs : list Theano variables
A list of candidate inputs for this graph. Inputs are nodes in the graph
with no parents that are not shared and are not constants.
params : list of Theano shared variables
A list of candidate parameters for this graph. Parameters are nodes in
the graph that are shared variables.
'''
queue, seen, inputs, params = [node], set(), set(), set()
while queue:
node = queue.pop()
seen.add(node)
queue.extend(p for p in node.get_parents() if p not in seen)
if not node.get_parents():
if isinstance(node, theano.compile.SharedVariable):
params.add(node)
elif not isinstance(node, TT.Constant):
inputs.add(node)
return list(inputs), list(params)
_detailed_callsite = False
def enable_detailed_callsite_logging():
'''Enable detailed callsite logging.'''
global _detailed_callsite
_detailed_callsite = True
def log(msg, *args, **kwargs):
'''Log a message to the console.
Parameters
----------
msg : str
A string to display on the console. This can contain {}-style
formatting commands; the remaining positional and keyword arguments
will be used to fill them in.
'''
now = datetime.datetime.now()
module = 'downhill'
if _detailed_callsite:
caller = inspect.stack()[1]
parts = caller.filename.replace('.py', '').split('/')
module = '{}:{}'.format(
'.'.join(parts[parts.index('downhill')+1:]), caller.lineno)
click.echo(' '.join((
click.style(now.strftime('%Y%m%d'), fg='blue'),
click.style(now.strftime('%H%M%S'), fg='cyan'),
click.style(module, fg='magenta'),
msg.format(*args, **kwargs),
)))
|
lmjohns3/downhill | examples/mnist-sparse-factorization.py | load_mnist | python | def load_mnist():
'''Load the MNIST digits dataset.'''
mnist = skdata.mnist.dataset.MNIST()
mnist.meta # trigger download if needed.
def arr(n, dtype):
arr = mnist.arrays[n]
return arr.reshape((len(arr), -1)).astype(dtype)
train_images = arr('train_images', np.float32) / 128 - 1
train_labels = arr('train_labels', np.uint8)
return ((train_images[:50000], train_labels[:50000, 0]),
(train_images[50000:], train_labels[50000:, 0])) | Load the MNIST digits dataset. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/examples/mnist-sparse-factorization.py#L11-L22 | [
"def arr(n, dtype):\n arr = mnist.arrays[n]\n return arr.reshape((len(arr), -1)).astype(dtype)\n"
] | import downhill
import matplotlib.pyplot as plt
import numpy as np
import skdata.mnist
import theano
import theano.tensor as TT
FLOAT = 'df'[theano.config.floatX == 'float32']
def plot_images(imgs, loc=111, title=None, channels=1):
'''Plot an array of images.
We assume that we are given a matrix of data whose shape is (n*n, s*s*c) --
that is, there are n^2 images along the first axis of the array, and each
image is c squares measuring s pixels on a side. Each row of the input will
be plotted as a sub-region within a single image array containing an n x n
grid of images.
'''
n = int(np.sqrt(len(imgs)))
assert n * n == len(imgs), 'images array must contain a square number of rows!'
s = int(np.sqrt(len(imgs[0]) / channels))
assert s * s == len(imgs[0]) / channels, 'images must be square!'
img = np.zeros((s * n, s * n, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * s:(r+1) * s, c * s:(c+1) * s] = pix.reshape((s, s, channels))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(loc)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray)
if title:
ax.set_title(title)
(t_images, t_labels), (v_images, v_labels) = load_mnist()
# construct training/validation sets consisting of the fours.
train = t_images[t_labels == 4]
valid = v_images[v_labels == 4]
N = 20
K = 20
B = 784
x = TT.matrix('x')
u = theano.shared(np.random.randn(N * N, K * K).astype(FLOAT), name='u')
v = theano.shared(np.random.randn(K * K, B).astype(FLOAT), name='v')
err = TT.sqr(x - TT.dot(u, v)).mean()
downhill.minimize(
loss=err + 100 * (0.01 * abs(u).mean() + (v * v).mean()),
params=[u, v],
inputs=[x],
train=train,
valid=valid,
batch_size=N * N,
monitor_gradients=True,
monitors=[
('err', err),
('u<-0.5', (u < -0.5).mean()),
('u<-0.1', (u < -0.1).mean()),
('u<0.1', (u < 0.1).mean()),
('u<0.5', (u < 0.5).mean()),
],
algo='sgd',
max_gradient_clip=1,
learning_rate=0.5,
momentum=0.9,
patience=3,
min_improvement=0.1,
)
plot_images(v.get_value(), 121)
plot_images(np.dot(u.get_value(), v.get_value()), 122)
plt.show()
|
lmjohns3/downhill | downhill/base.py | build | python | def build(algo, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
'''Construct an optimizer by name.
Parameters
----------
algo : str
The name of the optimization algorithm to build.
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internal of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : dict or sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as either a sequence of (name, expression) tuples, or as a dictionary
mapping string names to Theano expressions.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
Returns
-------
optimizer : :class:`Optimizer`
An optimizer instance.
'''
return Optimizer.build(algo, loss, params, inputs,
updates=updates, monitors=monitors,
monitor_gradients=monitor_gradients) | Construct an optimizer by name.
Parameters
----------
algo : str
The name of the optimization algorithm to build.
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internal of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : dict or sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as either a sequence of (name, expression) tuples, or as a dictionary
mapping string names to Theano expressions.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
Returns
-------
optimizer : :class:`Optimizer`
An optimizer instance. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L15-L50 | null | # -*- coding: utf-8 -*-
'''This module defines a base class for optimization techniques.'''
import click
import collections
import numpy as np
import theano
import theano.tensor as TT
import warnings
from . import util
class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer._compile | python | def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label) | Compile the Theano functions for evaluating and updating our model. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L153-L167 | null | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer.get_updates | python | def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t | Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L169-L202 | [
"def as_float(x):\n '''Cast a floating point value to a Theano ``floatX`` symbol.\n\n Parameters\n ----------\n x : float, ndarray, or Theano expression\n Some quantity to cast to floating point.\n\n Returns\n -------\n x : Theano expression\n A symbolic variable cast as a ``floatX`` value.\n '''\n return TT.cast(x, theano.config.floatX)\n",
"def shared_like(param, suffix, init=0):\n '''Create a Theano shared variable like an existing parameter.\n\n Parameters\n ----------\n param : Theano variable\n Theano variable to use for shape information.\n suffix : str\n Suffix to append to the parameter's name for the new variable.\n init : float or ndarray, optional\n Initial value of the shared variable. Defaults to 0.\n\n Returns\n -------\n shared : Theano shared variable\n A new shared variable with the same shape and data type as ``param``.\n '''\n return theano.shared(np.zeros_like(param.get_value()) + init,\n name='{}_{}'.format(param.name, suffix),\n broadcastable=param.broadcastable)\n"
] | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer._differentiate | python | def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad | Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L214-L244 | null | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer.set_params | python | def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target) | Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L246-L259 | null | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer._log | python | def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix)) | Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L261-L279 | null | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer.evaluate | python | def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors) | Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L281-L301 | null | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer._prepare | python | def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov) | Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value). | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L329-L352 | null | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer.iterate | python | def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best') | r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L354-L415 | [
"def log(msg, *args, **kwargs):\n '''Log a message to the console.\n\n Parameters\n ----------\n msg : str\n A string to display on the console. This can contain {}-style\n formatting commands; the remaining positional and keyword arguments\n will be used to fill them in.\n '''\n now = datetime.datetime.now()\n module = 'downhill'\n if _detailed_callsite:\n caller = inspect.stack()[1]\n parts = caller.filename.replace('.py', '').split('/')\n module = '{}:{}'.format(\n '.'.join(parts[parts.index('downhill')+1:]), caller.lineno)\n click.echo(' '.join((\n click.style(now.strftime('%Y%m%d'), fg='blue'),\n click.style(now.strftime('%H%M%S'), fg='cyan'),\n click.style(module, fg='magenta'),\n msg.format(*args, **kwargs),\n )))\n"
] | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer.minimize | python | def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors | Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L417-L436 | null | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
lmjohns3/downhill | downhill/base.py | Optimizer._step | python | def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0))) | Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values. | train | https://github.com/lmjohns3/downhill/blob/42111ab03b5e6fa47b7bf7c7cb5caa402f10ce6d/downhill/base.py#L438-L456 | null | class Optimizer(util.Registrar(str('Base'), (), {})):
'''An optimizer computes gradient updates to iteratively optimize a loss.
Attributes
----------
patience : int, optional
Number of validation "failures" that we are willing to tolerate before
stopping the optimization process. A validation failure happens whenever
the loss on the validation dataset decreases by less than
``min_improvement`` (relative) over the previous best validation loss.
Defaults to 5.
validate_every : int, optional
Evaluate the loss on the validation dataset after making this many
passes over the training data. Defaults to 10.
min_improvement : float, optional
Insist that the validation loss must improve by this relative amount
before considering that the optimization has made progress. The
optimization process halts when ``patience`` validations have failed to
make this relative improvement. Defaults to 0; set to a larger value
(e.g., 0.01 for 1% improvement) to halt the optimization process sooner.
max_gradient_norm : float, optional
Rescale each parameter's gradient so that it has at most this L2 norm.
Set to 0 (the default) to disable norm rescaling. If
``max_gradient_elem`` is also specified, then this has no effect.
max_gradient_elem : float, optional
Perform elementwise clipping on the magnitude of gradient values. Set to
0 (the default) to disable. If elementwise clipping is enabled, norm
rescaling (via ``max_gradient_norm``) will have no effect. Deprecated
synonyms of this parameter are "max_gradient_clip" and "gradient_clip".
learning_rate : float, optional
Many SGD-based optimization algorithms require a learning rate
hyperparameter that scales the gradient step. Defaults to 1e-4.
momentum : float, optional
Apply momentum to the parameter updates for this optimizer, with the
given strength. Typically this value ranges from 0 (no momentum) to
:math:`1 - \epsilon` (large momentum). Defaults to 0.
nesterov : bool, optional
If True, and ``momentum`` is nonzero, apply Nesterov-style momentum to
parameter updates for this optimizer. If False, and ``momentum`` is
nonzero, "regular" momentum is applied. Has no effect if ``momentum`` is
zero. See :class:`NAG <downhill.NAG>` for a description of Nesterov
momentum.
Parameters
----------
loss : Theano expression
Loss function to minimize. This must be a scalar-valued expression.
params : list of Theano variables, optional
Symbolic variables to adjust to minimize the loss. If not given, these
will be computed automatically by walking the computation graph.
inputs : list of Theano variables, optional
Symbolic variables required to compute the loss. If not given, these
will be computed automatically by walking the computation graph.
updates : list of update pairs, optional
A list of pairs providing updates for the internals of the loss
computation. Normally this is empty, but it can be provided if the loss,
for example, requires an update to an internal random number generator.
monitors : sequence of (str, Theano expression) tuples, optional
Additional values to monitor during optimization. These must be provided
as a sequence of (name, expression) tuples.
monitor_gradients : bool, optional
If True, add monitors to log the norms of the parameter gradients during
optimization. Defaults to False.
'''
def __init__(self, loss, params=None, inputs=None, updates=(), monitors=(),
monitor_gradients=False):
inputs_, params_ = util.find_inputs_and_params(loss)
self._loss = loss
self._params = params or params_
self._inputs = inputs or inputs_
self._updates = updates
self._shapes = [p.get_value(borrow=True).shape for p in self._params]
self._counts = [np.prod(s) for s in self._shapes]
self._starts = np.cumsum([0] + self._counts)[:-1]
self._dtype = self._params[0].get_value().dtype
self._curr_iter = 0
self._best_iter = 0
self._best_loss = 1e100
self._best_params = [p.get_value().copy() for p in self._params]
self._monitor_exprs = [self._loss]
self._monitor_names = ['loss']
for name, monitor in monitors:
self._monitor_names.append(name)
self._monitor_exprs.append(monitor)
if monitor_gradients:
unnamed = 0
for p, g in zip(self._params, TT.grad(self._loss, self._params)):
name = p.name
if not name:
name = 'unnamed{}'.format(unnamed)
unnamed += 1
util.log('"{}" unnamed, will be "{}" internally'.format(p, name))
self._monitor_names.append('grad({})'.format(name))
self._monitor_exprs.append((g * g).sum())
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label)
def get_updates(self, **kwargs):
'''Get parameter update expressions for performing optimization.
Keyword arguments can be applied here to set any of the global
optimizer attributes.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
self._prepare(**kwargs)
for param, grad in self._differentiate():
for var, update in self._get_updates_for(param, grad):
# For auxiliary variables, updates are meant to replace the
# existing variable value.
if var != param:
yield var, update
continue
# If momentum is disabled, just apply the parameter delta.
if self.momentum == 0:
yield var, param - update
continue
# Momentum is enabled, so we keep track of velocity here.
vel_tm1 = util.shared_like(param, 'vel')
vel_t = util.as_float(self.momentum) * vel_tm1 - update
if self.nesterov:
# see http://arxiv.org/pdf/1212.0901v2.pdf (eq 7) and
# https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
mom_sqr = util.as_float(self.momentum ** 2)
mom_inc = util.as_float(1 + self.momentum)
vel_t = mom_sqr * vel_tm1 - mom_inc * update
yield vel_tm1, vel_t
yield param, param + vel_t
def _get_updates_for(self, param, grad):
'''Generate some update pairs for the given model parameter.
Yields
------
updates : (parameter, expression) tuples
A sequence of parameter updates to be applied during optimization.
'''
raise NotImplementedError
def _differentiate(self, params=None):
'''Return a sequence of gradients for our parameters.
If this optimizer has been configured with a gradient norm limit, or
with elementwise gradient clipping, this method applies the appropriate
rescaling and clipping operations before returning the gradient.
Parameters
----------
params : list of Theano variables, optional
Return the gradient with respect to these parameters. Defaults to
all parameters that the optimizer knows about.
Yields
------
pairs : (param, grad) tuples
Generates a sequence of tuples representing each of the parameters
requested and the corresponding Theano gradient expressions.
'''
if params is None:
params = self._params
for param, grad in zip(params, TT.grad(self._loss, params)):
if self.max_gradient_elem > 0:
limit = util.as_float(self.max_gradient_elem)
yield param, TT.clip(grad, -limit, limit)
elif self.max_gradient_norm > 0:
norm = TT.sqrt((grad * grad).sum())
limit = util.as_float(self.max_gradient_norm)
yield param, grad * TT.minimum(1, limit / norm)
else:
yield param, grad
def set_params(self, targets=None):
'''Set the values of the parameters to the given target values.
Parameters
----------
targets : sequence of ndarray, optional
Arrays for setting the parameters of our model. If this is not
provided, the current best parameters for this optimizer will be
used.
'''
if not isinstance(targets, (list, tuple)):
targets = self._best_params
for param, target in zip(self._params, targets):
param.set_value(target)
def _log(self, monitors, iteration, label='', suffix=''):
'''Log the state of the optimizer on the console.
Parameters
----------
monitors : OrderedDict
A dictionary of monitor names mapped to values. These names and
values are what is being logged.
iteration : int
Optimization iteration that we are logging.
label : str, optional
A label for the name of the optimizer creating the log line.
Defaults to the name of the current class.
suffix : str, optional
A suffix to add to the end of the log line, if any.
'''
label = label or self.__class__.__name__
fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors)
def _test_patience(self, monitors):
'''Test whether our patience with optimization has elapsed.
Parameters
----------
monitors : dict
A dictionary mapping monitor names to values. The 'loss' key from
this dictionary will be used to evaluate optimization progress.
Returns
-------
elapsed : bool
True iff our patience has elapsed and the model is no longer
improving.
'''
self._curr_iter += 1
marker = ''
loss = monitors['loss']
if self._best_loss - loss > self._best_loss * self.min_improvement:
self._best_loss = loss
self._best_iter = self._curr_iter
self._best_params = [p.get_value().copy() for p in self._params]
marker = ' *'
self._log(monitors, self._curr_iter - 1, 'validation', marker)
return self._curr_iter - self._best_iter > self.patience
def _prepare(self, **kwargs):
'''Set up properties for optimization.
This method can be overridden by base classes to provide parameters that
are specific to a particular optimization technique (e.g., setting up a
learning rate value).
'''
self.learning_rate = util.as_float(kwargs.pop('learning_rate', 1e-4))
self.momentum = kwargs.pop('momentum', 0)
self.nesterov = kwargs.pop('nesterov', False)
self.patience = kwargs.get('patience', 5)
self.validate_every = kwargs.pop('validate_every', 10)
self.min_improvement = kwargs.pop('min_improvement', 0)
self.max_gradient_norm = kwargs.pop('max_gradient_norm', 0)
self.max_gradient_elem = kwargs.pop('max_gradient_elem', 0)
util.log_param('patience', self.patience)
util.log_param('validate_every', self.validate_every)
util.log_param('min_improvement', self.min_improvement)
util.log_param('max_gradient_norm', self.max_gradient_norm)
util.log_param('max_gradient_elem', self.max_gradient_elem)
util.log_param('learning_rate', self.learning_rate)
util.log_param('momentum', self.momentum)
util.log_param('nesterov', self.nesterov)
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
r'''Optimize a loss iteratively using a training and validation dataset.
This method yields a series of monitor values to the caller. After every
optimization epoch, a pair of monitor dictionaries is generated: one
evaluated on the training dataset during the epoch, and another
evaluated on the validation dataset at the most recent validation epoch.
The validation monitors might not be updated during every optimization
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Additional keyword arguments supplied here will set the global
optimizer attributes.
Parameters
----------
train : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : sequence or :class:`Dataset <downhill.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving. Defaults to the
training data.
max_updates : int, optional
If specified, halt optimization after this many gradient updates
have been processed. If not provided, uses early stopping to decide
when to halt.
Yields
------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
self._compile(**kwargs)
if valid is None:
valid = train
iteration = 0
training = validation = None
while max_updates is None or iteration < max_updates:
if not iteration % self.validate_every:
try:
validation = self.evaluate(valid)
except KeyboardInterrupt:
util.log('interrupted!')
break
if self._test_patience(validation):
util.log('patience elapsed!')
break
try:
training = self._step(train)
except KeyboardInterrupt:
util.log('interrupted!')
break
iteration += 1
self._log(training, iteration)
yield training, validation
self.set_params('best')
def minimize(self, *args, **kwargs):
'''Optimize our loss exhaustively.
This method is a thin wrapper over the :func:`iterate` method. It simply
exhausts the iterative optimization process and returns the final
monitor values.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
valid_monitors : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
monitors = None
for monitors in self.iterate(*args, **kwargs):
pass
return monitors
|
HdrHistogram/HdrHistogram_py | hdrh/codec.py | HdrPayload.init_counts | python | def init_counts(self, counts_len):
'''Called after instantiating with a compressed payload
Params:
counts_len counts size to use based on decoded settings in the header
'''
assert self._data and counts_len and self.counts_len == 0
self.counts_len = counts_len
self._init_counts()
results = decode(self._data, payload_header_size, addressof(self.counts),
counts_len, self.word_size)
# no longer needed
self._data = None
return results | Called after instantiating with a compressed payload
Params:
counts_len counts size to use based on decoded settings in the header | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/codec.py#L156-L169 | [
"def _init_counts(self):\n self.counts = (self.counter_ctype * self.counts_len)()\n"
] | class HdrPayload():
'''A class that wraps the ctypes big endian struct that will hold the
histogram wire format content (including the counters).
'''
def __init__(self, word_size, counts_len=0, compressed_payload=None):
'''Two ways to use this class:
- for an empty histogram (pass counts_len>0 and compressed_payload=None)
- for a decoded histogram (counts_len=0 and compressed_payload!=None)
Params:
word_size counter size in bytes (2,4,8 byte counters are supported)
counts_len number of counters to allocate
ignored if a compressed payload is provided (not None)
compressed_payload (string) a payload in zlib compressed form,
decompress and decode the payload header.
Caller must then invoke init_counts to pass in counts_len so that the
counts array can be updated from the decoded varint buffer
None if no compressed payload is to be associated to this instance
'''
self.word_size = word_size
self.counts_len = counts_len
self._data = None
try:
# ctype counter type
self.counter_ctype = payload_counter_ctype[word_size]
except IndexError:
raise ValueError('Invalid word size')
if not self.counter_ctype:
raise ValueError('Invalid word size')
if compressed_payload:
self._decompress(compressed_payload)
elif counts_len:
self.payload = PayloadHeader()
self.payload.cookie = get_encoding_cookie()
self._init_counts()
else:
raise RuntimeError('counts_len cannot be zero')
def _init_counts(self):
self.counts = (self.counter_ctype * self.counts_len)()
def get_counts(self):
return self.counts
def _decompress(self, compressed_payload):
'''Decompress a compressed payload into this payload wrapper.
Note that the decompressed buffer is saved in self._data and the
counts array is not yet allocated.
Args:
compressed_payload (string) a payload in zlib compressed form
Exception:
HdrCookieException:
the compressed payload has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value
'''
# make sure this instance is pristine
if self._data:
raise RuntimeError('Cannot decompress to an instance with payload')
# Here it is important to keep a reference to the decompressed
# string so that it does not get garbage collected
self._data = zlib.decompress(compressed_payload)
len_data = len(self._data)
counts_size = len_data - payload_header_size
if payload_header_size > counts_size > MAX_COUNTS_SIZE:
raise HdrLengthException('Invalid size:' + str(len_data))
# copy the first bytes for the header
self.payload = PayloadHeader.from_buffer_copy(self._data)
cookie = self.payload.cookie
if get_cookie_base(cookie) != V2_ENCODING_COOKIE_BASE:
raise HdrCookieException('Invalid cookie: %x' % cookie)
word_size = get_word_size_in_bytes_from_cookie(cookie)
if word_size != V2_MAX_WORD_SIZE_IN_BYTES:
raise HdrCookieException('Invalid V2 cookie: %x' % cookie)
def compress(self, counts_limit):
'''Compress this payload instance
Args:
counts_limit how many counters should be encoded
starting from index 0 (can be 0),
Return:
the compressed payload (python string)
'''
if self.payload:
# worst case varint encoded length is when each counter is at the maximum value
# in this case 1 more byte per counter is needed due to the more bits
varint_len = counts_limit * (self.word_size + 1)
# allocate enough space to fit the header and the varint string
encode_buf = (c_byte * (payload_header_size + varint_len))()
# encode past the payload header
varint_len = encode(addressof(self.counts), counts_limit,
self.word_size,
addressof(encode_buf) + payload_header_size,
varint_len)
# copy the header after updating the varint stream length
self.payload.payload_len = varint_len
ctypes.memmove(addressof(encode_buf), addressof(self.payload), payload_header_size)
cdata = zlib.compress(ctypes.string_at(encode_buf, payload_header_size + varint_len))
return cdata
# can't compress if no payload
raise RuntimeError('No payload to compress')
def dump(self, label=None):
if label:
print('Payload Dump ' + label)
print(' payload cookie: %x' % (self.payload.cookie))
print(' payload_len: %d' % (self.payload.payload_len))
print(' counts_len: %d' % (self.counts_len))
dump_payload(self.get_counts(), self.counts_len)
|
HdrHistogram/HdrHistogram_py | hdrh/codec.py | HdrPayload._decompress | python | def _decompress(self, compressed_payload):
'''Decompress a compressed payload into this payload wrapper.
Note that the decompressed buffer is saved in self._data and the
counts array is not yet allocated.
Args:
compressed_payload (string) a payload in zlib compressed form
Exception:
HdrCookieException:
the compressed payload has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value
'''
# make sure this instance is pristine
if self._data:
raise RuntimeError('Cannot decompress to an instance with payload')
# Here it is important to keep a reference to the decompressed
# string so that it does not get garbage collected
self._data = zlib.decompress(compressed_payload)
len_data = len(self._data)
counts_size = len_data - payload_header_size
if payload_header_size > counts_size > MAX_COUNTS_SIZE:
raise HdrLengthException('Invalid size:' + str(len_data))
# copy the first bytes for the header
self.payload = PayloadHeader.from_buffer_copy(self._data)
cookie = self.payload.cookie
if get_cookie_base(cookie) != V2_ENCODING_COOKIE_BASE:
raise HdrCookieException('Invalid cookie: %x' % cookie)
word_size = get_word_size_in_bytes_from_cookie(cookie)
if word_size != V2_MAX_WORD_SIZE_IN_BYTES:
raise HdrCookieException('Invalid V2 cookie: %x' % cookie) | Decompress a compressed payload into this payload wrapper.
Note that the decompressed buffer is saved in self._data and the
counts array is not yet allocated.
Args:
compressed_payload (string) a payload in zlib compressed form
Exception:
HdrCookieException:
the compressed payload has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/codec.py#L174-L211 | [
"def get_cookie_base(cookie):\n return cookie & ~0xf0\n",
"def get_word_size_in_bytes_from_cookie(cookie):\n if (get_cookie_base(cookie) == V2_ENCODING_COOKIE_BASE) or \\\n (get_cookie_base(cookie) == V2_COMPRESSION_COOKIE_BASE):\n return V2_MAX_WORD_SIZE_IN_BYTES\n return (cookie & 0xf0) >> 4\n"
] | class HdrPayload():
'''A class that wraps the ctypes big endian struct that will hold the
histogram wire format content (including the counters).
'''
def __init__(self, word_size, counts_len=0, compressed_payload=None):
'''Two ways to use this class:
- for an empty histogram (pass counts_len>0 and compressed_payload=None)
- for a decoded histogram (counts_len=0 and compressed_payload!=None)
Params:
word_size counter size in bytes (2,4,8 byte counters are supported)
counts_len number of counters to allocate
ignored if a compressed payload is provided (not None)
compressed_payload (string) a payload in zlib compressed form,
decompress and decode the payload header.
Caller must then invoke init_counts to pass in counts_len so that the
counts array can be updated from the decoded varint buffer
None if no compressed payload is to be associated to this instance
'''
self.word_size = word_size
self.counts_len = counts_len
self._data = None
try:
# ctype counter type
self.counter_ctype = payload_counter_ctype[word_size]
except IndexError:
raise ValueError('Invalid word size')
if not self.counter_ctype:
raise ValueError('Invalid word size')
if compressed_payload:
self._decompress(compressed_payload)
elif counts_len:
self.payload = PayloadHeader()
self.payload.cookie = get_encoding_cookie()
self._init_counts()
else:
raise RuntimeError('counts_len cannot be zero')
def _init_counts(self):
self.counts = (self.counter_ctype * self.counts_len)()
def init_counts(self, counts_len):
'''Called after instantiating with a compressed payload
Params:
counts_len counts size to use based on decoded settings in the header
'''
assert self._data and counts_len and self.counts_len == 0
self.counts_len = counts_len
self._init_counts()
results = decode(self._data, payload_header_size, addressof(self.counts),
counts_len, self.word_size)
# no longer needed
self._data = None
return results
def get_counts(self):
return self.counts
def compress(self, counts_limit):
'''Compress this payload instance
Args:
counts_limit how many counters should be encoded
starting from index 0 (can be 0),
Return:
the compressed payload (python string)
'''
if self.payload:
# worst case varint encoded length is when each counter is at the maximum value
# in this case 1 more byte per counter is needed due to the more bits
varint_len = counts_limit * (self.word_size + 1)
# allocate enough space to fit the header and the varint string
encode_buf = (c_byte * (payload_header_size + varint_len))()
# encode past the payload header
varint_len = encode(addressof(self.counts), counts_limit,
self.word_size,
addressof(encode_buf) + payload_header_size,
varint_len)
# copy the header after updating the varint stream length
self.payload.payload_len = varint_len
ctypes.memmove(addressof(encode_buf), addressof(self.payload), payload_header_size)
cdata = zlib.compress(ctypes.string_at(encode_buf, payload_header_size + varint_len))
return cdata
# can't compress if no payload
raise RuntimeError('No payload to compress')
def dump(self, label=None):
if label:
print('Payload Dump ' + label)
print(' payload cookie: %x' % (self.payload.cookie))
print(' payload_len: %d' % (self.payload.payload_len))
print(' counts_len: %d' % (self.counts_len))
dump_payload(self.get_counts(), self.counts_len)
|
HdrHistogram/HdrHistogram_py | hdrh/codec.py | HdrPayload.compress | python | def compress(self, counts_limit):
'''Compress this payload instance
Args:
counts_limit how many counters should be encoded
starting from index 0 (can be 0),
Return:
the compressed payload (python string)
'''
if self.payload:
# worst case varint encoded length is when each counter is at the maximum value
# in this case 1 more byte per counter is needed due to the more bits
varint_len = counts_limit * (self.word_size + 1)
# allocate enough space to fit the header and the varint string
encode_buf = (c_byte * (payload_header_size + varint_len))()
# encode past the payload header
varint_len = encode(addressof(self.counts), counts_limit,
self.word_size,
addressof(encode_buf) + payload_header_size,
varint_len)
# copy the header after updating the varint stream length
self.payload.payload_len = varint_len
ctypes.memmove(addressof(encode_buf), addressof(self.payload), payload_header_size)
cdata = zlib.compress(ctypes.string_at(encode_buf, payload_header_size + varint_len))
return cdata
# can't compress if no payload
raise RuntimeError('No payload to compress') | Compress this payload instance
Args:
counts_limit how many counters should be encoded
starting from index 0 (can be 0),
Return:
the compressed payload (python string) | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/codec.py#L213-L241 | null | class HdrPayload():
'''A class that wraps the ctypes big endian struct that will hold the
histogram wire format content (including the counters).
'''
def __init__(self, word_size, counts_len=0, compressed_payload=None):
'''Two ways to use this class:
- for an empty histogram (pass counts_len>0 and compressed_payload=None)
- for a decoded histogram (counts_len=0 and compressed_payload!=None)
Params:
word_size counter size in bytes (2,4,8 byte counters are supported)
counts_len number of counters to allocate
ignored if a compressed payload is provided (not None)
compressed_payload (string) a payload in zlib compressed form,
decompress and decode the payload header.
Caller must then invoke init_counts to pass in counts_len so that the
counts array can be updated from the decoded varint buffer
None if no compressed payload is to be associated to this instance
'''
self.word_size = word_size
self.counts_len = counts_len
self._data = None
try:
# ctype counter type
self.counter_ctype = payload_counter_ctype[word_size]
except IndexError:
raise ValueError('Invalid word size')
if not self.counter_ctype:
raise ValueError('Invalid word size')
if compressed_payload:
self._decompress(compressed_payload)
elif counts_len:
self.payload = PayloadHeader()
self.payload.cookie = get_encoding_cookie()
self._init_counts()
else:
raise RuntimeError('counts_len cannot be zero')
def _init_counts(self):
self.counts = (self.counter_ctype * self.counts_len)()
def init_counts(self, counts_len):
'''Called after instantiating with a compressed payload
Params:
counts_len counts size to use based on decoded settings in the header
'''
assert self._data and counts_len and self.counts_len == 0
self.counts_len = counts_len
self._init_counts()
results = decode(self._data, payload_header_size, addressof(self.counts),
counts_len, self.word_size)
# no longer needed
self._data = None
return results
def get_counts(self):
return self.counts
def _decompress(self, compressed_payload):
'''Decompress a compressed payload into this payload wrapper.
Note that the decompressed buffer is saved in self._data and the
counts array is not yet allocated.
Args:
compressed_payload (string) a payload in zlib compressed form
Exception:
HdrCookieException:
the compressed payload has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value
'''
# make sure this instance is pristine
if self._data:
raise RuntimeError('Cannot decompress to an instance with payload')
# Here it is important to keep a reference to the decompressed
# string so that it does not get garbage collected
self._data = zlib.decompress(compressed_payload)
len_data = len(self._data)
counts_size = len_data - payload_header_size
if payload_header_size > counts_size > MAX_COUNTS_SIZE:
raise HdrLengthException('Invalid size:' + str(len_data))
# copy the first bytes for the header
self.payload = PayloadHeader.from_buffer_copy(self._data)
cookie = self.payload.cookie
if get_cookie_base(cookie) != V2_ENCODING_COOKIE_BASE:
raise HdrCookieException('Invalid cookie: %x' % cookie)
word_size = get_word_size_in_bytes_from_cookie(cookie)
if word_size != V2_MAX_WORD_SIZE_IN_BYTES:
raise HdrCookieException('Invalid V2 cookie: %x' % cookie)
def dump(self, label=None):
if label:
print('Payload Dump ' + label)
print(' payload cookie: %x' % (self.payload.cookie))
print(' payload_len: %d' % (self.payload.payload_len))
print(' counts_len: %d' % (self.counts_len))
dump_payload(self.get_counts(), self.counts_len)
|
HdrHistogram/HdrHistogram_py | hdrh/codec.py | HdrHistogramEncoder.encode | python | def encode(self):
'''Compress the associated encodable payload,
prepend the header then encode with base64 if requested
Returns:
the b64 encoded wire encoding of the histogram (as a string)
or the compressed payload (as a string, if b64 wrappinb is disabled)
'''
# only compress the first non zero buckets
# if histogram is empty we do not encode any counter
if self.histogram.total_count:
relevant_length = \
self.histogram.get_counts_array_index(self.histogram.max_value) + 1
else:
relevant_length = 0
cpayload = self.payload.compress(relevant_length)
if self.b64_wrap:
self.header.length = len(cpayload)
header_str = ctypes.string_at(addressof(self.header), ext_header_size)
return base64.b64encode(header_str + cpayload)
return cpayload | Compress the associated encodable payload,
prepend the header then encode with base64 if requested
Returns:
the b64 encoded wire encoding of the histogram (as a string)
or the compressed payload (as a string, if b64 wrappinb is disabled) | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/codec.py#L290-L310 | null | class HdrHistogramEncoder():
'''An encoder class for histograms, only supports V1 encoding.
'''
def __init__(self, histogram, b64_wrap=True, hdr_payload=None):
'''Histogram encoder
Args:
histogram the histogram to encode/decode into
b64_wrap determines if the base64 wrapper is enabled or not
hdr_payload if None will create a new HdrPayload instance for this
encoder, else will reuse the passed Hdrayload instance (useful
after decoding one and to associate it to a new histogram)
word_size counters size in bytes (2, 4 or 8)
Exceptions:
ValueError if the word_size value is unsupported
'''
self.histogram = histogram
if not hdr_payload:
self.payload = HdrPayload(histogram.word_size, histogram.counts_len)
payload = self.payload.payload
# those values never change across encodings
payload.normalizing_index_offset = 0
payload.conversion_ratio_bits = 1
payload.significant_figures = histogram.significant_figures
payload.lowest_trackable_value = histogram.lowest_trackable_value
payload.highest_trackable_value = histogram.highest_trackable_value
else:
self.payload = hdr_payload
self.b64_wrap = b64_wrap
self.header = ExternalHeader()
self.header.cookie = get_compression_cookie()
def get_counts(self):
'''Retrieve the counts array that can be used to store live counters
and that can be encoded with minimal copies using encode()
'''
return self.payload.get_counts()
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
'''Decode a wire histogram encoding into a read-only Hdr Payload instance
Args:
encoded_histogram a string containing the wire encoding of a histogram
such as one returned from encode()
Returns:
an hdr_payload instance with all the decoded/uncompressed fields
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value
zlib.error:
in case of zlib decompression error
'''
if b64_wrap:
b64decode = base64.b64decode(encoded_histogram)
# this string has 2 parts in it: the header (raw) and the payload (compressed)
b64dec_len = len(b64decode)
if b64dec_len < ext_header_size:
raise HdrLengthException('Base64 decoded message too short')
header = ExternalHeader.from_buffer_copy(b64decode)
if get_cookie_base(header.cookie) != V2_COMPRESSION_COOKIE_BASE:
raise HdrCookieException()
if header.length != b64dec_len - ext_header_size:
raise HdrLengthException('Decoded length=%d buffer length=%d' %
(header.length, b64dec_len - ext_header_size))
# this will result in a copy of the compressed payload part
# could not find a way to do otherwise since zlib.decompress()
# expects a string (and does not like a buffer or a memoryview object)
cpayload = b64decode[ext_header_size:]
else:
cpayload = encoded_histogram
hdr_payload = HdrPayload(8, compressed_payload=cpayload)
return hdr_payload
def add(self, other_encoder):
add_array(addressof(self.get_counts()),
addressof(other_encoder.get_counts()),
self.histogram.counts_len,
self.histogram.word_size)
|
HdrHistogram/HdrHistogram_py | hdrh/codec.py | HdrHistogramEncoder.decode | python | def decode(encoded_histogram, b64_wrap=True):
'''Decode a wire histogram encoding into a read-only Hdr Payload instance
Args:
encoded_histogram a string containing the wire encoding of a histogram
such as one returned from encode()
Returns:
an hdr_payload instance with all the decoded/uncompressed fields
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value
zlib.error:
in case of zlib decompression error
'''
if b64_wrap:
b64decode = base64.b64decode(encoded_histogram)
# this string has 2 parts in it: the header (raw) and the payload (compressed)
b64dec_len = len(b64decode)
if b64dec_len < ext_header_size:
raise HdrLengthException('Base64 decoded message too short')
header = ExternalHeader.from_buffer_copy(b64decode)
if get_cookie_base(header.cookie) != V2_COMPRESSION_COOKIE_BASE:
raise HdrCookieException()
if header.length != b64dec_len - ext_header_size:
raise HdrLengthException('Decoded length=%d buffer length=%d' %
(header.length, b64dec_len - ext_header_size))
# this will result in a copy of the compressed payload part
# could not find a way to do otherwise since zlib.decompress()
# expects a string (and does not like a buffer or a memoryview object)
cpayload = b64decode[ext_header_size:]
else:
cpayload = encoded_histogram
hdr_payload = HdrPayload(8, compressed_payload=cpayload)
return hdr_payload | Decode a wire histogram encoding into a read-only Hdr Payload instance
Args:
encoded_histogram a string containing the wire encoding of a histogram
such as one returned from encode()
Returns:
an hdr_payload instance with all the decoded/uncompressed fields
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value
zlib.error:
in case of zlib decompression error | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/codec.py#L313-L356 | null | class HdrHistogramEncoder():
'''An encoder class for histograms, only supports V1 encoding.
'''
def __init__(self, histogram, b64_wrap=True, hdr_payload=None):
'''Histogram encoder
Args:
histogram the histogram to encode/decode into
b64_wrap determines if the base64 wrapper is enabled or not
hdr_payload if None will create a new HdrPayload instance for this
encoder, else will reuse the passed Hdrayload instance (useful
after decoding one and to associate it to a new histogram)
word_size counters size in bytes (2, 4 or 8)
Exceptions:
ValueError if the word_size value is unsupported
'''
self.histogram = histogram
if not hdr_payload:
self.payload = HdrPayload(histogram.word_size, histogram.counts_len)
payload = self.payload.payload
# those values never change across encodings
payload.normalizing_index_offset = 0
payload.conversion_ratio_bits = 1
payload.significant_figures = histogram.significant_figures
payload.lowest_trackable_value = histogram.lowest_trackable_value
payload.highest_trackable_value = histogram.highest_trackable_value
else:
self.payload = hdr_payload
self.b64_wrap = b64_wrap
self.header = ExternalHeader()
self.header.cookie = get_compression_cookie()
def get_counts(self):
'''Retrieve the counts array that can be used to store live counters
and that can be encoded with minimal copies using encode()
'''
return self.payload.get_counts()
def encode(self):
'''Compress the associated encodable payload,
prepend the header then encode with base64 if requested
Returns:
the b64 encoded wire encoding of the histogram (as a string)
or the compressed payload (as a string, if b64 wrappinb is disabled)
'''
# only compress the first non zero buckets
# if histogram is empty we do not encode any counter
if self.histogram.total_count:
relevant_length = \
self.histogram.get_counts_array_index(self.histogram.max_value) + 1
else:
relevant_length = 0
cpayload = self.payload.compress(relevant_length)
if self.b64_wrap:
self.header.length = len(cpayload)
header_str = ctypes.string_at(addressof(self.header), ext_header_size)
return base64.b64encode(header_str + cpayload)
return cpayload
@staticmethod
def add(self, other_encoder):
add_array(addressof(self.get_counts()),
addressof(other_encoder.get_counts()),
self.histogram.counts_len,
self.histogram.word_size)
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.record_value | python | def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True | Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1) | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L175-L191 | [
"def _counts_index_for(self, value):\n bucket_index = self._get_bucket_index(value)\n sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)\n return self._counts_index(bucket_index, sub_bucket_index)\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays, we return zero for any index that is passed the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0
def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
'''Set the end time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
highest_recordable_value = \
self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
if highest_recordable_value < other_hist.get_max_value():
raise IndexError("The other histogram includes values that do not fit %d < %d" %
(highest_recordable_value, other_hist.get_max_value()))
if (self.bucket_count == other_hist.bucket_count) and \
(self.sub_bucket_count == other_hist.sub_bucket_count) and \
(self.unit_magnitude == other_hist.unit_magnitude) and \
(self.word_size == other_hist.word_size):
# do an in-place addition of one array to another
self.encoder.add(other_hist.encoder)
self.total_count += other_hist.get_total_count()
self.max_value = max(self.max_value, other_hist.get_max_value())
self.min_value = min(self.get_min_value(), other_hist.get_min_value())
else:
# Arrays are not a direct match, so we can't just stream through and add them.
# Instead, go through the array and add each non-zero value found at it's proper value:
for index in range(other_hist.counts_len):
other_count = other_hist.get_count_at_index(index)
if other_count > 0:
self.record_value(other_hist.get_value_from_index(index), other_count)
self.start_time_stamp_msec = \
min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
self.end_time_stamp_msec = \
max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
def decode_and_add(self, encoded_histogram):
'''Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
self.add(other_hist)
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram
    def get_word_size(self):
        '''Return the size in bytes of a single counter (2, 4 or 8).'''
        return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.record_corrected_value | python | def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval | Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1) | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L194-L208 | [
"def record_value(self, value, count=1):\n '''Record a new value into the histogram\n\n Args:\n value: the value to record (must be in the valid range)\n count: incremental count (defaults to 1)\n '''\n if value < 0:\n return False\n counts_index = self._counts_index_for(value)\n if (counts_index < 0) or (self.counts_len <= counts_index):\n return False\n self.counts[counts_index] += count\n self.total_count += count\n self.min_value = min(self.min_value, value)\n self.max_value = max(self.max_value, value)\n return True\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True
# pylint: disable=inconsistent-return-statements
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays, we return zero for any index that is passed the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0
def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
'''Set the end time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
highest_recordable_value = \
self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
if highest_recordable_value < other_hist.get_max_value():
raise IndexError("The other histogram includes values that do not fit %d < %d" %
(highest_recordable_value, other_hist.get_max_value()))
if (self.bucket_count == other_hist.bucket_count) and \
(self.sub_bucket_count == other_hist.sub_bucket_count) and \
(self.unit_magnitude == other_hist.unit_magnitude) and \
(self.word_size == other_hist.word_size):
# do an in-place addition of one array to another
self.encoder.add(other_hist.encoder)
self.total_count += other_hist.get_total_count()
self.max_value = max(self.max_value, other_hist.get_max_value())
self.min_value = min(self.get_min_value(), other_hist.get_min_value())
else:
# Arrays are not a direct match, so we can't just stream through and add them.
# Instead, go through the array and add each non-zero value found at it's proper value:
for index in range(other_hist.counts_len):
other_count = other_hist.get_count_at_index(index)
if other_count > 0:
self.record_value(other_hist.get_value_from_index(index), other_count)
self.start_time_stamp_msec = \
min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
self.end_time_stamp_msec = \
max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
def decode_and_add(self, encoded_histogram):
'''Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
self.add(other_hist)
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram
def get_word_size(self):
return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.get_value_at_percentile | python | def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0 | Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L268-L285 | [
"def get_count_at_index(self, index):\n if index >= self.counts_len:\n raise IndexError()\n # some decoded (read-only) histograms may have truncated\n # counts arrays, we return zero for any index that is passed the array\n if index >= self.encoder.payload.counts_len:\n return 0\n return self.counts[index]\n",
"def get_value_from_index(self, index):\n bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1\n sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \\\n self.sub_bucket_half_count\n if bucket_index < 0:\n sub_bucket_index -= self.sub_bucket_half_count\n bucket_index = 0\n return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)\n",
"def get_lowest_equivalent_value(self, value):\n bucket_index = self._get_bucket_index(value)\n sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)\n\n lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,\n sub_bucket_index)\n return lowest_equivalent_value\n",
"def get_highest_equivalent_value(self, value):\n bucket_index = self._get_bucket_index(value)\n sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)\n\n lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,\n sub_bucket_index)\n if sub_bucket_index >= self.sub_bucket_count:\n bucket_index += 1\n size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)\n next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range\n\n return next_non_equivalent_value - 1\n",
"def get_target_count_at_percentile(self, percentile):\n requested_percentile = min(percentile, 100.0)\n count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)\n return max(count_at_percentile, 1)\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
    def __init__(self,
                 lowest_trackable_value,
                 highest_trackable_value,
                 significant_figures,
                 word_size=8,
                 b64_wrap=True,
                 hdr_payload=None):
        '''Create a new histogram with given arguments
        Params:
            lowest_trackable_value The lowest value that can be discerned
                (distinguished from 0) by the histogram.
                Must be a positive integer that is >= 1.
                May be internally rounded down to nearest power of 2.
            highest_trackable_value The highest value to be tracked by the
                histogram. Must be a positive integer that is >=
                (2 * lowest_trackable_value).
            significant_figures The number of significant decimal digits to
                which the histogram will maintain value resolution and
                separation. Must be a non-negative integer between 0 and 5.
            word_size size of counters in bytes, only 2, 4, 8-byte counters
                are supported (default is 8-byte or 64-bit counters)
            b64_wrap specifies if the encoding of this histogram should use
                base64 wrapping (only useful if you need to encode the histogram
                to save somewhere or send over the wire. By default base64
                encoding is assumed
            hdr_payload only used for associating an existing payload created
                from decoding an encoded histograme
        Exceptions:
            ValueError if the word_size value is unsupported
                if significant_figures is invalid
        '''
        if significant_figures < 1 or significant_figures > 5:
            raise ValueError('Invalid significant_figures')
        self.lowest_trackable_value = lowest_trackable_value
        self.highest_trackable_value = highest_trackable_value
        self.significant_figures = significant_figures
        # largest power of 2 that is <= lowest_trackable_value
        self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
        # sub-bucket geometry derived from the requested decimal precision
        largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
        subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
        self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
        self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
        self.sub_bucket_half_count = self.sub_bucket_count // 2
        self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
        self.bucket_count = get_bucket_count(highest_trackable_value,
                                             self.sub_bucket_count,
                                             self.unit_magnitude)
        self.min_value = sys.maxsize
        self.max_value = 0
        self.total_count = 0
        self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
        self.word_size = word_size
        if hdr_payload:
            # adopting a decoded payload: pick up its conversion ratio and
            # its pre-populated counters/min/max/total
            payload = hdr_payload.payload
            self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
            results = hdr_payload.init_counts(self.counts_len)
            if results['total']:
                self.set_internal_tacking_values(results['min_nonzero_index'],
                                                 results['max_nonzero_index'],
                                                 results['total'])
        else:
            self.int_to_double_conversion_ratio = 1.0
        # to encode this histogram into a compressed/base64 format ready
        # to be exported
        self.b64_wrap = b64_wrap
        self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
        # the counters reside directly in the payload object
        # allocated by the encoder
        # so that compression for wire transfer can be done without copy
        self.counts = self.encoder.get_counts()
        self.start_time_stamp_msec = 0
        self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays, we return zero for any index that is passed the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
'''Set the end time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
highest_recordable_value = \
self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
if highest_recordable_value < other_hist.get_max_value():
raise IndexError("The other histogram includes values that do not fit %d < %d" %
(highest_recordable_value, other_hist.get_max_value()))
if (self.bucket_count == other_hist.bucket_count) and \
(self.sub_bucket_count == other_hist.sub_bucket_count) and \
(self.unit_magnitude == other_hist.unit_magnitude) and \
(self.word_size == other_hist.word_size):
# do an in-place addition of one array to another
self.encoder.add(other_hist.encoder)
self.total_count += other_hist.get_total_count()
self.max_value = max(self.max_value, other_hist.get_max_value())
self.min_value = min(self.get_min_value(), other_hist.get_min_value())
else:
# Arrays are not a direct match, so we can't just stream through and add them.
# Instead, go through the array and add each non-zero value found at it's proper value:
for index in range(other_hist.counts_len):
other_count = other_hist.get_count_at_index(index)
if other_count > 0:
self.record_value(other_hist.get_value_from_index(index), other_count)
self.start_time_stamp_msec = \
min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
self.end_time_stamp_msec = \
max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
def decode_and_add(self, encoded_histogram):
'''Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
self.add(other_hist)
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram
def get_word_size(self):
return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.get_percentile_to_value_dict | python | def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result | A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L287-L326 | [
"def get_count_at_index(self, index):\n if index >= self.counts_len:\n raise IndexError()\n # some decoded (read-only) histograms may have truncated\n # counts arrays, we return zero for any index that is passed the array\n if index >= self.encoder.payload.counts_len:\n return 0\n return self.counts[index]\n",
"def get_value_from_index(self, index):\n bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1\n sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \\\n self.sub_bucket_half_count\n if bucket_index < 0:\n sub_bucket_index -= self.sub_bucket_half_count\n bucket_index = 0\n return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)\n",
"def get_lowest_equivalent_value(self, value):\n bucket_index = self._get_bucket_index(value)\n sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)\n\n lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,\n sub_bucket_index)\n return lowest_equivalent_value\n",
"def get_highest_equivalent_value(self, value):\n bucket_index = self._get_bucket_index(value)\n sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)\n\n lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,\n sub_bucket_index)\n if sub_bucket_index >= self.sub_bucket_count:\n bucket_index += 1\n size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)\n next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range\n\n return next_non_equivalent_value - 1\n",
"def get_target_count_at_percentile(self, percentile):\n requested_percentile = min(percentile, 100.0)\n count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)\n return max(count_at_percentile, 1)\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays, we return zero for any index that is passed the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
'''Set the end time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
highest_recordable_value = \
self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
if highest_recordable_value < other_hist.get_max_value():
raise IndexError("The other histogram includes values that do not fit %d < %d" %
(highest_recordable_value, other_hist.get_max_value()))
if (self.bucket_count == other_hist.bucket_count) and \
(self.sub_bucket_count == other_hist.sub_bucket_count) and \
(self.unit_magnitude == other_hist.unit_magnitude) and \
(self.word_size == other_hist.word_size):
# do an in-place addition of one array to another
self.encoder.add(other_hist.encoder)
self.total_count += other_hist.get_total_count()
self.max_value = max(self.max_value, other_hist.get_max_value())
self.min_value = min(self.get_min_value(), other_hist.get_min_value())
else:
# Arrays are not a direct match, so we can't just stream through and add them.
# Instead, go through the array and add each non-zero value found at it's proper value:
for index in range(other_hist.counts_len):
other_count = other_hist.get_count_at_index(index)
if other_count > 0:
self.record_value(other_hist.get_value_from_index(index), other_count)
self.start_time_stamp_msec = \
min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
self.end_time_stamp_msec = \
max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
def decode_and_add(self, encoded_histogram):
'''Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
self.add(other_hist)
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram
def get_word_size(self):
return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.values_are_equivalent | python | def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2) | Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L335-L342 | [
"def get_lowest_equivalent_value(self, value):\n bucket_index = self._get_bucket_index(value)\n sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)\n\n lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,\n sub_bucket_index)\n return lowest_equivalent_value\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays, we return zero for any index that is passed the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0
def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
'''Set the end time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
highest_recordable_value = \
self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
if highest_recordable_value < other_hist.get_max_value():
raise IndexError("The other histogram includes values that do not fit %d < %d" %
(highest_recordable_value, other_hist.get_max_value()))
if (self.bucket_count == other_hist.bucket_count) and \
(self.sub_bucket_count == other_hist.sub_bucket_count) and \
(self.unit_magnitude == other_hist.unit_magnitude) and \
(self.word_size == other_hist.word_size):
# do an in-place addition of one array to another
self.encoder.add(other_hist.encoder)
self.total_count += other_hist.get_total_count()
self.max_value = max(self.max_value, other_hist.get_max_value())
self.min_value = min(self.get_min_value(), other_hist.get_min_value())
else:
# Arrays are not a direct match, so we can't just stream through and add them.
# Instead, go through the array and add each non-zero value found at it's proper value:
for index in range(other_hist.counts_len):
other_count = other_hist.get_count_at_index(index)
if other_count > 0:
self.record_value(other_hist.get_value_from_index(index), other_count)
self.start_time_stamp_msec = \
min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
self.end_time_stamp_msec = \
max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
def decode_and_add(self, encoded_histogram):
'''Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
self.add(other_hist)
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram
def get_word_size(self):
return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.reset | python | def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0 | Reset the histogram to a pristine state | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L386-L395 | null | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
    def _get_bucket_index(self, value):
        '''Return the index of the exponential bucket that covers value.
        Derived from the position of the highest set bit of
        value | sub_bucket_mask (the OR guarantees that small values land
        in bucket 0).
        '''
        # smallest power of 2 containing value
        pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
        return int(pow2ceiling - self.unit_magnitude -
                   (self.sub_bucket_half_count_magnitude + 1))
    def _get_sub_bucket_index(self, value, bucket_index):
        '''Return the linear sub-bucket slot of value within bucket_index.'''
        # scale the value down by the bucket's unit size
        return int(value) >> (bucket_index + self.unit_magnitude)
    def _counts_index(self, bucket_index, sub_bucket_index):
        '''Flatten (bucket_index, sub_bucket_index) into a counts array index.'''
        # Calculate the index for the first entry in the bucket:
        # (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
        bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
        # Calculate the offset in the bucket:
        offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
        # The following is the equivalent of
        # ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
        return bucket_base_index + offset_in_bucket
    def _counts_index_for(self, value):
        '''Return the counts array index that tracks the given raw value.'''
        bucket_index = self._get_bucket_index(value)
        sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
        return self._counts_index(bucket_index, sub_bucket_index)
    def record_value(self, value, count=1):
        '''Record a new value into the histogram
        Args:
            value: the value to record (must be in the valid range)
            count: incremental count (defaults to 1)
        Returns:
            True if the value was recorded,
            False if the value is negative or maps outside the counts array
        '''
        if value < 0:
            return False
        counts_index = self._counts_index_for(value)
        if (counts_index < 0) or (self.counts_len <= counts_index):
            return False
        self.counts[counts_index] += count
        self.total_count += count
        # track exact (not bucket-quantized) extremes
        self.min_value = min(self.min_value, value)
        self.max_value = max(self.max_value, value)
        return True
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
    def get_count_at_index(self, index):
        '''Return the count stored at a given counts array index.
        Raises:
            IndexError if index is beyond the theoretical counts length
        '''
        if index >= self.counts_len:
            raise IndexError()
        # some decoded (read-only) histograms may have truncated
        # counts arrays, we return zero for any index that is passed the array
        if index >= self.encoder.payload.counts_len:
            return 0
        return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
    def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
        '''Return the lowest raw value represented by the given slot.'''
        return sub_bucket_index << (bucket_index + self.unit_magnitude)
    def get_value_from_index(self, index):
        '''Return the lowest raw value tracked by the given counts array index.'''
        bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
        sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
            self.sub_bucket_half_count
        if bucket_index < 0:
            # indexes in the first half of bucket 0 map directly to their slot
            sub_bucket_index -= self.sub_bucket_half_count
            bucket_index = 0
        return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
    def get_lowest_equivalent_value(self, value):
        '''Return the lowest value that falls in the same bucket slot as value.'''
        bucket_index = self._get_bucket_index(value)
        sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
        lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
                                                                 sub_bucket_index)
        return lowest_equivalent_value
    def get_highest_equivalent_value(self, value):
        '''Return the highest value that falls in the same bucket slot as value.'''
        bucket_index = self._get_bucket_index(value)
        sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
        lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
                                                                 sub_bucket_index)
        if sub_bucket_index >= self.sub_bucket_count:
            # the slot overflowed into the next bucket
            bucket_index += 1
        size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
        next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
        return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
    def get_value_at_percentile(self, percentile):
        '''Get the value for a given percentile
        Args:
            percentile: a float in [0.0..100.0]
        Returns:
            the value for the given percentile (0 if the histogram is empty)
        '''
        count_at_percentile = self.get_target_count_at_percentile(percentile)
        total = 0
        # linear scan: accumulate counts until the target for the requested
        # percentile is reached
        for index in range(self.counts_len):
            total += self.get_count_at_index(index)
            if total >= count_at_percentile:
                value_at_index = self.get_value_from_index(index)
                # percentile 0.0 reports the low edge of the slot, any other
                # percentile reports the high edge
                if percentile:
                    return self.get_highest_equivalent_value(value_at_index)
                return self.get_lowest_equivalent_value(value_at_index)
        return 0
    def get_percentile_to_value_dict(self, percentile_list):
        '''A faster alternative to query values for a list of percentiles.
        Args:
            percentile_list: a list of percentiles in any order, dups will be ignored
            each element in the list must be a float value in [0.0 .. 100.0]
        Returns:
            a dict of percentile values indexed by the percentile
        '''
        result = {}
        total = 0
        percentile_list_index = 0
        count_at_percentile = 0
        # remove dups and sort
        percentile_list = list(set(percentile_list))
        percentile_list.sort()
        # a single scan of the counts array serves all requested percentiles
        for index in range(self.counts_len):
            total += self.get_count_at_index(index)
            while True:
                # recalculate target based on next requested percentile
                if not count_at_percentile:
                    if percentile_list_index == len(percentile_list):
                        return result
                    percentile = percentile_list[percentile_list_index]
                    percentile_list_index += 1
                    if percentile > 100:
                        return result
                    count_at_percentile = self.get_target_count_at_percentile(percentile)
                if total >= count_at_percentile:
                    value_at_index = self.get_value_from_index(index)
                    if percentile:
                        result[percentile] = self.get_highest_equivalent_value(value_at_index)
                    else:
                        result[percentile] = self.get_lowest_equivalent_value(value_at_index)
                    # zero forces the next loop pass to pick up the next target
                    count_at_percentile = 0
                else:
                    break
        return result
    def get_total_count(self):
        '''Return the total number of recorded values.'''
        return self.total_count
    def get_count_at_value(self, value):
        '''Return the count recorded in the bucket slot covering value.'''
        counts_index = self._counts_index_for(value)
        return self.counts[counts_index]
    def values_are_equivalent(self, val1, val2):
        '''Check whether 2 values are equivalent (meaning they
        are in the same bucket/range)
        Returns:
            true if the 2 values are equivalent
        '''
        return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
    def get_max_value(self):
        '''Return the highest equivalent of the maximum recorded value
        (0 if nothing was recorded).'''
        if self.max_value == 0:
            return 0
        return self.get_highest_equivalent_value(self.max_value)
    def get_min_value(self):
        '''Return the lowest equivalent of the minimum recorded value.'''
        if self.counts[0] > 0 or self.total_count == 0:
            return 0
        if sys.maxsize == self.min_value:
            # min_value still holds its initial sentinel: nothing recorded
            return sys.maxsize
        return self.get_lowest_equivalent_value(self.min_value)
    def _hdr_size_of_equiv_value_range(self, value):
        '''Return the width of the bucket slot that covers value.'''
        bucket_index = self._get_bucket_index(value)
        sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
        if sub_bucket_index >= self.sub_bucket_count:
            # the slot overflowed into the next (wider) bucket
            bucket_index += 1
        return 1 << (self.unit_magnitude + bucket_index)
    def _hdr_median_equiv_value(self, value):
        '''Return the value at the middle of the bucket slot covering value.'''
        return self.get_lowest_equivalent_value(value) + \
            (self._hdr_size_of_equiv_value_range(value) >> 1)
    def get_mean_value(self):
        '''Return the mean of all recorded values (0.0 if empty).'''
        if not self.total_count:
            return 0.0
        total = 0
        itr = self.get_recorded_iterator()
        for item in itr:
            # weight each populated slot by the median value of its range
            total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
        return float(total) / self.total_count
    def get_stddev(self):
        '''Return the standard deviation of all recorded values (0.0 if empty).'''
        if not self.total_count:
            return 0.0
        mean = self.get_mean_value()
        geometric_dev_total = 0.0
        for item in self.get_recorded_iterator():
            dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
            geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
        return math.sqrt(geometric_dev_total / self.total_count)
    def __iter__(self):
        '''Returns the recorded iterator if iter(self) is called
        '''
        return RecordedIterator(self)
    def get_all_values_iterator(self):
        '''Return an iterator over every slot, including empty ones.'''
        return AllValuesIterator(self)
    def get_recorded_iterator(self):
        '''Return an iterator over the slots that hold at least one count.'''
        return RecordedIterator(self)
    def get_percentile_iterator(self, ticks_per_half_distance):
        '''Return an iterator stepping through percentile levels.'''
        return PercentileIterator(self, ticks_per_half_distance)
    def get_linear_iterator(self, value_units_per_bucket):
        '''Return an iterator over fixed-width value ranges.'''
        return LinearIterator(self, value_units_per_bucket)
    def get_log_iterator(self, value_units_first_bucket, log_base):
        '''Return an iterator over exponentially growing value ranges.'''
        return LogIterator(self, value_units_first_bucket, log_base)
    def encode(self):
        '''Encode this histogram
        Return:
            a string containing the base64 encoded compressed histogram (V1 format)
        '''
        return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
    def set_internal_tacking_values(self,
                                    min_non_zero_index,
                                    max_index,
                                    total_added):
        '''Called during decoding to overwrite (not adjust) the min/max value
        and total count from the decoded counts.
        Note: "tacking" in the name is a historical typo for "tracking",
        kept for API compatibility.
        Args:
            min_non_zero_index min nonzero index of all added counts (-1 if none)
            max_index max index of all added counts (-1 if none)
            total_added the decoded total count
        '''
        if max_index >= 0:
            self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
        if min_non_zero_index >= 0:
            self.min_value = self.get_value_from_index(min_non_zero_index)
        self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
    def get_start_time_stamp(self):
        '''Return the start time stamp, [by convention] in msec since the epoch.'''
        return self.start_time_stamp_msec
    def set_start_time_stamp(self, time_stamp_msec):
        '''Set the start time stamp value associated with this histogram to a given value.
        Params:
            time_stamp_msec the value to set the time stamp to,
                [by convention] in msec since the epoch.
        '''
        self.start_time_stamp_msec = time_stamp_msec
    def get_end_time_stamp(self):
        '''Return the end time stamp, [by convention] in msec since the epoch.'''
        return self.end_time_stamp_msec
    def set_end_time_stamp(self, time_stamp_msec):
        '''Set the end time stamp value associated with this histogram to a given value.
        Params:
            time_stamp_msec the value to set the time stamp to,
                [by convention] in msec since the epoch.
        '''
        self.end_time_stamp_msec = time_stamp_msec
    def add(self, other_hist):
        '''Add the content of another histogram into this one.
        Args:
            other_hist the histogram whose counts are merged into this one
        Exception:
            IndexError if other_hist contains values that exceed this
            histogram's trackable range
        '''
        highest_recordable_value = \
            self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
        if highest_recordable_value < other_hist.get_max_value():
            raise IndexError("The other histogram includes values that do not fit %d < %d" %
                             (highest_recordable_value, other_hist.get_max_value()))
        if (self.bucket_count == other_hist.bucket_count) and \
           (self.sub_bucket_count == other_hist.sub_bucket_count) and \
           (self.unit_magnitude == other_hist.unit_magnitude) and \
           (self.word_size == other_hist.word_size):
            # do an in-place addition of one array to another
            self.encoder.add(other_hist.encoder)
            self.total_count += other_hist.get_total_count()
            self.max_value = max(self.max_value, other_hist.get_max_value())
            self.min_value = min(self.get_min_value(), other_hist.get_min_value())
        else:
            # Arrays are not a direct match, so we can't just stream through and add them.
            # Instead, go through the array and add each non-zero value found at it's proper value:
            for index in range(other_hist.counts_len):
                other_count = other_hist.get_count_at_index(index)
                if other_count > 0:
                    self.record_value(other_hist.get_value_from_index(index), other_count)
        # keep the union of both observation windows
        self.start_time_stamp_msec = \
            min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
        self.end_time_stamp_msec = \
            max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
    def decode_and_add(self, encoded_histogram):
        '''Decode an encoded histogram and add it to this histogram
        Args:
            encoded_histogram (string) an encoded histogram
            following the V1 format, such as one returned by the encode() method
        Exception:
            TypeError in case of base64 decode error
            HdrCookieException:
                the main header has an invalid cookie
                the compressed payload header has an invalid cookie
            HdrLengthException:
                the decompressed size is too small for the HdrPayload structure
                or is not aligned or is too large for the passed payload class
            zlib.error:
                in case of zlib decompression error
        '''
        # decode into a temporary histogram, then merge it into this one
        other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
        self.add(other_hist)
    @staticmethod
    def decode(encoded_histogram, b64_wrap=True):
        '''Decode an encoded histogram and return a new histogram instance that
        has been initialized with the decoded content
        Return:
            a new histogram instance representing the decoded content
        Exception:
            TypeError in case of base64 decode error
            HdrCookieException:
                the main header has an invalid cookie
                the compressed payload header has an invalid cookie
            HdrLengthException:
                the decompressed size is too small for the HdrPayload structure
                or is not aligned or is too large for the passed payload class
            zlib.error:
                in case of zlib decompression error
        '''
        hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
        payload = hdr_payload.payload
        # rebuild the histogram around the decoded payload; the payload header
        # carries the original construction parameters
        histogram = HdrHistogram(payload.lowest_trackable_value,
                                 payload.highest_trackable_value,
                                 payload.significant_figures,
                                 hdr_payload=hdr_payload)
        return histogram
    def get_word_size(self):
        '''Return the size in bytes of each counter (2, 4 or 8).'''
        return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.adjust_internal_tacking_values | python | def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added | Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none) | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L424-L441 | [
"def get_value_from_index(self, index):\n bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1\n sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \\\n self.sub_bucket_half_count\n if bucket_index < 0:\n sub_bucket_index -= self.sub_bucket_half_count\n bucket_index = 0\n return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)\n",
"def get_highest_equivalent_value(self, value):\n bucket_index = self._get_bucket_index(value)\n sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)\n\n lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,\n sub_bucket_index)\n if sub_bucket_index >= self.sub_bucket_count:\n bucket_index += 1\n size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)\n next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range\n\n return next_non_equivalent_value - 1\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays, we return zero for any index that is passed the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0
def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
    '''Return the end time stamp associated with this histogram,
    [by convention] in msec since the epoch.
    '''
    return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
    '''Set the end time stamp value associated with this histogram to a given value.
    Params:
        time_stamp_msec the value to set the time stamp to,
        [by convention] in msec since the epoch.
    '''
    # Stored as-is; no validation is performed on the supplied value.
    self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
    '''Add the counts of another histogram into this one.

    Args:
        other_hist: the histogram whose recorded values are merged in
    Raises:
        IndexError: if other_hist contains values larger than this
            histogram can record
    '''
    highest_recordable_value = \
        self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
    if highest_recordable_value < other_hist.get_max_value():
        raise IndexError("The other histogram includes values that do not fit %d < %d" %
                         (highest_recordable_value, other_hist.get_max_value()))
    if (self.bucket_count == other_hist.bucket_count) and \
       (self.sub_bucket_count == other_hist.sub_bucket_count) and \
       (self.unit_magnitude == other_hist.unit_magnitude) and \
       (self.word_size == other_hist.word_size):
        # Identical geometry: do an in-place addition of one array to another
        self.encoder.add(other_hist.encoder)
        self.total_count += other_hist.get_total_count()
        self.max_value = max(self.max_value, other_hist.get_max_value())
        self.min_value = min(self.get_min_value(), other_hist.get_min_value())
    else:
        # Arrays are not a direct match, so we can't just stream through and add them.
        # Instead, go through the array and add each non-zero value found at it's proper value:
        for index in range(other_hist.counts_len):
            other_count = other_hist.get_count_at_index(index)
            if other_count > 0:
                self.record_value(other_hist.get_value_from_index(index), other_count)
    # The merged histogram covers the union of both recording windows.
    self.start_time_stamp_msec = \
        min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
    self.end_time_stamp_msec = \
        max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
def decode_and_add(self, encoded_histogram):
    '''Decode an encoded histogram and add it to this histogram
    Args:
        encoded_histogram (string) an encoded histogram
        following the V1 format, such as one returned by the encode() method
    Exception:
        TypeError in case of base64 decode error
        HdrCookieException:
            the main header has an invalid cookie
            the compressed payload header has an invalid cookie
        HdrLengthException:
            the decompressed size is too small for the HdrPayload structure
            or is not aligned or is too large for the passed payload class
        zlib.error:
            in case of zlib decompression error
    '''
    # Decode into a throwaway histogram with this histogram's b64 setting,
    # then merge it via add() (which also raises IndexError on overflow).
    other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
    self.add(other_hist)
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
    '''Decode an encoded histogram and return a new histogram instance that
    has been initialized with the decoded content
    Args:
        encoded_histogram: an encoded histogram following the V1 format
        b64_wrap: set to False if the encoded input is not base64 wrapped
    Return:
        a new histogram instance representing the decoded content
    Exception:
        TypeError in case of base64 decode error
        HdrCookieException:
            the main header has an invalid cookie
            the compressed payload header has an invalid cookie
        HdrLengthException:
            the decompressed size is too small for the HdrPayload structure
            or is not aligned or is too large for the passed payload class
        zlib.error:
            in case of zlib decompression error
    '''
    hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
    payload = hdr_payload.payload
    # Rebuild the histogram around the decoded payload: the constructor
    # picks up min/max/total from the payload counts via hdr_payload.
    histogram = HdrHistogram(payload.lowest_trackable_value,
                             payload.highest_trackable_value,
                             payload.significant_figures,
                             hdr_payload=hdr_payload)
    return histogram
def get_word_size(self):
    '''Return the size in bytes of each counter (2, 4 or 8).'''
    return self.word_size
def output_percentile_distribution(self,
                                   out_file,
                                   output_value_unit_scaling_ratio,
                                   ticks_per_half_distance=5):
    '''Write the percentile distribution of this histogram to a file.

    Args:
        out_file: a binary-mode file-like object (bytes are written)
        output_value_unit_scaling_ratio: divisor applied to every value
            before printing (e.g. 1000.0 to report usec samples in msec)
        ticks_per_half_distance: percentile iteration resolution
            (defaults to 5)
    '''
    out_file.write(b'%12s %14s %10s %14s\n\n' %
                   (b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
    percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
    last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
    for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
        value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
        percentile = iter_value.percentile_level_iterated_to / 100
        total_count = iter_value.total_count_to_this_value
        if iter_value.percentile_level_iterated_to != 100:
            other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
            out_file.write(percentile_format.encode() % (value, percentile,
                                                         total_count, other))
        else:
            # the 100th percentile has no 1/(1-p) column (division by zero)
            out_file.write(last_line_percentile_format.encode() % (value,
                                                                   percentile,
                                                                   total_count))
    mean = self.get_mean_value() / output_value_unit_scaling_ratio
    stddev = self.get_stddev()
    out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
                   format(self.significant_figures).encode() % (mean, stddev))
    # renamed from 'max' so the builtin max() is not shadowed
    max_value = self.get_max_value() / output_value_unit_scaling_ratio
    total = self.get_total_count()
    out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
        self.significant_figures).encode() % (max_value, total))
    out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
        self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.set_internal_tacking_values | python | def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added | Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none) | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L443-L458 | [
"def get_value_from_index(self, index):\n bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1\n sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \\\n self.sub_bucket_half_count\n if bucket_index < 0:\n sub_bucket_index -= self.sub_bucket_half_count\n bucket_index = 0\n return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)\n",
"def get_highest_equivalent_value(self, value):\n bucket_index = self._get_bucket_index(value)\n sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)\n\n lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,\n sub_bucket_index)\n if sub_bucket_index >= self.sub_bucket_count:\n bucket_index += 1\n size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)\n next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range\n\n return next_non_equivalent_value - 1\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
    '''Return the index of the bucket that the given value falls in.'''
    # smallest power of 2 containing value; OR-ing in sub_bucket_mask
    # floors the result for values small enough to fit in bucket 0
    pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
    return int(pow2ceiling - self.unit_magnitude -
               (self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
    '''Return the sub bucket index of the given value within its bucket.'''
    return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
    '''Return the counts array index for a (bucket, sub bucket) pair.'''
    # Calculate the index for the first entry in the bucket:
    # (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
    bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
    # Calculate the offset in the bucket:
    offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
    # The following is the equivalent of
    # ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
    return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
def record_value(self, value, count=1):
    '''Record a new value into the histogram
    Args:
        value: the value to record (must be in the valid range)
        count: incremental count (defaults to 1)
    Returns:
        True if the value was recorded, False if it is negative or
        falls outside the trackable range
    '''
    if value < 0:
        return False
    counts_index = self._counts_index_for(value)
    if (counts_index < 0) or (self.counts_len <= counts_index):
        return False
    self.counts[counts_index] += count
    self.total_count += count
    # Track exact (non-quantized) extrema of the recorded values.
    self.min_value = min(self.min_value, value)
    self.max_value = max(self.max_value, value)
    return True
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
    '''Record a new value into the histogram and correct for
    coordinated omission if needed
    Args:
        value: the value to record (must be in the valid range)
        expected_interval: the expected interval between 2 value samples
        count: incremental count (defaults to 1)
    Returns:
        True once all (corrected) values are recorded, False if any
        record attempt fails
    '''
    # Besides the observed value, back-fill the samples that coordinated
    # omission suppressed: one synthetic value per expected_interval step
    # below the observed value.
    while True:
        if not self.record_value(value, count):
            return False
        if value <= expected_interval or expected_interval <= 0:
            return True
        value -= expected_interval
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays, we return zero for any index that is passed the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0
def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
'''Set the end time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
highest_recordable_value = \
self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
if highest_recordable_value < other_hist.get_max_value():
raise IndexError("The other histogram includes values that do not fit %d < %d" %
(highest_recordable_value, other_hist.get_max_value()))
if (self.bucket_count == other_hist.bucket_count) and \
(self.sub_bucket_count == other_hist.sub_bucket_count) and \
(self.unit_magnitude == other_hist.unit_magnitude) and \
(self.word_size == other_hist.word_size):
# do an in-place addition of one array to another
self.encoder.add(other_hist.encoder)
self.total_count += other_hist.get_total_count()
self.max_value = max(self.max_value, other_hist.get_max_value())
self.min_value = min(self.get_min_value(), other_hist.get_min_value())
else:
# Arrays are not a direct match, so we can't just stream through and add them.
# Instead, go through the array and add each non-zero value found at it's proper value:
for index in range(other_hist.counts_len):
other_count = other_hist.get_count_at_index(index)
if other_count > 0:
self.record_value(other_hist.get_value_from_index(index), other_count)
self.start_time_stamp_msec = \
min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
self.end_time_stamp_msec = \
max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
def decode_and_add(self, encoded_histogram):
'''Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
self.add(other_hist)
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram
def get_word_size(self):
return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.get_counts_array_index | python | def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket | Return the index in the counts array for a given value | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L460-L475 | [
"def _get_bucket_index(self, value):\n # smallest power of 2 containing value\n pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)\n return int(pow2ceiling - self.unit_magnitude -\n (self.sub_bucket_half_count_magnitude + 1))\n",
"def _get_sub_bucket_index(self, value, bucket_index):\n return int(value) >> (bucket_index + self.unit_magnitude)\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
    def get_count_at_index(self, index):
        """Return the count stored at a flat counts-array index.

        Raises:
            IndexError: if index is beyond the logical counts length.
        """
        if index >= self.counts_len:
            raise IndexError()
        # some decoded (read-only) histograms may have truncated
        # counts arrays, we return zero for any index that is past the array
        if index >= self.encoder.payload.counts_len:
            return 0
        return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
    def get_value_at_percentile(self, percentile):
        '''Get the value for a given percentile
        Args:
            percentile: a float in [0.0..100.0]
        Returns:
            the value for the given percentile
        '''
        count_at_percentile = self.get_target_count_at_percentile(percentile)
        total = 0
        # walk the counts array until the cumulative count reaches the target
        for index in range(self.counts_len):
            total += self.get_count_at_index(index)
            if total >= count_at_percentile:
                value_at_index = self.get_value_from_index(index)
                # report the top of the range for any non-zero percentile,
                # the bottom of the range for the 0'th percentile
                if percentile:
                    return self.get_highest_equivalent_value(value_at_index)
                return self.get_lowest_equivalent_value(value_at_index)
        return 0
    def get_percentile_to_value_dict(self, percentile_list):
        '''A faster alternative to query values for a list of percentiles.
        Args:
            percentile_list: a list of percentiles in any order, dups will be ignored
            each element in the list must be a float value in [0.0 .. 100.0]
        Returns:
            a dict of percentile values indexed by the percentile
        '''
        result = {}
        total = 0
        percentile_list_index = 0
        count_at_percentile = 0
        # remove dups and sort
        percentile_list = list(set(percentile_list))
        percentile_list.sort()
        # Single pass over the counts array: because the percentile list is
        # sorted, each requested percentile is resolved in order as the
        # cumulative count crosses its target.
        for index in range(self.counts_len):
            total += self.get_count_at_index(index)
            while True:
                # recalculate target based on next requested percentile
                if not count_at_percentile:
                    if percentile_list_index == len(percentile_list):
                        # all requested percentiles resolved
                        return result
                    percentile = percentile_list[percentile_list_index]
                    percentile_list_index += 1
                    if percentile > 100:
                        return result
                    count_at_percentile = self.get_target_count_at_percentile(percentile)
                if total >= count_at_percentile:
                    value_at_index = self.get_value_from_index(index)
                    if percentile:
                        result[percentile] = self.get_highest_equivalent_value(value_at_index)
                    else:
                        result[percentile] = self.get_lowest_equivalent_value(value_at_index)
                    # zero forces the next percentile's target to be computed
                    count_at_percentile = 0
                else:
                    break
        return result
    def get_total_count(self):
        """Return the total number of recorded counts."""
        return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
    def get_mean_value(self):
        """Return the weighted mean of all recorded values (0.0 if empty).

        Each count is weighted by the median of its equivalent-value range.
        """
        if not self.total_count:
            return 0.0
        total = 0
        itr = self.get_recorded_iterator()
        for item in itr:
            # count_at_this_value is read off the iterator itself while the
            # per-step value comes from the yielded item
            total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
        return float(total) / self.total_count
    def get_stddev(self):
        """Return the standard deviation of recorded values (0.0 if empty).

        Each recorded value is represented by the median of its
        equivalent-value range.
        """
        if not self.total_count:
            return 0.0
        mean = self.get_mean_value()
        geometric_dev_total = 0.0
        for item in self.get_recorded_iterator():
            # deviation of this range's median from the mean
            dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
            geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
        return math.sqrt(geometric_dev_total / self.total_count)
    def reset(self):
        '''Reset the histogram to a pristine state
        '''
        for index in range(self.counts_len):
            self.counts[index] = 0
        self.total_count = 0
        self.min_value = sys.maxsize
        self.max_value = 0
        # NOTE(review): __init__ starts the start stamp at 0, while reset uses
        # sys.maxsize — presumably so a later min() against a real stamp
        # works; confirm intent.
        self.start_time_stamp_msec = sys.maxsize
        self.end_time_stamp_msec = 0
    def __iter__(self):
        '''Returns the recorded iterator if iter(self) is called
        '''
        return RecordedIterator(self)
    # The factories below return iterator objects defined elsewhere in this
    # module, each bound to this histogram instance.
    def get_all_values_iterator(self):
        # presumably iterates every counts index, including empty ones — see
        # AllValuesIterator for the actual contract
        return AllValuesIterator(self)
    def get_recorded_iterator(self):
        # presumably iterates only indexes with non-zero counts — see
        # RecordedIterator for the actual contract
        return RecordedIterator(self)
    def get_percentile_iterator(self, ticks_per_half_distance):
        return PercentileIterator(self, ticks_per_half_distance)
    def get_linear_iterator(self, value_units_per_bucket):
        return LinearIterator(self, value_units_per_bucket)
    def get_log_iterator(self, value_units_first_bucket, log_base):
        return LogIterator(self, value_units_first_bucket, log_base)
    def encode(self):
        '''Encode this histogram
        Return:
            a string containing the base64 encoded compressed histogram (V1 format)
        '''
        # delegates to the HdrHistogramEncoder created in __init__, which
        # owns the counts array directly
        return self.encoder.encode()
    # NOTE(review): "tacking" looks like a typo for "tracking"; the name is
    # kept unchanged because it is part of the public interface.
    def adjust_internal_tacking_values(self,
                                       min_non_zero_index,
                                       max_index,
                                       total_added):
        '''Called during decoding and add to adjust the new min/max value and
        total count. Unlike set_internal_tacking_values, this merges with the
        existing min/max and increments the total count.
        Args:
            min_non_zero_index min nonzero index of all added counts (-1 if none)
            max_index max index of all added counts (-1 if none)
            total_added number of counts added to the total
        '''
        if max_index >= 0:
            max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
            self.max_value = max(self.max_value, max_value)
        if min_non_zero_index >= 0:
            min_value = self.get_value_from_index(min_non_zero_index)
            self.min_value = min(self.min_value, min_value)
        self.total_count += total_added
    def set_internal_tacking_values(self,
                                    min_non_zero_index,
                                    max_index,
                                    total_added):
        '''Called during decoding and add to adjust the new min/max value and
        total count. Unlike adjust_internal_tacking_values, this overwrites
        the min/max and the total count.
        Args:
            min_non_zero_index min nonzero index of all added counts (-1 if none)
            max_index max index of all added counts (-1 if none)
            total_added the new total count
        '''
        if max_index >= 0:
            self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
        if min_non_zero_index >= 0:
            self.min_value = self.get_value_from_index(min_non_zero_index)
        self.total_count = total_added
    def get_start_time_stamp(self):
        """Return the start time stamp ([by convention] msec since the epoch)."""
        return self.start_time_stamp_msec
    def set_start_time_stamp(self, time_stamp_msec):
        '''Set the start time stamp value associated with this histogram to a given value.
        Params:
            time_stamp_msec the value to set the time stamp to,
            [by convention] in msec since the epoch.
        '''
        self.start_time_stamp_msec = time_stamp_msec
    def get_end_time_stamp(self):
        """Return the end time stamp ([by convention] msec since the epoch)."""
        return self.end_time_stamp_msec
    def set_end_time_stamp(self, time_stamp_msec):
        '''Set the end time stamp value associated with this histogram to a given value.
        Params:
            time_stamp_msec the value to set the time stamp to,
            [by convention] in msec since the epoch.
        '''
        self.end_time_stamp_msec = time_stamp_msec
    def add(self, other_hist):
        """Merge the content of another histogram into this one.

        Args:
            other_hist: the histogram whose counts are added to this one
        Raises:
            IndexError: if other_hist holds values larger than this
                histogram's trackable range
        """
        highest_recordable_value = \
            self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
        if highest_recordable_value < other_hist.get_max_value():
            raise IndexError("The other histogram includes values that do not fit %d < %d" %
                             (highest_recordable_value, other_hist.get_max_value()))
        # identical geometry allows a fast array-to-array merge
        if (self.bucket_count == other_hist.bucket_count) and \
           (self.sub_bucket_count == other_hist.sub_bucket_count) and \
           (self.unit_magnitude == other_hist.unit_magnitude) and \
           (self.word_size == other_hist.word_size):
            # do an in-place addition of one array to another
            self.encoder.add(other_hist.encoder)
            self.total_count += other_hist.get_total_count()
            self.max_value = max(self.max_value, other_hist.get_max_value())
            self.min_value = min(self.get_min_value(), other_hist.get_min_value())
        else:
            # Arrays are not a direct match, so we can't just stream through and add them.
            # Instead, go through the array and add each non-zero value found at it's proper value:
            for index in range(other_hist.counts_len):
                other_count = other_hist.get_count_at_index(index)
                if other_count > 0:
                    self.record_value(other_hist.get_value_from_index(index), other_count)
        # widen the covered time window to include the other histogram's
        self.start_time_stamp_msec = \
            min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
        self.end_time_stamp_msec = \
            max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
    def decode_and_add(self, encoded_histogram):
        '''Decode an encoded histogram and add it to this histogram
        Args:
            encoded_histogram (string) an encoded histogram
            following the V1 format, such as one returned by the encode() method
        Exception:
            TypeError in case of base64 decode error
            HdrCookieException:
                the main header has an invalid cookie
                the compressed payload header has an invalid cookie
            HdrLengthException:
                the decompressed size is too small for the HdrPayload structure
                or is not aligned or is too large for the passed payload class
            zlib.error:
                in case of zlib decompression error
        '''
        # decode into a temporary histogram, then merge it via add()
        other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
        self.add(other_hist)
    @staticmethod
    def decode(encoded_histogram, b64_wrap=True):
        '''Decode an encoded histogram and return a new histogram instance that
        has been initialized with the decoded content
        Return:
            a new histogram instance representing the decoded content
        Exception:
            TypeError in case of base64 decode error
            HdrCookieException:
                the main header has an invalid cookie
                the compressed payload header has an invalid cookie
            HdrLengthException:
                the decompressed size is too small for the HdrPayload structure
                or is not aligned or is too large for the passed payload class
            zlib.error:
                in case of zlib decompression error
        '''
        hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
        payload = hdr_payload.payload
        # rebuild a histogram with the encoded configuration; passing
        # hdr_payload lets __init__ adopt the decoded counts directly
        histogram = HdrHistogram(payload.lowest_trackable_value,
                                 payload.highest_trackable_value,
                                 payload.significant_figures,
                                 hdr_payload=hdr_payload)
        return histogram
    def get_word_size(self):
        """Return the counter word size in bytes (2, 4 or 8 are supported)."""
        return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.decode_and_add | python | def decode_and_add(self, encoded_histogram):
'''Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
self.add(other_hist) | Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L530-L547 | [
"def add(self, other_hist):\n highest_recordable_value = \\\n self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))\n if highest_recordable_value < other_hist.get_max_value():\n raise IndexError(\"The other histogram includes values that do not fit %d < %d\" %\n (highest_recordable_value, other_hist.get_max_value()))\n\n if (self.bucket_count == other_hist.bucket_count) and \\\n (self.sub_bucket_count == other_hist.sub_bucket_count) and \\\n (self.unit_magnitude == other_hist.unit_magnitude) and \\\n (self.word_size == other_hist.word_size):\n\n # do an in-place addition of one array to another\n self.encoder.add(other_hist.encoder)\n\n self.total_count += other_hist.get_total_count()\n self.max_value = max(self.max_value, other_hist.get_max_value())\n self.min_value = min(self.get_min_value(), other_hist.get_min_value())\n else:\n # Arrays are not a direct match, so we can't just stream through and add them.\n # Instead, go through the array and add each non-zero value found at it's proper value:\n for index in range(other_hist.counts_len):\n other_count = other_hist.get_count_at_index(index)\n if other_count > 0:\n self.record_value(other_hist.get_value_from_index(index), other_count)\n\n self.start_time_stamp_msec = \\\n min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)\n self.end_time_stamp_msec = \\\n max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)\n",
"def decode(encoded_histogram, b64_wrap=True):\n '''Decode an encoded histogram and return a new histogram instance that\n has been initialized with the decoded content\n Return:\n a new histogram instance representing the decoded content\n Exception:\n TypeError in case of base64 decode error\n HdrCookieException:\n the main header has an invalid cookie\n the compressed payload header has an invalid cookie\n HdrLengthException:\n the decompressed size is too small for the HdrPayload structure\n or is not aligned or is too large for the passed payload class\n zlib.error:\n in case of zlib decompression error\n '''\n hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)\n payload = hdr_payload.payload\n histogram = HdrHistogram(payload.lowest_trackable_value,\n payload.highest_trackable_value,\n payload.significant_figures,\n hdr_payload=hdr_payload)\n return histogram\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays, we return zero for any index that is passed the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0
def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
'''Set the end time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
highest_recordable_value = \
self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
if highest_recordable_value < other_hist.get_max_value():
raise IndexError("The other histogram includes values that do not fit %d < %d" %
(highest_recordable_value, other_hist.get_max_value()))
if (self.bucket_count == other_hist.bucket_count) and \
(self.sub_bucket_count == other_hist.sub_bucket_count) and \
(self.unit_magnitude == other_hist.unit_magnitude) and \
(self.word_size == other_hist.word_size):
# do an in-place addition of one array to another
self.encoder.add(other_hist.encoder)
self.total_count += other_hist.get_total_count()
self.max_value = max(self.max_value, other_hist.get_max_value())
self.min_value = min(self.get_min_value(), other_hist.get_min_value())
else:
# Arrays are not a direct match, so we can't just stream through and add them.
# Instead, go through the array and add each non-zero value found at it's proper value:
for index in range(other_hist.counts_len):
other_count = other_hist.get_count_at_index(index)
if other_count > 0:
self.record_value(other_hist.get_value_from_index(index), other_count)
self.start_time_stamp_msec = \
min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
self.end_time_stamp_msec = \
max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram
def get_word_size(self):
return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/histogram.py | HdrHistogram.decode | python | def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram | Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L550-L572 | [
"def decode(encoded_histogram, b64_wrap=True):\n '''Decode a wire histogram encoding into a read-only Hdr Payload instance\n Args:\n encoded_histogram a string containing the wire encoding of a histogram\n such as one returned from encode()\n Returns:\n an hdr_payload instance with all the decoded/uncompressed fields\n\n Exception:\n TypeError in case of base64 decode error\n HdrCookieException:\n the main header has an invalid cookie\n the compressed payload header has an invalid cookie\n HdrLengthException:\n the decompressed size is too small for the HdrPayload structure\n or is not aligned or is too large for the passed payload class\n HdrHistogramSettingsException:\n mismatch in the significant figures, lowest and highest\n trackable value\n zlib.error:\n in case of zlib decompression error\n '''\n if b64_wrap:\n b64decode = base64.b64decode(encoded_histogram)\n # this string has 2 parts in it: the header (raw) and the payload (compressed)\n b64dec_len = len(b64decode)\n\n if b64dec_len < ext_header_size:\n raise HdrLengthException('Base64 decoded message too short')\n\n header = ExternalHeader.from_buffer_copy(b64decode)\n if get_cookie_base(header.cookie) != V2_COMPRESSION_COOKIE_BASE:\n raise HdrCookieException()\n if header.length != b64dec_len - ext_header_size:\n raise HdrLengthException('Decoded length=%d buffer length=%d' %\n (header.length, b64dec_len - ext_header_size))\n # this will result in a copy of the compressed payload part\n # could not find a way to do otherwise since zlib.decompress()\n # expects a string (and does not like a buffer or a memoryview object)\n cpayload = b64decode[ext_header_size:]\n else:\n cpayload = encoded_histogram\n hdr_payload = HdrPayload(8, compressed_payload=cpayload)\n return hdr_payload\n"
] | class HdrHistogram():
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At it's maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be a non-negative integer between 0 and 5.
word_size size of counters in bytes, only 2, 4, 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send over the wire. By default base64
encoding is assumed
hdr_payload only used for associating an existing payload created
from decoding an encoded histograme
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True
# pylint: disable=inconsistent-return-statements
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays, we return zero for any index that is passed the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0
def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if self.max_value == 0:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if self.counts[0] > 0 or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
self.start_time_stamp_msec = sys.maxsize
self.end_time_stamp_msec = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add to adjust the new min/max value and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket:
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# The following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
# Calculate the offset in the bucket (can be negative for first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index
return bucket_base_index + offset_in_bucket
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
'''Set the end time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
highest_recordable_value = \
self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
if highest_recordable_value < other_hist.get_max_value():
raise IndexError("The other histogram includes values that do not fit %d < %d" %
(highest_recordable_value, other_hist.get_max_value()))
if (self.bucket_count == other_hist.bucket_count) and \
(self.sub_bucket_count == other_hist.sub_bucket_count) and \
(self.unit_magnitude == other_hist.unit_magnitude) and \
(self.word_size == other_hist.word_size):
# do an in-place addition of one array to another
self.encoder.add(other_hist.encoder)
self.total_count += other_hist.get_total_count()
self.max_value = max(self.max_value, other_hist.get_max_value())
self.min_value = min(self.get_min_value(), other_hist.get_min_value())
else:
# Arrays are not a direct match, so we can't just stream through and add them.
# Instead, go through the array and add each non-zero value found at it's proper value:
for index in range(other_hist.counts_len):
other_count = other_hist.get_count_at_index(index)
if other_count > 0:
self.record_value(other_hist.get_value_from_index(index), other_count)
self.start_time_stamp_msec = \
min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
self.end_time_stamp_msec = \
max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
def decode_and_add(self, encoded_histogram):
'''Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
self.add(other_hist)
@staticmethod
def get_word_size(self):
return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write(b'%12s %14s %10s %14s\n\n' %
(b'Value', b'Percentile', b'TotalCount', b'1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format.encode() % (value, percentile,
total_count, other))
else:
out_file.write(last_line_percentile_format.encode() % (value,
percentile,
total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.
format(self.significant_figures).encode() % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures).encode() % (max, total))
out_file.write(b'#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
|
HdrHistogram/HdrHistogram_py | hdrh/log.py | HistogramLogWriter.output_interval_histogram | python | def output_interval_histogram(self,
histogram,
start_time_stamp_sec=0,
end_time_stamp_sec=0,
max_value_unit_ratio=1000000.0):
'''Output an interval histogram, with the given timestamp and a
configurable maxValueUnitRatio.
(note that the specified timestamp will be used, and the timestamp in
the actual histogram will be ignored).
The max value reported with the interval line will be scaled by the
given max_value_unit_ratio.
The histogram start and end timestamps are assumed to be in msec units.
Logging will be in seconds, realtive by a base time
The default base time is 0.
By covention, histogram start/end time are generally stamped with
absolute times in msec since the epoch. For logging with absolute time
stamps, the base time would remain zero. For
logging with relative time stamps (time since a start point),
Params:
histogram The interval histogram to log.
start_time_stamp_sec The start timestamp to log with the
interval histogram, in seconds.
default: using the start/end timestamp indicated in the histogram
end_time_stamp_sec The end timestamp to log with the interval
histogram, in seconds.
default: using the start/end timestamp indicated in the histogram
max_value_unit_ratio The ratio by which to divide the histogram's max
value when reporting on it.
default: 1,000,000 (which is the msec : nsec ratio
'''
if not start_time_stamp_sec:
start_time_stamp_sec = \
(histogram.get_start_time_stamp() - self.base_time) / 1000.0
if not end_time_stamp_sec:
end_time_stamp_sec = (histogram.get_end_time_stamp() - self.base_time) / 1000.0
cpayload = histogram.encode()
self.log.write("%f,%f,%f,%s\n" %
(start_time_stamp_sec,
end_time_stamp_sec - start_time_stamp_sec,
histogram.get_max_value() // max_value_unit_ratio,
cpayload.decode('utf-8'))) | Output an interval histogram, with the given timestamp and a
configurable maxValueUnitRatio.
(note that the specified timestamp will be used, and the timestamp in
the actual histogram will be ignored).
The max value reported with the interval line will be scaled by the
given max_value_unit_ratio.
The histogram start and end timestamps are assumed to be in msec units.
Logging will be in seconds, realtive by a base time
The default base time is 0.
By covention, histogram start/end time are generally stamped with
absolute times in msec since the epoch. For logging with absolute time
stamps, the base time would remain zero. For
logging with relative time stamps (time since a start point),
Params:
histogram The interval histogram to log.
start_time_stamp_sec The start timestamp to log with the
interval histogram, in seconds.
default: using the start/end timestamp indicated in the histogram
end_time_stamp_sec The end timestamp to log with the interval
histogram, in seconds.
default: using the start/end timestamp indicated in the histogram
max_value_unit_ratio The ratio by which to divide the histogram's max
value when reporting on it.
default: 1,000,000 (which is the msec : nsec ratio | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/log.py#L51-L92 | [
"def get_max_value(self):\n if self.max_value == 0:\n return 0\n return self.get_highest_equivalent_value(self.max_value)\n",
"def encode(self):\n '''Encode this histogram\n Return:\n a string containing the base64 encoded compressed histogram (V1 format)\n '''\n return self.encoder.encode()\n",
"def get_start_time_stamp(self):\n return self.start_time_stamp_msec\n",
"def get_end_time_stamp(self):\n return self.end_time_stamp_msec\n"
class HistogramLogWriter():
    '''Writes HdrHistogram log-format metadata and legend lines to a file.

    The log format is line oriented: comment/metadata lines start with '#',
    followed by a CSV legend line and one line per interval histogram.
    '''

    HISTOGRAM_LOG_FORMAT_VERSION = "1.2"

    def __init__(self, output_file):
        '''Constructs a new HistogramLogWriter that will write into the specified file.
        Params:
            output_file the File to write to
        '''
        self.log = output_file
        # Base time (msec since epoch) subtracted from histogram timestamps
        # when logging relative times; 0 means absolute timestamps.
        self.base_time = 0

    def output_start_time(self, start_time_msec):
        '''Log a start time in the log.
        Params:
            start_time_msec time (in milliseconds) since the absolute start time (the epoch)
        '''
        # Bug fix: datetime.fromtimestamp() expects seconds, not msec, so the
        # input is scaled by 1000; and the method is isoformat(), not
        # iso_format() -- the original raised AttributeError on every call.
        self.log.write("#[StartTime: %f (seconds since epoch), %s]\n" %
                       (float(start_time_msec) / 1000.0,
                        datetime.fromtimestamp(start_time_msec / 1000.0).isoformat(' ')))

    def output_base_time(self, base_time_msec):
        '''Log a base time in the log.
        Params:
            base_time_msec time (in milliseconds) since the absolute start time (the epoch)
        '''
        self.log.write("#[BaseTime: %f (seconds since epoch)]\n" %
                       (float(base_time_msec) / 1000.0))

    def output_comment(self, comment):
        '''Log a comment to the log.
        Comments will be preceded with the '#' character.
        Params:
            comment the comment string.
        '''
        self.log.write("#%s\n" % (comment))

    def output_legend(self):
        '''Output the CSV legend line to the log.
        '''
        self.log.write("\"StartTimestamp\",\"Interval_Length\","
                       "\"Interval_Max\",\"Interval_Compressed_Histogram\"\n")

    def output_log_format_version(self):
        '''Output a log format version comment to the log.
        '''
        self.output_comment("[Histogram log format version " +
                            HistogramLogWriter.HISTOGRAM_LOG_FORMAT_VERSION + "]")

    def close(self):
        '''Close the underlying log file.'''
        self.log.close()
|
def output_start_time(self, start_time_msec):
    '''Log a start time in the log.

    Params:
        start_time_msec time (in milliseconds) since the absolute start time (the epoch)
    '''
    # Fixes vs original: datetime.isoformat() is the correct API name
    # (iso_format() does not exist and raised AttributeError), and
    # fromtimestamp() takes seconds, so the millisecond value is scaled.
    self.log.write("#[StartTime: %f (seconds since epoch), %s]\n" %
                   (float(start_time_msec) / 1000.0,
                    datetime.fromtimestamp(start_time_msec / 1000.0).isoformat(' ')))
Params:
class HistogramLogWriter():
    '''Serializes interval histograms and header lines into a histogram
    log file (format version 1.2).'''

    HISTOGRAM_LOG_FORMAT_VERSION = "1.2"

    def __init__(self, output_file):
        '''Create a writer that appends log lines to the given file object.
        Params:
            output_file the File to write to
        '''
        self.log = output_file
        self.base_time = 0

    def output_interval_histogram(self,
                                  histogram,
                                  start_time_stamp_sec=0,
                                  end_time_stamp_sec=0,
                                  max_value_unit_ratio=1000000.0):
        '''Write one interval histogram as a single log line.

        The line is "<start>,<length>,<scaled max>,<encoded payload>" where
        the histogram's max value is divided by max_value_unit_ratio. When
        no explicit start/end stamps are given, the histogram's own start/end
        timestamps (msec, offset by self.base_time) are converted to seconds
        and used instead.

        Params:
            histogram The interval histogram to log.
            start_time_stamp_sec start timestamp to log, in seconds
                (default: derived from the histogram itself)
            end_time_stamp_sec end timestamp to log, in seconds
                (default: derived from the histogram itself)
            max_value_unit_ratio ratio by which to divide the histogram's max
                value when reporting on it
                (default: 1,000,000, which is the msec : nsec ratio)
        '''
        if not start_time_stamp_sec:
            start_time_stamp_sec = \
                (histogram.get_start_time_stamp() - self.base_time) / 1000.0
        if not end_time_stamp_sec:
            end_time_stamp_sec = (histogram.get_end_time_stamp() - self.base_time) / 1000.0
        interval_length_sec = end_time_stamp_sec - start_time_stamp_sec
        payload = histogram.encode()
        scaled_max = histogram.get_max_value() // max_value_unit_ratio
        self.log.write("%f,%f,%f,%s\n" %
                       (start_time_stamp_sec,
                        interval_length_sec,
                        scaled_max,
                        payload.decode('utf-8')))

    def output_base_time(self, base_time_msec):
        '''Log a base time in the log.
        Params:
            base_time_msec time (in milliseconds) since the absolute start time (the epoch)
        '''
        base_time_sec = float(base_time_msec) / 1000.0
        self.log.write("#[BaseTime: %f (seconds since epoch)]\n" % (base_time_sec))

    def output_comment(self, comment):
        '''Log a comment to the log, preceded with the '#' character.
        Params:
            comment the comment string.
        '''
        line = "#%s\n" % (comment)
        self.log.write(line)

    def output_legend(self):
        '''Output a legend line (CSV column headers) to the log.'''
        self.log.write('"StartTimestamp","Interval_Length",'
                       '"Interval_Max","Interval_Compressed_Histogram"\n')

    def output_log_format_version(self):
        '''Output a log format version comment to the log.'''
        self.output_comment("[Histogram log format version %s]" %
                            HistogramLogWriter.HISTOGRAM_LOG_FORMAT_VERSION)

    def close(self):
        '''Close the underlying output file.'''
        self.log.close()
|
def _decode_next_interval_histogram(self,
                                    dest_histogram,
                                    range_start_time_sec=0.0,
                                    range_end_time_sec=sys.maxsize,
                                    absolute=False):
    '''Read the next interval histogram from the log, if interval falls
    within an absolute or relative time range.

    Timestamps are assumed to appear in order in the log file, and as such
    this method will return a null upon encountering a timestamp larger than
    range_end_time_sec.

    Relative time range:
        the range is assumed to be in seconds relative to
        the actual timestamp value found in each interval line in the log
    Absolute time range:
        Absolute timestamps are calculated by adding the timestamp found
        with the recorded interval to the [latest, optional] start time
        found in the log. The start time is indicated in the log with
        a "#[StartTime: " followed by the start time in seconds.

    Params:
        dest_histogram if None, created a new histogram, else adds
                       the new interval histogram to it
        range_start_time_sec The absolute or relative start of the expected
                             time range, in seconds.
        range_end_time_sec The absolute or relative end of the expected
                           time range, in seconds.
        absolute Defines if the passed range is absolute or relative

    Return:
        Returns an histogram object if an interval line was found with an
        associated start timestamp value that falls between start_time_sec and
        end_time_sec, or null if no such interval line is found.
        Upon encountering any unexpected format errors in reading the next
        interval from the file, this method will return None.

        The histogram returned will have its timestamp set to the absolute
        timestamp calculated from adding the interval's indicated timestamp
        value to the latest [optional] start time found in the log.

    Exceptions:
        ValueError if there is a syntax error in one of the float fields
    '''
    # Scan line by line until an in-range interval is found, EOF is hit,
    # or an interval beyond range_end_time_sec is seen.
    while 1:
        line = self.input_file.readline()
        if not line:
            # EOF reached: no more interval lines in this log
            return None
        if line[0] == '#':
            # Comment lines may carry StartTime/BaseTime metadata.
            # NOTE(review): re_start_time / re_base_time / re_histogram_interval
            # are presumably module-level compiled regexes defined elsewhere
            # in this file — confirm against the full module.
            match_res = re_start_time.match(line)
            if match_res:
                self.start_time_sec = float(match_res.group(1))
                self.observed_start_time = True
                continue
            match_res = re_base_time.match(line)
            if match_res:
                self.base_time_sec = float(match_res.group(1))
                self.observed_base_time = True
                continue

        match_res = re_histogram_interval.match(line)
        if not match_res:
            # probably a legend line that starts with "\"StartTimestamp"
            continue
        # Decode: startTimestamp, intervalLength, maxTime, histogramPayload
        # Timestamp is expected to be in seconds
        log_time_stamp_in_sec = float(match_res.group(1))
        interval_length_sec = float(match_res.group(2))
        cpayload = match_res.group(4)

        if not self.observed_start_time:
            # No explicit start time noted. Use 1st observed time:
            self.start_time_sec = log_time_stamp_in_sec
            self.observed_start_time = True

        if not self.observed_base_time:
            # No explicit base time noted.
            # Deduce from 1st observed time (compared to start time):
            if log_time_stamp_in_sec < self.start_time_sec - (365 * 24 * 3600.0):
                # Criteria Note: if log timestamp is more than a year in
                # the past (compared to StartTime),
                # we assume that timestamps in the log are not absolute
                self.base_time_sec = self.start_time_sec
            else:
                # Timestamps are absolute
                self.base_time_sec = 0.0
            self.observed_base_time = True

        absolute_start_time_stamp_sec = \
            log_time_stamp_in_sec + self.base_time_sec
        offset_start_time_stamp_sec = \
            absolute_start_time_stamp_sec - self.start_time_sec

        # Timestamp length is expect to be in seconds
        absolute_end_time_stamp_sec = \
            absolute_start_time_stamp_sec + interval_length_sec

        # Range filtering is done on absolute or offset time per 'absolute'
        if absolute:
            start_time_stamp_to_check_range_on = absolute_start_time_stamp_sec
        else:
            start_time_stamp_to_check_range_on = offset_start_time_stamp_sec

        if start_time_stamp_to_check_range_on < range_start_time_sec:
            # Interval precedes the requested range: keep scanning
            continue

        if start_time_stamp_to_check_range_on > range_end_time_sec:
            # Intervals are ordered, so nothing later can be in range
            return None
        if dest_histogram:
            # add the interval histogram to the destination histogram
            histogram = dest_histogram
            histogram.decode_and_add(cpayload)
        else:
            histogram = HdrHistogram.decode(cpayload)
        # Stamp the result with absolute start/end times (msec units)
        histogram.set_start_time_stamp(absolute_start_time_stamp_sec * 1000.0)
        histogram.set_end_time_stamp(absolute_end_time_stamp_sec * 1000.0)
        return histogram
within an absolute or relative time range.
Timestamps are assumed to appear in order in the log file, and as such
this method will return a null upon encountering a timestamp larger than
range_end_time_sec.
Relative time range:
the range is assumed to be in seconds relative to
the actual timestamp value found in each interval line in the log
Absolute time range:
Absolute timestamps are calculated by adding the timestamp found
with the recorded interval to the [latest, optional] start time
found in the log. The start time is indicated in the log with
a "#[StartTime: " followed by the start time in seconds.
Params:
dest_histogram if None, created a new histogram, else adds
the new interval histogram to it
range_start_time_sec The absolute or relative start of the expected
time range, in seconds.
range_end_time_sec The absolute or relative end of the expected
time range, in seconds.
absolute Defines if the passed range is absolute or relative
Return:
Returns an histogram object if an interval line was found with an
associated start timestamp value that falls between start_time_sec and
end_time_sec,
or null if no such interval line is found.
Upon encountering any unexpected format errors in reading the next
interval from the file, this method will return None.
The histogram returned will have its timestamp set to the absolute
timestamp calculated from adding the interval's indicated timestamp
value to the latest [optional] start time found in the log.
Exceptions:
ValueError if there is a syntax error in one of the float fields | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/log.py#L173-L289 | [
"def decode(encoded_histogram, b64_wrap=True):\n '''Decode an encoded histogram and return a new histogram instance that\n has been initialized with the decoded content\n Return:\n a new histogram instance representing the decoded content\n Exception:\n TypeError in case of base64 decode error\n HdrCookieException:\n the main header has an invalid cookie\n the compressed payload header has an invalid cookie\n HdrLengthException:\n the decompressed size is too small for the HdrPayload structure\n or is not aligned or is too large for the passed payload class\n zlib.error:\n in case of zlib decompression error\n '''\n hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)\n payload = hdr_payload.payload\n histogram = HdrHistogram(payload.lowest_trackable_value,\n payload.highest_trackable_value,\n payload.significant_figures,\n hdr_payload=hdr_payload)\n return histogram\n"
class HistogramLogReader():
    '''Produces interval histograms read back out of a histogram log file.'''

    def __init__(self, input_file_name, reference_histogram):
        '''Constructs a new HistogramLogReader that produces intervals read
        from the specified file name.
        Params:
            input_file_name The name of the file to read from
            reference_histogram a histogram instance used as a reference to create
                                new instances for all subsequent decoded interval
                                histograms
        '''
        self.start_time_sec = 0.0
        self.observed_start_time = False
        self.base_time_sec = 0.0
        self.observed_base_time = False
        self.input_file = open(input_file_name, "r")
        self.reference_histogram = reference_histogram

    def get_start_time_sec(self):
        '''Return the latest start time found in the file so far (or 0.0).

        Assuming the "#[StartTime:" comment line precedes the recorded
        intervals, this can safely be called after each interval is read to
        determine that interval's timestamp offset from the epoch.
        '''
        return self.start_time_sec

    def get_next_interval_histogram(self,
                                    range_start_time_sec=0.0,
                                    range_end_time_sec=sys.maxsize,
                                    absolute=False):
        '''Decode and return the next interval histogram whose start timestamp
        falls inside the requested time range, as a brand-new histogram
        instance.

        Timestamps are assumed to appear in order, so None is returned as
        soon as a timestamp past range_end_time_sec is seen (or on EOF or a
        format error). The range is interpreted as absolute (epoch-based) or
        relative to the log's start time per the 'absolute' flag. The
        returned histogram carries the absolute interval timestamps.

        Params:
            range_start_time_sec The absolute or relative start of the
                                 expected time range, in seconds.
            range_end_time_sec The absolute or relative end of the expected
                               time range, in seconds.
            absolute Defines if the passed range is absolute or relative

        Exceptions:
            ValueError if there is a syntax error in one of the float fields
        '''
        # A None destination asks the decoder to build a fresh histogram.
        return self._decode_next_interval_histogram(
            None,
            range_start_time_sec=range_start_time_sec,
            range_end_time_sec=range_end_time_sec,
            absolute=absolute)

    def add_next_interval_histogram(self,
                                    dest_histogram=None,
                                    range_start_time_sec=0.0,
                                    range_end_time_sec=sys.maxsize,
                                    absolute=False):
        '''Decode the next in-range interval histogram and add it into
        dest_histogram (or into the reference histogram passed to the
        constructor when dest_histogram is None).

        Same range semantics, return-None conditions and timestamping as
        get_next_interval_histogram(); on success the destination histogram
        is returned.

        Params:
            dest_histogram where to add the next interval histogram; when
                           None, the reference histogram is used
            range_start_time_sec The absolute or relative start of the
                                 expected time range, in seconds.
            range_end_time_sec The absolute or relative end of the expected
                               time range, in seconds.
            absolute Defines if the passed range is absolute or relative

        Exceptions:
            ValueError if there is a syntax error in one of the float fields
        '''
        target = dest_histogram if dest_histogram else self.reference_histogram
        return self._decode_next_interval_histogram(
            target,
            range_start_time_sec=range_start_time_sec,
            range_end_time_sec=range_end_time_sec,
            absolute=absolute)

    def close(self):
        '''Close the underlying input file.'''
        self.input_file.close()
|
def get_next_interval_histogram(self,
                                range_start_time_sec=0.0,
                                range_end_time_sec=sys.maxsize,
                                absolute=False):
    '''Decode and return the next interval histogram whose start timestamp
    falls inside the requested time range, as a brand-new histogram instance.

    Timestamps are assumed to appear in order in the log, so None is
    returned as soon as a timestamp past range_end_time_sec is encountered
    (or on EOF or an unexpected format error). The range is interpreted as
    absolute (epoch-based) or relative to the log's start time depending on
    the 'absolute' flag. The returned histogram has its timestamp set to
    the absolute interval timestamp derived from the log.

    Params:
        range_start_time_sec The absolute or relative start of the expected
                             time range, in seconds.
        range_end_time_sec The absolute or relative end of the expected
                           time range, in seconds.
        absolute Defines if the passed range is absolute or relative

    Exceptions:
        ValueError if there is a syntax error in one of the float fields
    '''
    # A None destination tells the decoder to create a new histogram
    # rather than accumulate into an existing one.
    return self._decode_next_interval_histogram(
        None,
        range_start_time_sec=range_start_time_sec,
        range_end_time_sec=range_end_time_sec,
        absolute=absolute)
within an absolute or relative time range.
Timestamps are assumed to appear in order in the log file, and as such
this method will return a null upon encountering a timestamp larger than
range_end_time_sec.
Relative time range:
the range is assumed to be in seconds relative to
the actual timestamp value found in each interval line in the log
Absolute time range:
Absolute timestamps are calculated by adding the timestamp found
with the recorded interval to the [latest, optional] start time
found in the log. The start time is indicated in the log with
a "#[StartTime: " followed by the start time in seconds.
Params:
range_start_time_sec The absolute or relative start of the expected
time range, in seconds.
range_end_time_sec The absolute or relative end of the expected
time range, in seconds.
absolute Defines if the passed range is absolute or relative
Return:
Returns an histogram object if an interval line was found with an
associated start timestamp value that falls between start_time_sec and
end_time_sec,
or null if no such interval line is found.
Upon encountering any unexpected format errors in reading the next
interval from the file, this method will return None.
The histogram returned will have it's timestamp set to the absolute
timestamp calculated from adding the interval's indicated timestamp
value to the latest [optional] start time found in the log.
Exceptions:
ValueError if there is a syntax error in one of the float fields | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/log.py#L291-L337 | [
"def _decode_next_interval_histogram(self,\n dest_histogram,\n range_start_time_sec=0.0,\n range_end_time_sec=sys.maxsize,\n absolute=False):\n '''Read the next interval histogram from the log, if interval falls\n within an absolute or relative time range.\n\n Timestamps are assumed to appear in order in the log file, and as such\n this method will return a null upon encountering a timestamp larger than\n range_end_time_sec.\n\n Relative time range:\n the range is assumed to be in seconds relative to\n the actual timestamp value found in each interval line in the log\n Absolute time range:\n Absolute timestamps are calculated by adding the timestamp found\n with the recorded interval to the [latest, optional] start time\n found in the log. The start time is indicated in the log with\n a \"#[StartTime: \" followed by the start time in seconds.\n\n Params:\n dest_histogram if None, created a new histogram, else adds\n the new interval histogram to it\n range_start_time_sec The absolute or relative start of the expected\n time range, in seconds.\n range_start_time_sec The absolute or relative end of the expected\n time range, in seconds.\n absolute Defines if the passed range is absolute or relative\n\n Return:\n Returns an histogram object if an interval line was found with an\n associated start timestamp value that falls between start_time_sec and\n end_time_sec,\n or null if no such interval line is found.\n Upon encountering any unexpected format errors in reading the next\n interval from the file, this method will return None.\n\n The histogram returned will have it's timestamp set to the absolute\n timestamp calculated from adding the interval's indicated timestamp\n value to the latest [optional] start time found in the log.\n\n Exceptions:\n ValueError if there is a syntax error in one of the float fields\n '''\n while 1:\n line = self.input_file.readline()\n if not line:\n return None\n if line[0] == '#':\n match_res = re_start_time.match(line)\n if 
match_res:\n self.start_time_sec = float(match_res.group(1))\n self.observed_start_time = True\n continue\n match_res = re_base_time.match(line)\n if match_res:\n self.base_time_sec = float(match_res.group(1))\n self.observed_base_time = True\n continue\n\n match_res = re_histogram_interval.match(line)\n if not match_res:\n # probably a legend line that starts with \"\\\"StartTimestamp\"\n continue\n # Decode: startTimestamp, intervalLength, maxTime, histogramPayload\n # Timestamp is expected to be in seconds\n log_time_stamp_in_sec = float(match_res.group(1))\n interval_length_sec = float(match_res.group(2))\n cpayload = match_res.group(4)\n\n if not self.observed_start_time:\n # No explicit start time noted. Use 1st observed time:\n self.start_time_sec = log_time_stamp_in_sec\n self.observed_start_time = True\n\n if not self.observed_base_time:\n # No explicit base time noted.\n # Deduce from 1st observed time (compared to start time):\n if log_time_stamp_in_sec < self.start_time_sec - (365 * 24 * 3600.0):\n # Criteria Note: if log timestamp is more than a year in\n # the past (compared to StartTime),\n # we assume that timestamps in the log are not absolute\n self.base_time_sec = self.start_time_sec\n else:\n # Timestamps are absolute\n self.base_time_sec = 0.0\n self.observed_base_time = True\n\n absolute_start_time_stamp_sec = \\\n log_time_stamp_in_sec + self.base_time_sec\n offset_start_time_stamp_sec = \\\n absolute_start_time_stamp_sec - self.start_time_sec\n\n # Timestamp length is expect to be in seconds\n absolute_end_time_stamp_sec = \\\n absolute_start_time_stamp_sec + interval_length_sec\n\n if absolute:\n start_time_stamp_to_check_range_on = absolute_start_time_stamp_sec\n else:\n start_time_stamp_to_check_range_on = offset_start_time_stamp_sec\n\n if start_time_stamp_to_check_range_on < range_start_time_sec:\n continue\n\n if start_time_stamp_to_check_range_on > range_end_time_sec:\n return None\n if dest_histogram:\n # add the interval histogram 
to the destination histogram\n histogram = dest_histogram\n histogram.decode_and_add(cpayload)\n else:\n histogram = HdrHistogram.decode(cpayload)\n histogram.set_start_time_stamp(absolute_start_time_stamp_sec * 1000.0)\n histogram.set_end_time_stamp(absolute_end_time_stamp_sec * 1000.0)\n return histogram\n"
class HistogramLogReader():
    '''Reads and decodes interval histograms from a histogram log file.'''

    def __init__(self, input_file_name, reference_histogram):
        '''Constructs a new HistogramLogReader that produces intervals read
        from the specified file name.
        Params:
            input_file_name The name of the file to read from
            reference_histogram a histogram instance used as a reference to create
                                new instances for all subsequent decoded interval
                                histograms
        '''
        # Latest "#[StartTime: ...]" seen (seconds since epoch); 0.0 until observed
        self.start_time_sec = 0.0
        self.observed_start_time = False
        # Offset added to each logged interval timestamp to get absolute time
        self.base_time_sec = 0.0
        self.observed_base_time = False
        self.input_file = open(input_file_name, "r")
        self.reference_histogram = reference_histogram

    def get_start_time_sec(self):
        '''get the latest start time found in the file so far (or 0.0),
        per the log file format explained above. Assuming the "#[StartTime:" comment
        line precedes the actual intervals recorded in the file, getStartTimeSec() can
        be safely used after each interval is read to determine that
        interval's timestamp offset from the epoch.
        Return:
            latest Start Time found in the file (or 0.0 if none found)
        '''
        return self.start_time_sec

    def _decode_next_interval_histogram(self,
                                        dest_histogram,
                                        range_start_time_sec=0.0,
                                        range_end_time_sec=sys.maxsize,
                                        absolute=False):
        '''Read the next interval histogram from the log, if interval falls
        within an absolute or relative time range.

        Timestamps are assumed to appear in order in the log file, and as such
        this method will return a null upon encountering a timestamp larger than
        range_end_time_sec.

        Relative time range:
            the range is assumed to be in seconds relative to
            the actual timestamp value found in each interval line in the log
        Absolute time range:
            Absolute timestamps are calculated by adding the timestamp found
            with the recorded interval to the [latest, optional] start time
            found in the log. The start time is indicated in the log with
            a "#[StartTime: " followed by the start time in seconds.

        Params:
            dest_histogram if None, created a new histogram, else adds
                           the new interval histogram to it
            range_start_time_sec The absolute or relative start of the expected
                                 time range, in seconds.
            range_end_time_sec The absolute or relative end of the expected
                               time range, in seconds.
            absolute Defines if the passed range is absolute or relative

        Return:
            Returns an histogram object if an interval line was found with an
            associated start timestamp value that falls between start_time_sec and
            end_time_sec, or null if no such interval line is found.
            Upon encountering any unexpected format errors in reading the next
            interval from the file, this method will return None.

            The histogram returned will have its timestamp set to the absolute
            timestamp calculated from adding the interval's indicated timestamp
            value to the latest [optional] start time found in the log.

        Exceptions:
            ValueError if there is a syntax error in one of the float fields
        '''
        # Scan until an in-range interval is found, EOF is hit, or an
        # interval beyond range_end_time_sec is seen.
        while 1:
            line = self.input_file.readline()
            if not line:
                # EOF reached: no more interval lines in this log
                return None
            if line[0] == '#':
                # Comment lines may carry StartTime/BaseTime metadata.
                # NOTE(review): re_start_time / re_base_time /
                # re_histogram_interval are presumably module-level regexes
                # defined elsewhere in this file — confirm against the module.
                match_res = re_start_time.match(line)
                if match_res:
                    self.start_time_sec = float(match_res.group(1))
                    self.observed_start_time = True
                    continue
                match_res = re_base_time.match(line)
                if match_res:
                    self.base_time_sec = float(match_res.group(1))
                    self.observed_base_time = True
                    continue

            match_res = re_histogram_interval.match(line)
            if not match_res:
                # probably a legend line that starts with "\"StartTimestamp"
                continue
            # Decode: startTimestamp, intervalLength, maxTime, histogramPayload
            # Timestamp is expected to be in seconds
            log_time_stamp_in_sec = float(match_res.group(1))
            interval_length_sec = float(match_res.group(2))
            cpayload = match_res.group(4)

            if not self.observed_start_time:
                # No explicit start time noted. Use 1st observed time:
                self.start_time_sec = log_time_stamp_in_sec
                self.observed_start_time = True

            if not self.observed_base_time:
                # No explicit base time noted.
                # Deduce from 1st observed time (compared to start time):
                if log_time_stamp_in_sec < self.start_time_sec - (365 * 24 * 3600.0):
                    # Criteria Note: if log timestamp is more than a year in
                    # the past (compared to StartTime),
                    # we assume that timestamps in the log are not absolute
                    self.base_time_sec = self.start_time_sec
                else:
                    # Timestamps are absolute
                    self.base_time_sec = 0.0
                self.observed_base_time = True

            absolute_start_time_stamp_sec = \
                log_time_stamp_in_sec + self.base_time_sec
            offset_start_time_stamp_sec = \
                absolute_start_time_stamp_sec - self.start_time_sec

            # Timestamp length is expect to be in seconds
            absolute_end_time_stamp_sec = \
                absolute_start_time_stamp_sec + interval_length_sec

            # Range filtering uses absolute or offset time per 'absolute'
            if absolute:
                start_time_stamp_to_check_range_on = absolute_start_time_stamp_sec
            else:
                start_time_stamp_to_check_range_on = offset_start_time_stamp_sec

            if start_time_stamp_to_check_range_on < range_start_time_sec:
                # Interval precedes the requested range: keep scanning
                continue

            if start_time_stamp_to_check_range_on > range_end_time_sec:
                # Intervals are ordered, so nothing later can be in range
                return None
            if dest_histogram:
                # add the interval histogram to the destination histogram
                histogram = dest_histogram
                histogram.decode_and_add(cpayload)
            else:
                histogram = HdrHistogram.decode(cpayload)
            # Stamp the result with absolute start/end times (msec units)
            histogram.set_start_time_stamp(absolute_start_time_stamp_sec * 1000.0)
            histogram.set_end_time_stamp(absolute_end_time_stamp_sec * 1000.0)
            return histogram

    def add_next_interval_histogram(self,
                                    dest_histogram=None,
                                    range_start_time_sec=0.0,
                                    range_end_time_sec=sys.maxsize,
                                    absolute=False):
        '''Read the next interval histogram from the log, if interval falls
        within an absolute or relative time range, and add it to the destination
        histogram (or to the reference histogram if dest_histogram is None)

        Timestamps are assumed to appear in order in the log file, and as such
        this method will return a null upon encountering a timestamp larger than
        range_end_time_sec.

        Relative time range:
            the range is assumed to be in seconds relative to
            the actual timestamp value found in each interval line in the log
        Absolute time range:
            Absolute timestamps are calculated by adding the timestamp found
            with the recorded interval to the [latest, optional] start time
            found in the log. The start time is indicated in the log with
            a "#[StartTime: " followed by the start time in seconds.

        Params:
            dest_histogram where to add the next interval histogram, if None
                           the internal histogram will be added to the reference
                           histogram passed in the constructor
            range_start_time_sec The absolute or relative start of the expected
                                 time range, in seconds.
            range_end_time_sec The absolute or relative end of the expected
                               time range, in seconds.
            absolute Defines if the passed range is absolute or relative

        Return:
            Returns the destination histogram if an interval line was found with an
            associated start timestamp value that falls between start_time_sec and
            end_time_sec, or None if no such interval line is found.
            Upon encountering any unexpected format errors in reading the next
            interval from the file, this method will return None.

            The histogram returned will have its timestamp set to the absolute
            timestamp calculated from adding the interval's indicated timestamp
            value to the latest [optional] start time found in the log.

        Exceptions:
            ValueError if there is a syntax error in one of the float fields
        '''
        if not dest_histogram:
            dest_histogram = self.reference_histogram
        return self._decode_next_interval_histogram(dest_histogram,
                                                    range_start_time_sec,
                                                    range_end_time_sec,
                                                    absolute)

    def close(self):
        '''Close the underlying input file.'''
        self.input_file.close()
|
HdrHistogram/HdrHistogram_py | hdrh/log.py | HistogramLogReader.add_next_interval_histogram | python | def add_next_interval_histogram(self,
dest_histogram=None,
range_start_time_sec=0.0,
range_end_time_sec=sys.maxsize,
absolute=False):
'''Read the next interval histogram from the log, if interval falls
within an absolute or relative time range, and add it to the destination
histogram (or to the reference histogram if dest_histogram is None)
Timestamps are assumed to appear in order in the log file, and as such
this method will return a null upon encountering a timestamp larger than
range_end_time_sec.
Relative time range:
the range is assumed to be in seconds relative to
the actual timestamp value found in each interval line in the log
Absolute time range:
Absolute timestamps are calculated by adding the timestamp found
with the recorded interval to the [latest, optional] start time
found in the log. The start time is indicated in the log with
a "#[StartTime: " followed by the start time in seconds.
Params:
dest_histogram where to add the next interval histogram, if None
the interval histogram will be added to the reference
histogram passed in the constructor
range_start_time_sec The absolute or relative start of the expected
time range, in seconds.
range_end_time_sec The absolute or relative end of the expected
time range, in seconds.
absolute Defines if the passed range is absolute or relative
Return:
Returns the destination histogram if an interval line was found with an
associated start timestamp value that falls between start_time_sec and
end_time_sec,
or None if no such interval line is found.
Upon encountering any unexpected format errors in reading the next
interval from the file, this method will return None.
The histogram returned will have it's timestamp set to the absolute
timestamp calculated from adding the interval's indicated timestamp
value to the latest [optional] start time found in the log.
Exceptions:
ValueError if there is a syntax error in one of the float fields
'''
if not dest_histogram:
dest_histogram = self.reference_histogram
return self._decode_next_interval_histogram(dest_histogram,
range_start_time_sec,
range_end_time_sec,
absolute) | Read the next interval histogram from the log, if interval falls
within an absolute or relative time range, and add it to the destination
histogram (or to the reference histogram if dest_histogram is None)
Timestamps are assumed to appear in order in the log file, and as such
this method will return a null upon encountering a timestamp larger than
range_end_time_sec.
Relative time range:
the range is assumed to be in seconds relative to
the actual timestamp value found in each interval line in the log
Absolute time range:
Absolute timestamps are calculated by adding the timestamp found
with the recorded interval to the [latest, optional] start time
found in the log. The start time is indicated in the log with
a "#[StartTime: " followed by the start time in seconds.
Params:
dest_histogram where to add the next interval histogram, if None
the interval histogram will be added to the reference
histogram passed in the constructor
range_start_time_sec The absolute or relative start of the expected
time range, in seconds.
range_start_time_sec The absolute or relative end of the expected
time range, in seconds.
absolute Defines if the passed range is absolute or relative
Return:
Returns the destination histogram if an interval line was found with an
associated start timestamp value that falls between start_time_sec and
end_time_sec,
or None if no such interval line is found.
Upon encountering any unexpected format errors in reading the next
interval from the file, this method will return None.
The histogram returned will have it's timestamp set to the absolute
timestamp calculated from adding the interval's indicated timestamp
value to the latest [optional] start time found in the log.
Exceptions:
ValueError if there is a syntax error in one of the float fields | train | https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/log.py#L339-L391 | [
"def _decode_next_interval_histogram(self,\n dest_histogram,\n range_start_time_sec=0.0,\n range_end_time_sec=sys.maxsize,\n absolute=False):\n '''Read the next interval histogram from the log, if interval falls\n within an absolute or relative time range.\n\n Timestamps are assumed to appear in order in the log file, and as such\n this method will return a null upon encountering a timestamp larger than\n range_end_time_sec.\n\n Relative time range:\n the range is assumed to be in seconds relative to\n the actual timestamp value found in each interval line in the log\n Absolute time range:\n Absolute timestamps are calculated by adding the timestamp found\n with the recorded interval to the [latest, optional] start time\n found in the log. The start time is indicated in the log with\n a \"#[StartTime: \" followed by the start time in seconds.\n\n Params:\n dest_histogram if None, created a new histogram, else adds\n the new interval histogram to it\n range_start_time_sec The absolute or relative start of the expected\n time range, in seconds.\n range_start_time_sec The absolute or relative end of the expected\n time range, in seconds.\n absolute Defines if the passed range is absolute or relative\n\n Return:\n Returns an histogram object if an interval line was found with an\n associated start timestamp value that falls between start_time_sec and\n end_time_sec,\n or null if no such interval line is found.\n Upon encountering any unexpected format errors in reading the next\n interval from the file, this method will return None.\n\n The histogram returned will have it's timestamp set to the absolute\n timestamp calculated from adding the interval's indicated timestamp\n value to the latest [optional] start time found in the log.\n\n Exceptions:\n ValueError if there is a syntax error in one of the float fields\n '''\n while 1:\n line = self.input_file.readline()\n if not line:\n return None\n if line[0] == '#':\n match_res = re_start_time.match(line)\n if 
match_res:\n self.start_time_sec = float(match_res.group(1))\n self.observed_start_time = True\n continue\n match_res = re_base_time.match(line)\n if match_res:\n self.base_time_sec = float(match_res.group(1))\n self.observed_base_time = True\n continue\n\n match_res = re_histogram_interval.match(line)\n if not match_res:\n # probably a legend line that starts with \"\\\"StartTimestamp\"\n continue\n # Decode: startTimestamp, intervalLength, maxTime, histogramPayload\n # Timestamp is expected to be in seconds\n log_time_stamp_in_sec = float(match_res.group(1))\n interval_length_sec = float(match_res.group(2))\n cpayload = match_res.group(4)\n\n if not self.observed_start_time:\n # No explicit start time noted. Use 1st observed time:\n self.start_time_sec = log_time_stamp_in_sec\n self.observed_start_time = True\n\n if not self.observed_base_time:\n # No explicit base time noted.\n # Deduce from 1st observed time (compared to start time):\n if log_time_stamp_in_sec < self.start_time_sec - (365 * 24 * 3600.0):\n # Criteria Note: if log timestamp is more than a year in\n # the past (compared to StartTime),\n # we assume that timestamps in the log are not absolute\n self.base_time_sec = self.start_time_sec\n else:\n # Timestamps are absolute\n self.base_time_sec = 0.0\n self.observed_base_time = True\n\n absolute_start_time_stamp_sec = \\\n log_time_stamp_in_sec + self.base_time_sec\n offset_start_time_stamp_sec = \\\n absolute_start_time_stamp_sec - self.start_time_sec\n\n # Timestamp length is expect to be in seconds\n absolute_end_time_stamp_sec = \\\n absolute_start_time_stamp_sec + interval_length_sec\n\n if absolute:\n start_time_stamp_to_check_range_on = absolute_start_time_stamp_sec\n else:\n start_time_stamp_to_check_range_on = offset_start_time_stamp_sec\n\n if start_time_stamp_to_check_range_on < range_start_time_sec:\n continue\n\n if start_time_stamp_to_check_range_on > range_end_time_sec:\n return None\n if dest_histogram:\n # add the interval histogram 
to the destination histogram\n histogram = dest_histogram\n histogram.decode_and_add(cpayload)\n else:\n histogram = HdrHistogram.decode(cpayload)\n histogram.set_start_time_stamp(absolute_start_time_stamp_sec * 1000.0)\n histogram.set_end_time_stamp(absolute_end_time_stamp_sec * 1000.0)\n return histogram\n"
] | class HistogramLogReader():
def __init__(self, input_file_name, reference_histogram):
    """Create a reader that decodes interval histograms from a log file.

    Params:
        input_file_name      path of the histogram log to read
        reference_histogram  histogram instance used as a reference to
                             create new instances for every interval
                             histogram decoded from the log
    """
    # No StartTime/BaseTime comment has been seen yet; both default to 0.
    self.start_time_sec = 0.0
    self.base_time_sec = 0.0
    self.observed_start_time = False
    self.observed_base_time = False
    self.reference_histogram = reference_histogram
    self.input_file = open(input_file_name, "r")
def get_start_time_sec(self):
    """Return the latest "#[StartTime:" value seen so far (0.0 if none).

    Assuming the "#[StartTime:" comment line precedes the recorded
    intervals, this can safely be consulted after each interval is read
    to determine the offset of that interval's timestamp from the epoch.
    """
    return self.start_time_sec
def _decode_next_interval_histogram(self,
                                    dest_histogram,
                                    range_start_time_sec=0.0,
                                    range_end_time_sec=sys.maxsize,
                                    absolute=False):
    '''Read the next interval histogram from the log, if the interval
    falls within an absolute or relative time range.

    Timestamps are assumed to appear in order in the log file, and as
    such this method will return None upon encountering a timestamp
    larger than range_end_time_sec.

    Relative time range:
        the range is assumed to be in seconds relative to
        the actual timestamp value found in each interval line in the log
    Absolute time range:
        Absolute timestamps are calculated by adding the timestamp found
        with the recorded interval to the [latest, optional] start time
        found in the log. The start time is indicated in the log with
        a "#[StartTime: " followed by the start time in seconds.

    Params:
        dest_histogram       if None, a new histogram is created for the
                             decoded interval; otherwise the interval is
                             decoded and added into dest_histogram
        range_start_time_sec the absolute or relative start of the
                             expected time range, in seconds
        range_end_time_sec   the absolute or relative end of the
                             expected time range, in seconds
        absolute             defines if the passed range is absolute or
                             relative
    Return:
        A histogram object if an interval line was found with an
        associated start timestamp value that falls between
        range_start_time_sec and range_end_time_sec, or None if no such
        interval line is found (including on end of file or on an
        unexpected format error).
        The histogram returned has its timestamps set to the absolute
        values calculated from adding the interval's indicated timestamp
        value to the latest [optional] start time found in the log.
    Exceptions:
        ValueError if there is a syntax error in one of the float fields
    '''
    while 1:
        line = self.input_file.readline()
        if not line:
            # End of file reached without a matching interval.
            return None
        if line[0] == '#':
            # Comment lines may carry StartTime / BaseTime metadata.
            match_res = re_start_time.match(line)
            if match_res:
                self.start_time_sec = float(match_res.group(1))
                self.observed_start_time = True
                continue
            match_res = re_base_time.match(line)
            if match_res:
                self.base_time_sec = float(match_res.group(1))
                self.observed_base_time = True
                continue
        match_res = re_histogram_interval.match(line)
        if not match_res:
            # probably a legend line that starts with "\"StartTimestamp"
            continue
        # Decode: startTimestamp, intervalLength, maxTime, histogramPayload
        # Timestamp is expected to be in seconds
        log_time_stamp_in_sec = float(match_res.group(1))
        interval_length_sec = float(match_res.group(2))
        cpayload = match_res.group(4)

        if not self.observed_start_time:
            # No explicit start time noted. Use 1st observed time:
            self.start_time_sec = log_time_stamp_in_sec
            self.observed_start_time = True

        if not self.observed_base_time:
            # No explicit base time noted.
            # Deduce from 1st observed time (compared to start time):
            if log_time_stamp_in_sec < self.start_time_sec - (365 * 24 * 3600.0):
                # Criteria Note: if log timestamp is more than a year in
                # the past (compared to StartTime),
                # we assume that timestamps in the log are not absolute
                self.base_time_sec = self.start_time_sec
            else:
                # Timestamps are absolute
                self.base_time_sec = 0.0
            self.observed_base_time = True

        absolute_start_time_stamp_sec = \
            log_time_stamp_in_sec + self.base_time_sec
        offset_start_time_stamp_sec = \
            absolute_start_time_stamp_sec - self.start_time_sec

        # Interval length is expected to be in seconds as well.
        absolute_end_time_stamp_sec = \
            absolute_start_time_stamp_sec + interval_length_sec

        if absolute:
            start_time_stamp_to_check_range_on = absolute_start_time_stamp_sec
        else:
            start_time_stamp_to_check_range_on = offset_start_time_stamp_sec

        # Interval precedes the requested range: keep scanning forward.
        if start_time_stamp_to_check_range_on < range_start_time_sec:
            continue

        # Timestamps are ordered, so nothing later can match either.
        if start_time_stamp_to_check_range_on > range_end_time_sec:
            return None
        if dest_histogram:
            # add the interval histogram to the destination histogram
            histogram = dest_histogram
            histogram.decode_and_add(cpayload)
        else:
            histogram = HdrHistogram.decode(cpayload)
        histogram.set_start_time_stamp(absolute_start_time_stamp_sec * 1000.0)
        histogram.set_end_time_stamp(absolute_end_time_stamp_sec * 1000.0)
        return histogram
def get_next_interval_histogram(self,
                                range_start_time_sec=0.0,
                                range_end_time_sec=sys.maxsize,
                                absolute=False):
    '''Decode the next interval histogram whose timestamp lies in range.

    Timestamps are assumed to appear in order in the log file, and as
    such this method will return None upon encountering a timestamp
    larger than range_end_time_sec.

    Relative time range:
        the range is taken in seconds relative to the actual timestamp
        value found in each interval line in the log
    Absolute time range:
        absolute timestamps are calculated by adding the timestamp found
        with the recorded interval to the [latest, optional] start time
        found in the log. The start time is indicated in the log with
        a "#[StartTime: " followed by the start time in seconds.

    Params:
        range_start_time_sec the absolute or relative start of the
                             expected time range, in seconds
        range_end_time_sec   the absolute or relative end of the
                             expected time range, in seconds
        absolute             defines if the passed range is absolute or
                             relative
    Return:
        A newly created histogram object if a matching interval line was
        found, or None otherwise (end of file, format error, or a
        timestamp past range_end_time_sec).
        The histogram returned has its timestamps set to the absolute
        values calculated from the interval timestamp plus the latest
        [optional] start time found in the log.
    Exceptions:
        ValueError if there is a syntax error in one of the float fields
    '''
    # Passing None as the destination asks the decoder to build a fresh
    # histogram instead of accumulating into an existing one.
    return self._decode_next_interval_histogram(
        None,
        range_start_time_sec=range_start_time_sec,
        range_end_time_sec=range_end_time_sec,
        absolute=absolute)
def close(self):
    """Close the underlying log file handle."""
    self.input_file.close()
|
dwkim78/upsilon | upsilon/utils/utils.py | sigma_clipping | python | def sigma_clipping(date, mag, err, threshold=3, iteration=1):
# Check length.
if (len(date) != len(mag)) \
or (len(date) != len(err)) \
or (len(mag) != len(err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# By magnitudes
for i in range(int(iteration)):
mean = np.median(mag)
std = np.std(mag)
index = (mag >= mean - threshold*std) & (mag <= mean + threshold*std)
date = date[index]
mag = mag[index]
err = err[index]
return date, mag, err | Remove any fluctuated data points by magnitudes.
Parameters
----------
date : array_like
An array of dates.
mag : array_like
An array of magnitudes.
err : array_like
An array of magnitude errors.
threshold : float, optional
Threshold for sigma-clipping.
iteration : int, optional
The number of iteration.
Returns
-------
date : array_like
Sigma-clipped dates.
mag : array_like
Sigma-clipped magnitudes.
err : array_like
Sigma-clipped magnitude errors. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/utils/utils.py#L4-L47 | null | import numpy as np
|
dwkim78/upsilon | upsilon/extract_features/feature_set.py | get_feature_set_all | python | def get_feature_set_all():
features = get_feature_set()
features.append('cusum')
features.append('eta')
features.append('n_points')
features.append('period_SNR')
features.append('period_log10FAP')
features.append('period_uncertainty')
features.append('weighted_mean')
features.append('weighted_std')
features.sort()
return features | Return a list of entire features.
A set of entire features regardless of being used to train a model or
predict a class.
Returns
-------
feature_names : list
A list of features' names. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/feature_set.py#L23-L49 | [
"def get_feature_set():\n \"\"\"\n Return a list of features' names.\n\n Features' name that are used to train a model and predict a class.\n Sorted by the names.\n\n Returns\n -------\n feature_names : list\n A list of features' names.\n \"\"\"\n\n features = ['amplitude', 'hl_amp_ratio', 'kurtosis', 'period',\n 'phase_cusum', 'phase_eta', 'phi21', 'phi31', 'quartile31',\n 'r21', 'r31', 'shapiro_w', 'skewness', 'slope_per10',\n 'slope_per90', 'stetson_k']\n features.sort()\n\n return features\n"
] | def get_feature_set():
"""
Return a list of features' names.
Features' name that are used to train a model and predict a class.
Sorted by the names.
Returns
-------
feature_names : list
A list of features' names.
"""
features = ['amplitude', 'hl_amp_ratio', 'kurtosis', 'period',
'phase_cusum', 'phase_eta', 'phi21', 'phi31', 'quartile31',
'r21', 'r31', 'shapiro_w', 'skewness', 'slope_per10',
'slope_per90', 'stetson_k']
features.sort()
return features
if __name__ == '__main__':
print(get_feature_set()) |
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.shallow_run | python | def shallow_run(self):
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std) | Derive not-period-based features. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L86-L139 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
    """Set up the extractor: coerce inputs to numpy arrays, validate
    lengths, and clamp the thread count and minimum period.

    See the class docstring for parameter descriptions.
    """
    # Coerce the light-curve columns to numpy arrays.
    self.date = date if isinstance(date, np.ndarray) else np.array(date)
    self.mag = mag if isinstance(mag, np.ndarray) else np.array(mag)
    if err is None:
        # No per-point errors supplied: assume a uniform error equal
        # to the overall scatter of the magnitudes.
        self.err = np.ones(len(self.mag)) * np.std(self.mag)
    else:
        self.err = err if isinstance(err, np.ndarray) else np.array(err)
    # All three arrays must have one and the same length.
    if len({len(self.date), len(self.mag), len(self.err)}) != 1:
        raise RuntimeError('The length of date, mag, and err must be same.')
    # Warn (but keep going) when the light curve is sparse.
    min_n_data = 80
    if len(self.date) < min_n_data:
        warnings.warn('The number of data points are less than %d.'
                      % min_n_data)
    # Clamp the requested thread count into [1, cpu_count()].
    n_threads = int(n_threads)
    cpu_cap = multiprocessing.cpu_count()
    if n_threads > cpu_cap:
        self.n_threads = cpu_cap
    elif n_threads <= 0:
        self.n_threads = 1
    else:
        self.n_threads = n_threads
    # A non-positive minimum period falls back to the default 0.03.
    min_period = float(min_period)
    self.min_period = min_period if min_period > 0 else 0.03
def run(self):
    """Run every feature-extraction stage.

    shallow_run() must come first: it computes values (weighted mean,
    std, ...) that deep_run() later reads.
    """
    self.shallow_run()
    self.deep_run()
def deep_run(self):
    """Derive period-based features.

    Runs Lomb-Scargle period finding, then folds the light curve at
    twice the derived period and computes features (eta, slope
    percentiles, cusum) on the phase-folded curve. Must run after
    shallow_run(), which sets weighted_std.
    """
    self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
    # Fold at 2 * period so both minima of eclipsing binaries survive.
    folded = self.date % (self.period * 2.)
    order = np.argsort(folded)
    folded_date = folded[order]
    folded_mag = self.mag[order]
    # Features measured on the phase-folded light curve.
    self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
    self.slope_per10, self.slope_per90 = \
        self.slope_percentile(folded_date, folded_mag)
    self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
    """
    Period finding using the Lomb-Scargle algorithm.

    Derives the dominant period with a multi-threaded Lomb-Scargle
    periodogram (pLS), then fits a 3rd-order Fourier series to the
    phase-folded light curve and stores the derived features on the
    instance: f, period, period_uncertainty, period_log10FAP,
    period_SNR, amplitude, r21, r31, f_phase, phi21, phi31.

    Parameters
    ----------
    date : array_like
        An array of observed date, in days.
    mag : array_like
        An array of observed magnitude.
    n_threads : int
        The number of threads to use.
    min_period : float
        The minimum period to calculate.
    """
    # DO NOT CHANGE THESE PARAMETERS.
    oversampling = 3.
    # hifac bounds the highest scanned frequency, derived from the time
    # span, the number of points, and the requested minimum period.
    hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
    # Minimum hifac
    if hifac < 100:
        hifac = 100
    # Lomb-Scargle.
    fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
                                          n_threads)
    # Peak frequency -> period plus significance / SNR diagnostics.
    self.f = fx[jmax]
    self.period = 1. / self.f
    self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
    self.period_log10FAP = \
        np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
    # self.f_SNR1 = fy[jmax] / np.median(fy)
    self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
    # Fit Fourier Series of order 3.
    order = 3
    # Initial guess of Fourier coefficients.
    p0 = np.ones(order * 2 + 1)
    date_period = (date % self.period) / self.period
    p1, success = leastsq(self.residuals, p0,
                          args=(date_period, mag, order))
    # fitted_y = self.FourierSeries(p1, date_period, order)
    # print p1, self.mean, self.median
    # plt.plot(date_period, self.mag, 'b+')
    # plt.show()
    # Derive Fourier features for the first period.
    # Petersen, J. O., 1986, A&A
    self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
    self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
    self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
    self.f_phase = np.arctan(-p1[1] / p1[2])
    self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
    self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
    # NOTE: the triple-quoted block below is dead code (a disabled
    # second-period search) kept verbatim as an inert string literal.
    """
    # Derive a second period.
    # Whitening a light curve.
    residual_mag = mag - fitted_y
    # Lomb-Scargle again to find the second period.
    omega_top, power_top = search_frequencies(date, residual_mag, err,
        #LS_kwargs={'generalized':True, 'subtract_mean':True},
        n_eval=5000, n_retry=3, n_save=50)
    self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
    self.f2 = 1. / self.period2
    self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
        * (len(self.date) - 1) / 2.
    # Fit Fourier Series again.
    p0 = [1.] * order * 2
    date_period = (date % self.period) / self.period
    p2, success = leastsq(self.residuals, p0,
        args=(date_period, residual_mag, order))
    fitted_y = self.FourierSeries(p2, date_period, order)
    #plt.plot(date%self.period2, residual_mag, 'b+')
    #plt.show()
    # Derive Fourier features for the first second.
    self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
    self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
    self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
    self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
    self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
    self.f2_phase = np.arctan(-p2[1] / p2[2])
    self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
    self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
    self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
    self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
    # Calculate features using the first and second periods.
    self.f12_ratio = self.f2 / self.f1
    self.f12_remain = self.f1 % self.f2 \
        if self.f1 > self.f2 else self.f2 % self.f1
    self.f12_amp = self.f2_amp / self.f1_amp
    self.f12_phase = self.f2_phase - self.f1_phase
    """
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
    """
    Get the uncertainty of a period.

    The uncertainty is defined as half the width of the frequency peak,
    measured between the first frequencies on either side whose power
    drops to or below median + standard deviation of the local power
    spectrum. No Gaussian scaling factor (2 * sqrt(2 * ln 2)) is
    applied, since the resolution around the peak may be coarse.

    Parameters
    ----------
    fx : array_like
        An array of frequencies.
    fy : array_like
        An array of amplitudes (powers).
    jmax : int
        Index of the peak frequency.
    fx_width : int, optional
        Half-width (in samples) of the window used around the peak.

    Returns
    -------
    period_uncertainty : float
        Estimated period uncertainty.
    """
    # Clip the window [jmax - fx_width, jmax + fx_width) to the array.
    lo = max(jmax - fx_width, 0)
    hi = min(jmax + fx_width, len(fx) - 1)
    fx_win = fx[lo:hi]
    fy_win = fy[lo:hi]
    noise_floor = np.median(fy_win)
    noise_std = np.std(fy_win)
    # Locate the peak within the window.
    peak = np.argmax(fy_win)
    # Frequencies whose power has fallen back to the noise level.
    quiet = np.where(fy_win <= noise_floor + noise_std)[0]
    # Nearest quiet bins on either side of the peak mark the full width.
    left = quiet[quiet < peak]
    left_edge = left[-1] if len(left) else 0
    right = quiet[quiet > peak]
    right_edge = right[0] if len(right) else len(fy_win) - 1
    # Half of the full width (converted to period space) is taken as
    # the period uncertainty.
    return (1. / fx_win[left_edge] - 1. / fx_win[right_edge]) / 2.
def residuals(self, pars, x, y, order):
    """
    Residual (observed minus model) of a Fourier-series fit.

    Parameters
    ----------
    pars : array_like
        Fourier series coefficients.
    x : array_like
        An array of dates (phases).
    y : array_like
        An array of observed values to fit.
    order : int
        Order of the Fourier series.
    """
    model = self.fourier_series(pars, x, order)
    return y - model
def fourier_series(self, pars, x, order):
    """
    Evaluate a Fourier series at phase(s) x.

    Parameters
    ----------
    pars : array_like
        Coefficients [a0, b1, c1, b2, c2, ...]; a0 is the constant
        term, b_i / c_i are the sine / cosine amplitudes of harmonic i.
    x : array_like
        An array of dates divided by the period. It doesn't need to be
        sorted.
    order : int
        Number of harmonics in the series.

    Returns
    -------
    The series value(s) at x (scalar or array, matching x).
    """
    # Fix: the original accumulated into a local named `sum`, shadowing
    # the builtin; use a neutral name instead.
    total = pars[0]
    for i in range(order):
        phase = 2 * np.pi * (i + 1) * x
        total += pars[i * 2 + 1] * np.sin(phase) \
            + pars[i * 2 + 2] * np.cos(phase)
    return total
def get_stetson_k(self, mag, avg, err):
    """
    Return the Stetson K feature (a robust kurtosis-like measure).

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    avg : float
        Average magnitude.
    err : array_like
        Magnitude errors.

    Returns
    -------
    stetson_k : float
        Stetson K value.
    """
    # Error-normalized residuals.
    delta = (mag - avg) / err
    n = len(mag)
    return np.sum(np.fabs(delta)) / np.sqrt(np.sum(delta * delta)) / np.sqrt(n)
def half_mag_amplitude_ratio(self, mag, avg, weight):
    """
    Weighted ratio of the scatter of fainter vs. brighter points.

    Compares the weighted spread of magnitudes above (fainter than) the
    average with that below (brighter than) it; by construction this
    ratio tends to be higher for eclipsing binaries.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    avg : float
        Average magnitude.
    weight : array_like
        Per-point weights.

    Returns
    -------
    hl_ratio : float
        sqrt of (weighted faint-side variance / bright-side variance).
    """
    # Fainter points: larger magnitude means fainter.
    faint = mag > avg
    w_faint = weight[faint]
    faint_var = np.sum((mag[faint] - avg) ** 2 * w_faint) / np.sum(w_faint)
    # Brighter (or equal) points are the complement.
    bright = ~faint
    w_bright = weight[bright]
    bright_var = np.sum((mag[bright] - avg) ** 2 * w_bright) / np.sum(w_bright)
    return np.sqrt(faint_var / bright_var)
def half_mag_amplitude_ratio2(self, mag, avg):
    """
    Unweighted ratio of the scatter of fainter vs. brighter points.

    Same idea as half_mag_amplitude_ratio but without weights; tends to
    be higher for eclipsing binaries.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    avg : float
        Average magnitude.

    Returns
    -------
    hl_ratio : float
        sqrt of (faint-side mean squared deviation / bright-side one).
    """
    # Mean squared deviation of the fainter-than-average points.
    faint = mag[mag > avg]
    faint_msd = np.sum((faint - avg) ** 2) / len(faint)
    # Mean squared deviation of the brighter-or-equal points.
    bright = mag[mag <= avg]
    bright_msd = np.sum((avg - bright) ** 2) / len(bright)
    return np.sqrt(faint_msd / bright_msd)
def get_eta(self, mag, std):
    """
    Return the Eta feature: mean squared successive difference,
    normalized by the variance.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    std : float
        Standard deviation of the magnitudes.

    Returns
    -------
    eta : float
        The value of the Eta index.
    """
    step = np.diff(mag)
    return np.sum(step * step) / (len(mag) - 1.) / std / std
def slope_percentile(self, date, mag):
    """
    Return the 10th and 90th percentile of date-over-magnitude slopes.

    Note: the quantity computed is d(date) / d(mag), i.e. the inverse
    of the usual slope, between consecutive points.

    Parameters
    ----------
    date : array_like
        Phase-folded dates, sorted.
    mag : array_like
        Phase-folded magnitudes, sorted by date.

    Returns
    -------
    per_10 : float
        10th percentile of the slopes.
    per_90 : float
        90th percentile of the slopes.
    """
    d_date = np.diff(date)
    d_mag = np.diff(mag)
    # Drop flat magnitude segments to avoid division by zero.
    keep = d_mag != 0.
    slope = d_date[keep] / d_mag[keep]
    return np.percentile(slope, 10.), np.percentile(slope, 90.)
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.deep_run | python | def deep_run(self):
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag) | Derive period-based features. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L141-L166 | [
"def get_period_LS(self, date, mag, n_threads, min_period):\n \"\"\"\n Period finding using the Lomb-Scargle algorithm.\n\n Finding two periods. The second period is estimated after whitening\n the first period. Calculating various other features as well\n using derived periods.\n\n Parameters\n ----------\n date : array_like\n An array of observed date, in days.\n mag : array_like\n An array of observed magnitude.\n n_threads : int\n The number of threads to use.\n min_period : float\n The minimum period to calculate.\n \"\"\"\n\n # DO NOT CHANGE THESE PARAMETERS.\n oversampling = 3.\n hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)\n\n # Minimum hifac\n if hifac < 100:\n hifac = 100\n\n # Lomb-Scargle.\n fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,\n n_threads)\n\n self.f = fx[jmax]\n self.period = 1. / self.f\n self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)\n self.period_log10FAP = \\\n np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])\n # self.f_SNR1 = fy[jmax] / np.median(fy)\n self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)\n\n # Fit Fourier Series of order 3.\n order = 3\n # Initial guess of Fourier coefficients.\n p0 = np.ones(order * 2 + 1)\n date_period = (date % self.period) / self.period\n p1, success = leastsq(self.residuals, p0,\n args=(date_period, mag, order))\n # fitted_y = self.FourierSeries(p1, date_period, order)\n\n # print p1, self.mean, self.median\n # plt.plot(date_period, self.mag, 'b+')\n # plt.show()\n\n # Derive Fourier features for the first period.\n # Petersen, J. O., 1986, A&A\n self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)\n self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude\n self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude\n self.f_phase = np.arctan(-p1[1] / p1[2])\n self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase\n self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. 
* self.f_phase\n\n \"\"\"\n # Derive a second period.\n # Whitening a light curve.\n residual_mag = mag - fitted_y\n\n # Lomb-Scargle again to find the second period.\n omega_top, power_top = search_frequencies(date, residual_mag, err,\n #LS_kwargs={'generalized':True, 'subtract_mean':True},\n n_eval=5000, n_retry=3, n_save=50)\n\n self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]\n self.f2 = 1. / self.period2\n self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \\\n * (len(self.date) - 1) / 2.\n\n # Fit Fourier Series again.\n p0 = [1.] * order * 2\n date_period = (date % self.period) / self.period\n p2, success = leastsq(self.residuals, p0,\n args=(date_period, residual_mag, order))\n fitted_y = self.FourierSeries(p2, date_period, order)\n\n #plt.plot(date%self.period2, residual_mag, 'b+')\n #plt.show()\n\n # Derive Fourier features for the first second.\n self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)\n self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp\n self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp\n self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp\n self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp\n self.f2_phase = np.arctan(-p2[1] / p2[2])\n self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase\n self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase\n self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase\n self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase\n\n # Calculate features using the first and second periods.\n self.f12_ratio = self.f2 / self.f1\n self.f12_remain = self.f1 % self.f2 \\\n if self.f1 > self.f2 else self.f2 % self.f1\n self.f12_amp = self.f2_amp / self.f1_amp\n self.f12_phase = self.f2_phase - self.f1_phase\n \"\"\"\n",
"def get_eta(self, mag, std):\n \"\"\"\n Return Eta feature.\n\n Parameters\n ----------\n mag : array_like\n An array of magnitudes.\n std : array_like\n A standard deviation of magnitudes.\n\n Returns\n -------\n eta : float\n The value of Eta index.\n \"\"\"\n\n diff = mag[1:] - mag[:len(mag) - 1]\n eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std\n\n return eta\n",
"def slope_percentile(self, date, mag):\n \"\"\"\n Return 10% and 90% percentile of slope.\n\n Parameters\n ----------\n date : array_like\n An array of phase-folded date. Sorted.\n mag : array_like\n An array of phase-folded magnitudes. Sorted by date.\n\n Returns\n -------\n per_10 : float\n 10% percentile values of slope.\n per_90 : float\n 90% percentile values of slope.\n \"\"\"\n\n date_diff = date[1:] - date[:len(date) - 1]\n mag_diff = mag[1:] - mag[:len(mag) - 1]\n\n # Remove zero mag_diff.\n index = np.where(mag_diff != 0.)\n date_diff = date_diff[index]\n mag_diff = mag_diff[index]\n\n # Derive slope.\n slope = date_diff / mag_diff\n\n percentile_10 = np.percentile(slope, 10.)\n percentile_90 = np.percentile(slope, 90.)\n\n return percentile_10, percentile_90\n",
"def get_cusum(self, mag):\n \"\"\"\n Return max - min of cumulative sum.\n\n Parameters\n ----------\n mag : array_like\n An array of magnitudes.\n\n Returns\n -------\n mm_cusum : float\n Max - min of cumulative sum.\n \"\"\"\n\n c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std\n\n return np.max(c) - np.min(c)\n"
] | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
    """Execute the full feature-extraction pipeline.

    ``shallow_run`` must come first: it computes the weighted statistics
    (weighted mean/std, etc.) that ``deep_run`` relies on.
    """
    for stage in (self.shallow_run, self.deep_run):
        stage()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def get_period_LS(self, date, mag, n_threads, min_period):
    """
    Period finding using the Lomb-Scargle algorithm.

    Derives the dominant period (with uncertainty, false-alarm
    probability, and SNR) and then fits a third-order Fourier series to
    the phase-folded light curve to extract amplitude and phase features.

    Parameters
    ----------
    date : array_like
        An array of observed date, in days.
    mag : array_like
        An array of observed magnitude.
    n_threads : int
        The number of threads to use.
    min_period : float
        The minimum period to calculate.
    """
    # NOTE(review): a large dead block computing a whitened second period
    # was removed; it referenced undefined attributes (self.f1, err) and a
    # misspelled method (FourierSeries) and was kept only as a no-op string.

    # DO NOT CHANGE THESE PARAMETERS.
    oversampling = 3.
    hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)

    # Enforce a minimum frequency range.
    if hifac < 100:
        hifac = 100

    # Lomb-Scargle periodogram.
    fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
                                          n_threads)

    self.f = fx[jmax]
    self.period = 1. / self.f
    self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
    self.period_log10FAP = \
        np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
    # Peak significance relative to the periodogram noise floor.
    self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)

    # Fit a Fourier series of order 3 to the phase-folded light curve.
    order = 3
    # Initial guess of Fourier coefficients.
    p0 = np.ones(order * 2 + 1)
    date_period = (date % self.period) / self.period
    p1, success = leastsq(self.residuals, p0,
                          args=(date_period, mag, order))

    # Derive Fourier features for the dominant period.
    # Petersen, J. O., 1986, A&A
    self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
    self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
    self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
    self.f_phase = np.arctan(-p1[1] / p1[2])
    self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
    self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
    """
    Estimate the uncertainty of the detected period.

    Defined as half the width, in period space, of the region around the
    periodogram peak where the power stays above the local
    median + one standard deviation. No Gaussian scaling factor
    (2 * sqrt(2 * ln 2)) is applied since the sampling around the peak
    may be coarse.

    Parameters
    ----------
    fx : array_like
        An array of frequencies.
    fy : array_like
        An array of periodogram amplitudes.
    jmax : int
        Index of the peak frequency.
    fx_width : int, optional
        Half-width (in bins) of the spectrum window used for the estimate.

    Returns
    -------
    p_uncertain : float
        Period uncertainty.
    """
    # Clip the examination window to the array bounds.
    lo = max(jmax - fx_width, 0)
    hi = min(jmax + fx_width, len(fx) - 1)
    fx_win = fx[lo:hi]
    fy_win = fy[lo:hi]

    threshold = np.median(fy_win) + np.std(fy_win)
    peak = np.argmax(fy_win)

    # Bins whose power drops to or below the threshold.
    below = np.where(fy_win <= threshold)[0]

    # Nearest such bin on each side of the peak (or the window edge).
    lefts = below[below < peak]
    left = lefts[-1] if len(lefts) else 0
    rights = below[below > peak]
    right = rights[0] if len(rights) else len(fy_win) - 1

    # Half of the full width, converted to period space.
    return (1. / fx_win[left] - 1. / fx_win[right]) / 2.
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
    """
    Evaluate a Fourier series.

    Parameters
    ----------
    pars : array_like
        Series parameters: a constant term followed by (sin, cos)
        coefficient pairs per harmonic, so ``len(pars) == order * 2 + 1``.
    x : array_like
        Dates divided by the period (phase). It doesn't need to be sorted.
    order : int
        Order of the Fourier series.

    Returns
    -------
    total : array_like
        The series evaluated at ``x``.
    """
    # Renamed the accumulator: the original shadowed the builtin ``sum``.
    total = pars[0]
    for i in range(order):
        # Hoist the harmonic angle so sin and cos share one computation.
        angle = 2 * np.pi * (i + 1) * x
        total += pars[i * 2 + 1] * np.sin(angle) \
            + pars[i * 2 + 2] * np.cos(angle)
    return total
def get_stetson_k(self, mag, avg, err):
    """
    Compute the Stetson K index.

    Parameters
    ----------
    mag : array_like
        An array of magnitude.
    avg : float
        An average value of magnitudes.
    err : array_like
        An array of magnitude errors.

    Returns
    -------
    stetson_k : float
        Stetson K value.
    """
    # Error-normalized deviations from the average.
    scaled = (mag - avg) / err
    numerator = np.sum(np.fabs(scaled))
    denominator = np.sqrt(np.sum(scaled * scaled)) * np.sqrt(len(mag))
    return numerator / denominator
def half_mag_amplitude_ratio(self, mag, avg, weight):
    """
    Weighted amplitude ratio of faint to bright points.

    Points fainter (numerically larger magnitude) than ``avg`` are
    compared against points brighter than or equal to ``avg``, with
    per-point weights. By construction this ratio tends to be higher
    for eclipsing binaries than for other variable types.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    avg : float
        An average value of magnitudes.
    weight : array_like
        An array of weight.

    Returns
    -------
    hl_ratio : float
        Square root of the weighted variance ratio (faint / bright).
    """
    def _weighted_scatter(mask):
        # Weighted mean squared deviation from avg over the masked points.
        w = weight[mask]
        m = mag[mask]
        return np.sum((m - avg) ** 2 * w) / np.sum(w)

    faint = _weighted_scatter(mag > avg)     # fainter than average
    bright = _weighted_scatter(mag <= avg)   # brighter than average
    return np.sqrt(faint / bright)
def half_mag_amplitude_ratio2(self, mag, avg):
    """
    Unweighted amplitude ratio of faint to bright points.

    Same idea as ``half_mag_amplitude_ratio`` but without weights:
    mean squared deviation of points fainter than ``avg`` over that of
    points brighter than or equal to ``avg``. Tends to be higher for
    eclipsing binaries than for other variable types.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    avg : float
        An average value of magnitudes.

    Returns
    -------
    hl_ratio : float
        Square root of the mean-squared-deviation ratio (faint / bright).
    """
    fainter = mag[mag > avg]
    brighter = mag[mag <= avg]
    faint_msd = np.sum((fainter - avg) ** 2) / len(fainter)
    bright_msd = np.sum((avg - brighter) ** 2) / len(brighter)
    return np.sqrt(faint_msd / bright_msd)
def get_eta(self, mag, std):
    """
    Compute the Eta variability index.

    Mean squared successive difference of the magnitudes, normalized by
    their variance.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes (in time order).
    std : array_like
        A standard deviation of magnitudes.

    Returns
    -------
    eta : float
        The value of Eta index.
    """
    successive = np.diff(mag)
    return np.sum(successive * successive) / (len(mag) - 1.) / std / std
def slope_percentile(self, date, mag):
    """
    10th and 90th percentiles of point-to-point slopes (dt / dm).

    Parameters
    ----------
    date : array_like
        An array of phase-folded date. Sorted.
    mag : array_like
        An array of phase-folded magnitudes. Sorted by date.

    Returns
    -------
    per_10 : float
        10% percentile values of slope.
    per_90 : float
        90% percentile values of slope.
    """
    dt = np.diff(date)
    dm = np.diff(mag)

    # Drop pairs with no magnitude change to avoid division by zero.
    nonzero = dm != 0.
    slopes = dt[nonzero] / dm[nonzero]

    return np.percentile(slopes, 10.), np.percentile(slopes, 90.)
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
    """
    Return all features with its names.

    Regardless of being used for train and prediction. Sorted by the
    names. Delegates to ``get_features_all``; the retired filtering
    implementation (previously kept as a no-op string literal) has been
    removed.

    Returns
    -------
    all_features : OrderedDict
        Features dictionary.
    """
    return self.get_features_all()
def get_features_all(self):
    """
    Return every known feature, sorted by name.

    Instance attributes whose names appear in the module-level
    ``feature_names_list_all`` are collected, regardless of being used
    for train and prediction.

    Returns
    -------
    all_features : OrderedDict
        Feature name -> value, ordered alphabetically by name.
    """
    attributes = vars(self)
    selected = {name: value for name, value in attributes.items()
                if name in feature_names_list_all}
    # Sort by the keys (i.e. feature names) for a stable ordering.
    return OrderedDict(sorted(selected.items(), key=lambda kv: kv[0]))
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.get_period_LS | python | def get_period_LS(self, date, mag, n_threads, min_period):
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
""" | Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L168-L273 | [
"def fasper(x, y, ofac, hifac, n_threads, MACC=4):\n \"\"\"\n Given abscissas x (which need not be equally spaced) and ordinates\n y, and given a desired oversampling factor ofac (a typical value\n being 4 or larger). this routine creates an array wk1 with a\n sequence of nout increasing frequencies (not angular frequencies)\n up to hifac times the \"average\" Nyquist frequency, and creates\n an array wk2 with the values of the Lomb normalized periodogram at\n those frequencies. The arrays x and y are not altered. This\n routine also returns jmax such that wk2(jmax) is the maximum\n element in wk2, and prob, an estimate of the significance of that\n maximum against the hypothesis of random noise. A small value of prob\n indicates that a significant periodic signal is present.\n\n Reference: \n Press, W. H. & Rybicki, G. B. 1989\n ApJ vol. 338, p. 277-280.\n Fast algorithm for spectral analysis of unevenly sampled data\n (1989ApJ...338..277P)\n\n Arguments:\n X : Abscissas array, (e.g. an array of times).\n Y : Ordinates array, (e.g. corresponding counts).\n Ofac : Oversampling factor.\n Hifac : Hifac * \"average\" Nyquist frequency = highest frequency\n for which values of the Lomb normalized periodogram will\n be calculated.\n n_threads : number of threads to use.\n\n Returns:\n Wk1 : An array of Lomb periodogram frequencies.\n Wk2 : An array of corresponding values of the Lomb periodogram.\n Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)\n Jmax : The array index corresponding to the MAX( Wk2 ).\n Prob : False Alarm Probability of the largest Periodogram value\n MACC : Number of interpolation points per 1/4 cycle\n of highest frequency\n\n History:\n 02/23/2009, v1.0, MF\n Translation of IDL code (orig. 
Numerical recipies)\n \"\"\"\n #Check dimensions of input arrays\n n = long(len(x))\n if n != len(y):\n print('Incompatible arrays.')\n return\n\n #print x, y, hifac, ofac\n\n nout = int(0.5*ofac*hifac*n)\n nfreqt = long(ofac*hifac*n*MACC) #Size the FFT as next power\n nfreq = 64 # of 2 above nfreqt.\n\n while nfreq < nfreqt:\n nfreq = 2*nfreq\n\n ndim = long(2*nfreq)\n\n #Compute the mean, variance\n ave = y.mean()\n ##sample variance because the divisor is N-1\n var = ((y - y.mean())**2).sum()/(len(y) - 1) \n # and range of the data.\n xmin = x.min()\n xmax = x.max()\n xdif = xmax - xmin\n\n #extrapolate the data into the workspaces\n if is_pyfftw:\n wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.\n wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.\n else:\n wk1 = zeros(ndim, dtype='complex')\n wk2 = zeros(ndim, dtype='complex')\n\n fac = ndim/(xdif*ofac)\n fndim = ndim\n ck = ((x - xmin)*fac) % fndim\n ckk = (2.0*ck) % fndim\n\n for j in range(0, n):\n __spread__(y[j] - ave, wk1, ndim, ck[j], MACC)\n __spread__(1.0, wk2, ndim, ckk[j], MACC)\n\n #Take the Fast Fourier Transforms.\n if is_pyfftw:\n fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE',\n threads=n_threads)\n wk1 = fft_wk1() * len(wk1)\n fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE',\n threads=n_threads)\n wk2 = fft_wk2() * len(wk2)\n else:\n wk1 = ifft(wk1)*len(wk1)\n wk2 = ifft(wk2)*len(wk1)\n\n wk1 = wk1[1:nout + 1]\n wk2 = wk2[1:nout + 1]\n rwk1 = wk1.real\n iwk1 = wk1.imag\n rwk2 = wk2.real\n iwk2 = wk2.imag\n\n df = 1.0/(xdif*ofac)\n\n #Compute the Lomb value for each frequency\n hypo2 = 2.0*abs(wk2)\n hc2wt = rwk2/hypo2\n hs2wt = iwk2/hypo2\n\n cwt = sqrt(0.5 + hc2wt)\n swt = sign(hs2wt)*(sqrt(0.5 - hc2wt))\n den = 0.5*n + hc2wt*rwk2 + hs2wt*iwk2\n cterm = (cwt*rwk1 + swt*iwk1)**2./den\n sterm = (cwt*iwk1 - swt*rwk1)**2./(n - den)\n\n wk1 = df*(arange(nout, dtype='float') + 1.)\n wk2 = (cterm + sterm)/(2.0*var)\n pmax = 
wk2.max()\n jmax = wk2.argmax()\n\n #Significance estimation\n #expy = exp(-wk2) \n #effm = 2.0*(nout)/ofac \n #sig = effm*expy\n #ind = (sig > 0.01).nonzero()\n #sig[ind] = 1.0-(1.0-expy[ind])**effm\n\n #Estimate significance of largest peak value\n expy = exp(-pmax) \n effm = 2.0*(nout)/ofac\n prob = effm*expy\n\n if prob > 0.01: \n prob = 1.0 - (1.0 - expy)**effm\n\n return wk1, wk2, nout, jmax, prob\n",
"def getSignificance(wk1, wk2, nout, ofac):\n \"\"\" \n Returns the peak false alarm probabilities\n Hence the lower is the probability and the more significant is the peak\n \"\"\"\n expy = exp(-wk2) \n effm = 2.0*(nout)/ofac \n sig = effm*expy\n ind = (sig > 0.01).nonzero()\n sig[ind] = 1.0 - (1.0 - expy[ind])**effm\n\n return sig\n",
"def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):\n \"\"\"\n Get uncertainty of a period.\n\n The uncertainty is defined as the half width of the frequencies\n around the peak, that becomes lower than average + standard deviation\n of the power spectrum.\n\n Since we may not have fine resolution around the peak,\n we do not assume it is gaussian. So, no scaling factor of\n 2.355 (= 2 * sqrt(2 * ln2)) is applied.\n\n Parameters\n ----------\n fx : array_like\n An array of frequencies.\n fy : array_like\n An array of amplitudes.\n jmax : int\n An index at the peak frequency.\n fx_width : int, optional\n Width of power spectrum to calculate uncertainty.\n\n Returns\n -------\n p_uncertain : float\n Period uncertainty.\n \"\"\"\n\n # Get subset\n start_index = jmax - fx_width\n end_index = jmax + fx_width\n if start_index < 0:\n start_index = 0\n if end_index > len(fx) - 1:\n end_index = len(fx) - 1\n\n fx_subset = fx[start_index:end_index]\n fy_subset = fy[start_index:end_index]\n fy_mean = np.median(fy_subset)\n fy_std = np.std(fy_subset)\n\n # Find peak\n max_index = np.argmax(fy_subset)\n\n # Find list whose powers become lower than average + std.\n index = np.where(fy_subset <= fy_mean + fy_std)[0]\n\n # Find the edge at left and right. This is the full width.\n left_index = index[(index < max_index)]\n if len(left_index) == 0:\n left_index = 0\n else:\n left_index = left_index[-1]\n right_index = index[(index > max_index)]\n if len(right_index) == 0:\n right_index = len(fy_subset) - 1\n else:\n right_index = right_index[0]\n\n # We assume the half of the full width is the period uncertainty.\n half_width = (1. / fx_subset[left_index]\n - 1. / fx_subset[right_index]) / 2.\n period_uncertainty = half_width\n\n return period_uncertainty\n"
] | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
    def get_features(self):
        """
        Return all features with their names, sorted by the names.

        Regardless of being used for train and prediction.
        Delegates to ``get_features_all``.

        Returns
        -------
        all_features : OrderedDict
            Features dictionary.
        """
        # NOTE(review): the quoted block below is dead code kept from an
        # earlier revision that filtered on `feature_names_list`; the
        # method now simply delegates to get_features_all().
        '''
        features = {}
        # Get all the names of features.
        all_vars = vars(self)
        for name in all_vars.keys():
            if name in feature_names_list:
                features[name] = all_vars[name]
        # Sort by the keys (i.e. feature names).
        features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
        return features
        '''
        return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.get_period_uncertainty | python | def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty | Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L275-L340 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
    def get_period_LS(self, date, mag, n_threads, min_period):
        """
        Period finding using the Lomb-Scargle algorithm.

        Derives the dominant period with the fast Lomb-Scargle routine
        (pLS.fasper), then fits an order-3 Fourier series to the folded
        light curve and stores amplitude/phase features from its
        coefficients.  (A second, whitened period search exists below as
        dead code in a triple-quoted string.)

        Parameters
        ----------
        date : array_like
            An array of observed date, in days.
        mag : array_like
            An array of observed magnitude.
        n_threads : int
            The number of threads to use.
        min_period : float
            The minimum period to calculate.
        """
        # DO NOT CHANGE THESE PARAMETERS.
        oversampling = 3.
        # hifac controls the highest searched frequency relative to the
        # average Nyquist frequency; derived from min_period.
        hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
        # Enforce a floor on hifac.
        if hifac < 100:
            hifac = 100
        # Fast Lomb-Scargle periodogram (Press & Rybicki 1989).
        fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
                                              n_threads)
        # Peak frequency and the corresponding period.
        self.f = fx[jmax]
        self.period = 1. / self.f
        # Half-width of the spectral peak, in period space.
        self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
        # log10 of the false-alarm probability at the peak.
        self.period_log10FAP = \
            np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
        # self.f_SNR1 = fy[jmax] / np.median(fy)
        # Peak power relative to the background of the periodogram.
        self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
        # Fit a Fourier series of order 3 to the phase-folded curve.
        order = 3
        # Initial guess of Fourier coefficients.
        p0 = np.ones(order * 2 + 1)
        date_period = (date % self.period) / self.period
        p1, success = leastsq(self.residuals, p0,
                              args=(date_period, mag, order))
        # fitted_y = self.FourierSeries(p1, date_period, order)
        # print p1, self.mean, self.median
        # plt.plot(date_period, self.mag, 'b+')
        # plt.show()
        # Derive Fourier features for the first period.
        # Amplitude ratios / phase differences follow
        # Petersen, J. O., 1986, A&A.
        self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
        self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
        self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
        self.f_phase = np.arctan(-p1[1] / p1[2])
        self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
        self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
        # NOTE(review): the triple-quoted block below is dead code for a
        # second (whitened) period search; kept verbatim.
        """
        # Derive a second period.
        # Whitening a light curve.
        residual_mag = mag - fitted_y
        # Lomb-Scargle again to find the second period.
        omega_top, power_top = search_frequencies(date, residual_mag, err,
            #LS_kwargs={'generalized':True, 'subtract_mean':True},
            n_eval=5000, n_retry=3, n_save=50)
        self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
        self.f2 = 1. / self.period2
        self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
            * (len(self.date) - 1) / 2.
        # Fit Fourier Series again.
        p0 = [1.] * order * 2
        date_period = (date % self.period) / self.period
        p2, success = leastsq(self.residuals, p0,
            args=(date_period, residual_mag, order))
        fitted_y = self.FourierSeries(p2, date_period, order)
        #plt.plot(date%self.period2, residual_mag, 'b+')
        #plt.show()
        # Derive Fourier features for the first second.
        self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
        self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
        self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
        self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
        self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
        self.f2_phase = np.arctan(-p2[1] / p2[2])
        self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
        self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
        self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
        self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
        # Calculate features using the first and second periods.
        self.f12_ratio = self.f2 / self.f1
        self.f12_remain = self.f1 % self.f2 \
            if self.f1 > self.f2 else self.f2 % self.f1
        self.f12_amp = self.f2_amp / self.f1_amp
        self.f12_phase = self.f2_phase - self.f1_phase
        """
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.residuals | python | def residuals(self, pars, x, y, order):
return y - self.fourier_series(pars, x, order) | Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L342-L358 | [
"def fourier_series(self, pars, x, order):\n \"\"\"\n Function to fit Fourier Series.\n\n Parameters\n ----------\n x : array_like\n An array of date divided by period. It doesn't need to be sorted.\n pars : array_like\n Fourier series parameters.\n order : int\n An order of Fourier series.\n \"\"\"\n\n sum = pars[0]\n for i in range(order):\n sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \\\n + pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)\n\n return sum\n"
] | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.fourier_series | python | def fourier_series(self, pars, x, order):
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum | Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L360-L379 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
    """Return the Eta variability index.

    Eta is the mean squared successive difference of the magnitudes
    normalised by the variance; low values indicate smooth, correlated
    variability.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    std : float
        A standard deviation of magnitudes.

    Returns
    -------
    eta : float
        The value of the Eta index.
    """
    successive = np.diff(mag)
    # Divisions kept sequential to match the reference float behaviour.
    return np.sum(successive * successive) / (len(mag) - 1.) / std / std
def slope_percentile(self, date, mag):
    """Return the 10th and 90th percentiles of point-to-point slopes.

    The "slope" here is d(date)/d(mag) between consecutive points of a
    phase-folded, date-sorted light curve; flat segments (zero magnitude
    change) are discarded so the ratio stays finite.

    Parameters
    ----------
    date : array_like
        Phase-folded dates, sorted.
    mag : array_like
        Phase-folded magnitudes, sorted by date.

    Returns
    -------
    per_10 : float
        10% percentile of the slopes.
    per_90 : float
        90% percentile of the slopes.
    """
    dt = np.diff(date)
    dm = np.diff(mag)
    nonflat = dm != 0.
    slopes = dt[nonflat] / dm[nonflat]
    return np.percentile(slopes, 10.), np.percentile(slopes, 90.)
def get_cusum(self, mag):
    """Return the range (max - min) of the normalised cumulative sum.

    The running sum of deviations from the weighted mean is scaled by
    ``len(mag) * weighted_std``; its peak-to-peak range measures long
    coherent excursions from the mean level.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.

    Returns
    -------
    mm_cusum : float
        Max - min of the normalised cumulative sum.
    """
    # Sequential divisions preserve the reference float behaviour.
    track = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
    return np.ptp(track)
def get_features2(self):
    """Return feature names and values, sorted by name.

    Scans the instance ``__dict__`` and drops raw inputs (date, mag, err,
    configuration) plus intermediate quantities that are not treated as
    features. Sorting keeps the feature ordering stable across instances.

    Returns
    -------
    names : list
        Feature names, sorted alphabetically.
    values : list
        Feature values, in the same order as ``names``.
    """
    # Raw inputs / config plus intermediates excluded from the feature set.
    excluded = {
        'date', 'mag', 'err', 'n_threads', 'min_period',
        'f', 'f_phase', 'period_log10FAP', 'weight', 'weighted_sum',
        'median', 'mean', 'std',
    }
    attrs = vars(self)
    names = sorted(name for name in attrs if name not in excluded)
    values = [attrs[name] for name in names]
    return names, values
def get_features(self):
    """Return all features with their names.

    Regardless of being used for train and prediction. Sorted by the
    names.

    Returns
    -------
    all_features : OrderedDict
        Features dictionary.
    """
    # A dead, commented-out implementation that filtered by
    # ``feature_names_list`` used to sit here as a no-op string literal;
    # it has been removed. This method simply delegates to
    # get_features_all().
    return self.get_features_all()
def get_features_all(self):
    """Return all features with their names, sorted by name.

    Selects instance attributes whose names appear in the module-level
    ``feature_names_list_all`` whitelist.

    Returns
    -------
    all_features : OrderedDict
        Features dictionary, keyed and ordered by feature name.
    """
    attrs = vars(self)
    selected = {name: value for name, value in attrs.items()
                if name in feature_names_list_all}
    # OrderedDict keeps a deterministic, name-sorted feature ordering.
    return OrderedDict(sorted(selected.items(), key=lambda kv: kv[0]))
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.get_stetson_k | python | def get_stetson_k(self, mag, avg, err):
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k | Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L381-L404 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
    """Store inputs, validate lengths, and clamp configuration values.

    Raises
    ------
    RuntimeError
        If ``date``, ``mag`` and ``err`` do not all have the same length.
    """
    # Set basic values.
    if not isinstance(date, np.ndarray):
        date = np.array(date)
    if not isinstance(mag, np.ndarray):
        mag = np.array(mag)
    self.date = date
    self.mag = mag
    if err is not None:
        if not isinstance(err, np.ndarray):
            err = np.array(err)
        self.err = err
    else:
        # No errors given: assume a uniform error equal to std(mag).
        self.err = np.ones(len(self.mag)) * np.std(self.mag)
    # Check length.
    if (len(self.date) != len(self.mag)) or \
            (len(self.date) != len(self.err)) or \
            (len(self.mag) != len(self.err)):
        raise RuntimeError('The length of date, mag, and err must be same.')
    # if the number of data points is too small.
    # Warn (but proceed) when fewer than 80 points are supplied, since
    # the derived statistics become unreliable.
    min_n_data = 80
    if len(self.date) < min_n_data:
        warnings.warn('The number of data points are less than %d.'
                      % min_n_data)
    # Clamp the requested thread count to [1, cpu_count].
    n_threads = int(n_threads)
    if n_threads > multiprocessing.cpu_count():
        self.n_threads = multiprocessing.cpu_count()
    else:
        if n_threads <= 0:
            self.n_threads = 1
        else:
            self.n_threads = n_threads
    # Fall back to the default minimum period when non-positive.
    min_period = float(min_period)
    if min_period <= 0:
        self.min_period = 0.03
    else:
        self.min_period = min_period
def run(self):
    """Run all feature-extraction stages in order.

    ``shallow_run`` must execute first: it derives statistics (weights,
    weighted mean/std, etc.) that ``deep_run`` depends on.
    """
    for stage in (self.shallow_run, self.deep_run):
        stage()
def shallow_run(self):
    """Derive not-period-based features.

    Sets n_points, weight, weighted_sum, mean/median/std, weighted
    mean/std, skewness, kurtosis, shapiro_w, quartile31, stetson_k,
    hl_amp_ratio, cusum and eta on the instance. Must run before
    ``deep_run``, which reads several of these attributes.
    """
    # Number of data points
    self.n_points = len(self.date)
    # Weight calculation.
    # All zero values.
    if not self.err.any():
        # Every error is zero: fall back to a uniform std(mag) error.
        self.err = np.ones(len(self.mag)) * np.std(self.mag)
    # Some zero values.
    elif not self.err.all():
        # Replace zero errors with the median error so the inverse-error
        # weights below stay finite.
        np.putmask(self.err, self.err==0, np.median(self.err))
    self.weight = 1. / self.err
    self.weighted_sum = np.sum(self.weight)
    # Simple statistics, mean, median and std.
    self.mean = np.mean(self.mag)
    self.median = np.median(self.mag)
    self.std = np.std(self.mag)
    # Weighted mean and std.
    self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
    self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
        * self.weight) / self.weighted_sum)
    # Skewness and kurtosis.
    self.skewness = ss.skew(self.mag)
    self.kurtosis = ss.kurtosis(self.mag)
    # Normalization-test. Shapiro-Wilk test.
    shapiro = ss.shapiro(self.mag)
    self.shapiro_w = shapiro[0]
    # self.shapiro_log10p = np.log10(shapiro[1])
    # Percentile features. Interquartile range (Q3 - Q1).
    self.quartile31 = np.percentile(self.mag, 75) \
        - np.percentile(self.mag, 25)
    # Stetson K.
    self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
    # Ratio between higher and lower amplitude than average.
    self.hl_amp_ratio = self.half_mag_amplitude_ratio(
        self.mag, self.median, self.weight)
    # This second function's value is very similar with the above one.
    # self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
    #     self.mag, self.median)
    # Cusum
    self.cusum = self.get_cusum(self.mag)
    # Eta
    self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
    """Derive period-based features.

    Runs Lomb-Scargle period finding (which sets ``self.period``), then
    computes features on the phase-folded light curve: phase_eta,
    slope_per10/slope_per90 and phase_cusum. Must be called after
    ``shallow_run``, which provides ``self.weighted_std``.
    """
    # Lomb-Scargle period finding; sets self.period and Fourier features.
    self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
    # Fold at twice the period so that eclipsing binaries (two minima
    # per orbital cycle) are handled, then sort by folded phase.
    folded = self.date % (self.period * 2.)
    order = np.argsort(folded)
    folded_date = folded[order]
    folded_mag = self.mag[order]
    # Features on the phase-folded curve.
    self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
    self.slope_per10, self.slope_per90 = \
        self.slope_percentile(folded_date, folded_mag)
    self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
    """
    Period finding using the Lomb-Scargle algorithm.

    Derives the dominant period and related features (uncertainty,
    log10 false-alarm probability, SNR), then fits a 3rd-order Fourier
    series to the phase-folded curve and stores amplitude/phase features.

    Parameters
    ----------
    date : array_like
        An array of observed date, in days.
    mag : array_like
        An array of observed magnitude.
    n_threads : int
        The number of threads to use.
    min_period : float
        The minimum period to calculate.
    """
    # DO NOT CHANGE THESE PARAMETERS.
    oversampling = 3.
    hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
    # Minimum hifac
    if hifac < 100:
        hifac = 100
    # Lomb-Scargle.
    fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
                                          n_threads)
    self.f = fx[jmax]
    self.period = 1. / self.f
    self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
    self.period_log10FAP = \
        np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
    self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
    # Fit a Fourier series of order 3 to the phase-folded light curve.
    order = 3
    # Initial guess of Fourier coefficients.
    p0 = np.ones(order * 2 + 1)
    date_period = (date % self.period) / self.period
    p1, success = leastsq(self.residuals, p0,
                          args=(date_period, mag, order))
    # Derive Fourier features for the first period.
    # Petersen, J. O., 1986, A&A
    self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
    self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
    self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
    self.f_phase = np.arctan(-p1[1] / p1[2])
    self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
    self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
    # NOTE(review): a large block of dead code (a no-op triple-quoted
    # string deriving a second period after whitening) was removed here;
    # recover it from version control if it is ever revived.
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
    """Estimate the period uncertainty from the power-spectrum peak width.

    The uncertainty is half the width (in period) of the frequency
    interval around the peak where the power drops below the local
    median + standard deviation. Because the resolution around the peak
    may be coarse, no Gaussian scaling factor (2.355 = 2*sqrt(2*ln2)) is
    applied.

    Parameters
    ----------
    fx : array_like
        An array of frequencies.
    fy : array_like
        An array of amplitudes.
    jmax : int
        An index at the peak frequency.
    fx_width : int, optional
        Half-width (in samples) of the spectrum window to examine.

    Returns
    -------
    p_uncertain : float
        Period uncertainty.
    """
    # Clamp the window to the spectrum bounds.
    lo = max(jmax - fx_width, 0)
    hi = min(jmax + fx_width, len(fx) - 1)
    fx_win = fx[lo:hi]
    fy_win = fy[lo:hi]
    # Threshold below which power is considered "off-peak".
    threshold = np.median(fy_win) + np.std(fy_win)
    peak = np.argmax(fy_win)
    below = np.where(fy_win <= threshold)[0]
    # Nearest sub-threshold samples on each side of the peak; fall back
    # to the window edges when one side never drops below the threshold.
    left_side = below[below < peak]
    left = left_side[-1] if len(left_side) else 0
    right_side = below[below > peak]
    right = right_side[0] if len(right_side) else len(fy_win) - 1
    # Half of the full width (in period units) is the uncertainty.
    return (1. / fx_win[left] - 1. / fx_win[right]) / 2.
def residuals(self, pars, x, y, order):
    """Residuals between observed values and a Fourier-series model.

    Used as the objective for ``scipy.optimize.leastsq``.

    Parameters
    ----------
    pars : array_like
        Fourier series parameters.
    x : array_like
        An array of date (phase) values.
    y : array_like
        Observed values to fit.
    order : int
        An order of the Fourier series.

    Returns
    -------
    array_like
        ``y`` minus the model evaluated at ``x``.
    """
    model = self.fourier_series(pars, x, order)
    return y - model
def fourier_series(self, pars, x, order):
    """Evaluate a Fourier series at phase values ``x``.

    Parameters
    ----------
    pars : array_like
        Coefficients: ``pars[0]`` is the constant term, followed by one
        (sin, cos) coefficient pair per harmonic.
    x : array_like
        An array of date divided by period (phase). It doesn't need to
        be sorted.
    order : int
        Number of harmonics of the Fourier series.

    Returns
    -------
    float or ndarray
        The series evaluated at ``x``.
    """
    # Renamed the accumulator: the original local shadowed the builtin
    # ``sum``.
    total = pars[0]
    for i in range(order):
        total += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
            + pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
    return total
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.half_mag_amplitude_ratio | python | def half_mag_amplitude_ratio(self, mag, avg, weight):
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std) | Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L406-L449 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
    """Return the Stetson K robust variability index.

    K is the ratio of the mean absolute error-normalised deviation to
    the root-mean-square deviation; it is ~0.798 for a Gaussian and
    insensitive to outliers.

    Parameters
    ----------
    mag : array_like
        An array of magnitude.
    avg : float
        An average value of magnitudes.
    err : array_like
        An array of magnitude errors.

    Returns
    -------
    stetson_k : float
        Stetson K value.
    """
    normed = (mag - avg) / err
    l1 = np.sum(np.fabs(normed))
    l2 = np.sqrt(np.sum(normed * normed))
    return l1 / l2 / np.sqrt(len(mag))
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def slope_percentile(self, date, mag):
    """
    Return 10% and 90% percentile of slope.

    Parameters
    ----------
    date : array_like
        An array of phase-folded date. Sorted.
    mag : array_like
        An array of phase-folded magnitudes. Sorted by date.

    Returns
    -------
    per_10 : float
        10% percentile values of slope.
    per_90 : float
        90% percentile values of slope.
    """
    dt = np.diff(date)
    dm = np.diff(mag)
    # Keep only pairs with a non-zero magnitude change to avoid
    # division by zero.
    nonzero = dm != 0.
    slope = dt[nonzero] / dm[nonzero]
    return np.percentile(slope, 10.), np.percentile(slope, 90.)
def get_cusum(self, mag):
    """
    Return max - min of the normalized cumulative sum.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.

    Returns
    -------
    mm_cusum : float
        Max - min of cumulative sum.
    """
    # Cumulative sum of mean-subtracted magnitudes, normalized by the
    # number of points and the weighted standard deviation.
    track = np.cumsum(mag - self.weighted_mean)
    track = track / len(mag) / self.weighted_std
    return track.max() - track.min()
def get_features2(self):
    """
    Return all features with their names.

    Returns
    -------
    names : list
        Feature names, sorted alphabetically.
    values : list
        Feature values, in the same order as ``names``.
    """
    # Attributes that are raw inputs or intermediate quantities,
    # not features to be exposed.
    excluded = ('date', 'mag', 'err', 'n_threads', 'min_period',
                'f', 'f_phase', 'period_log10FAP',
                'weight', 'weighted_sum',
                'median', 'mean', 'std')
    attrs = vars(self)
    # Sorting keeps the feature order stable across calls/instances.
    feature_names = sorted(name for name in attrs
                           if name not in excluded)
    feature_values = [attrs[name] for name in feature_names]
    return feature_names, feature_values
def get_features(self):
    """
    Return all features with their names.

    Regardless of being used for train and prediction. Sorted by
    the names.

    Returns
    -------
    all_features : OrderedDict
        Features dictionary.
    """
    # The previous name-filtered implementation was dead code kept in
    # a commented-out block; this method is now a thin alias retained
    # for backward compatibility.
    return self.get_features_all()
def get_features_all(self):
    """
    Return every feature with its name, sorted by name.

    Regardless of being used for train and prediction.

    Returns
    -------
    all_features : OrderedDict
        Features dictionary, ordered alphabetically by feature name.
    """
    attrs = vars(self)
    # Keep only attributes registered in the module-level list of
    # known feature names.
    selected = {key: attrs[key] for key in attrs
                if key in feature_names_list_all}
    # Keys are unique, so sorting the items sorts by name.
    return OrderedDict(sorted(selected.items()))
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.half_mag_amplitude_ratio2 | python | def half_mag_amplitude_ratio2(self, mag, avg):
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum) | Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L451-L486 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.get_eta | python | def get_eta(self, mag, std):
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta | Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L488-L508 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.slope_percentile | python | def slope_percentile(self, date, mag):
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90 | Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L510-L543 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.get_cusum | python | def get_cusum(self, mag):
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c) | Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L545-L562 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.get_features2 | python | def get_features2(self):
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values | Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L564-L600 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
dwkim78/upsilon | upsilon/extract_features/extract_features.py | ExtractFeatures.get_features_all | python | def get_features_all(self):
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features | Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L631-L654 | null | class ExtractFeatures:
"""
Extract variability features of a light curve.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
err : array_like, optional
An array of magnitude error. If None, std(mag) will be used.
n_threads : int, optional
The number of cores to use to derive periods.
min_period : float, optional
The minimum period to calculate.
"""
def __init__(self, date, mag, err=None, n_threads=4, min_period=0.03):
# Set basic values.
if not isinstance(date, np.ndarray):
date = np.array(date)
if not isinstance(mag, np.ndarray):
mag = np.array(mag)
self.date = date
self.mag = mag
if err is not None:
if not isinstance(err, np.ndarray):
err = np.array(err)
self.err = err
else:
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Check length.
if (len(self.date) != len(self.mag)) or \
(len(self.date) != len(self.err)) or \
(len(self.mag) != len(self.err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# if the number of data points is too small.
min_n_data = 80
if len(self.date) < min_n_data:
warnings.warn('The number of data points are less than %d.'
% min_n_data)
n_threads = int(n_threads)
if n_threads > multiprocessing.cpu_count():
self.n_threads = multiprocessing.cpu_count()
else:
if n_threads <= 0:
self.n_threads = 1
else:
self.n_threads = n_threads
min_period = float(min_period)
if min_period <= 0:
self.min_period = 0.03
else:
self.min_period = min_period
def run(self):
"""Run feature extraction modules."""
# shallow_run must be executed prior to deep_run
# since shallow_run calculates several values needed for deep_run.
self.shallow_run()
self.deep_run()
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# Should be called after the getPeriodLS() is called.
# Created phased a folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
def get_features(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
'''
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
'''
return self.get_features_all()
|
dwkim78/upsilon | upsilon/extract_features/is_period_alias.py | is_period_alias | python | def is_period_alias(period):
# Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014).
# Period alias occurs mostly at ~1 and ~30.
# Check each 1, 2, 3, 4, 5 factors.
for i in range(1, 6):
# One-day and one-month alias
if (.99 / float(i)) < period < (1.004 / float(i)):
return True
if (1.03 / float(i)) < period < (1.04 / float(i)):
return True
if (29.2 / float(i)) < period < (29.9 / float(i)):
return True
# From candidates from the two fields 01, 08.
# All of them are close to one day (or sidereal) alias.
if (0.96465 / float(i)) < period < (0.96485 / float(i)):
return True
if (0.96725 / float(i)) < period < (0.96745 / float(i)):
return True
if (0.98190 / float(i)) < period < (0.98230 / float(i)):
return True
if (1.01034 / float(i)) < period < (1.01076 / float(i)):
return True
if (1.01568 / float(i)) < period < (1.01604 / float(i)):
return True
if (1.01718 / float(i)) < period < (1.01742 / float(i)):
return True
# From the all candidates from the entire LMC fields.
# Some of these could be overlapped with the above cuts.
if (0.50776 / float(i)) < period < (0.50861 / float(i)):
return True
if (0.96434 / float(i)) < period < (0.9652 / float(i)):
return True
if (0.96688 / float(i)) < period < (0.96731 / float(i)):
return True
if (1.0722 / float(i)) < period < (1.0729 / float(i)):
return True
if (27.1 / float(i)) < period < (27.5 / float(i)):
return True
# Not in the range of any alias.
return False | Check if a given period is possibly an alias.
Parameters
----------
period : float
A period to test if it is a possible alias or not.
Returns
-------
is_alias : boolean
True if the given period is in a range of period alias. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/is_period_alias.py#L1-L57 | null | |
dwkim78/upsilon | upsilon/datasets/base.py | load_EROS_lc | python | def load_EROS_lc(filename='lm0010n22323.time'):
module_path = dirname(__file__)
file_path = join(module_path, 'lightcurves', filename)
data = np.loadtxt(file_path)
date = data[:, 0]
mag = data[:, 1]
err = data[:, 2]
return date, mag, err | Read an EROS light curve and return its data.
Parameters
----------
filename : str, optional
A light-curve filename.
Returns
-------
dates : numpy.ndarray
An array of dates.
magnitudes : numpy.ndarray
An array of magnitudes.
errors : numpy.ndarray
An array of magnitudes errors. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/datasets/base.py#L12-L39 | null | """
Base IO code for all datasets
"""
import sys
import numpy as np
from os.path import dirname
from os.path import join
def load_rf_model():
"""
Return the UPSILoN random forests classifier.
The classifier is trained using OGLE and EROS periodic variables
(Kim et al. 2015).
Returns
-------
clf : sklearn.ensemble.RandomForestClassifier
The UPSILoN random forests classifier.
"""
import gzip
try:
import cPickle as pickle
except:
import pickle
module_path = dirname(__file__)
file_path = join(module_path, 'models/rf.model.sub.github.gz')
# For Python 3.
if sys.version_info.major >= 3:
clf = pickle.load(gzip.open(file_path, 'rb'), encoding='latin1')
# For Python 2.
else:
clf = pickle.load(gzip.open(file_path, 'rb'))
return clf
|
dwkim78/upsilon | upsilon/datasets/base.py | load_rf_model | python | def load_rf_model():
import gzip
try:
import cPickle as pickle
except:
import pickle
module_path = dirname(__file__)
file_path = join(module_path, 'models/rf.model.sub.github.gz')
# For Python 3.
if sys.version_info.major >= 3:
clf = pickle.load(gzip.open(file_path, 'rb'), encoding='latin1')
# For Python 2.
else:
clf = pickle.load(gzip.open(file_path, 'rb'))
return clf | Return the UPSILoN random forests classifier.
The classifier is trained using OGLE and EROS periodic variables
(Kim et al. 2015).
Returns
-------
clf : sklearn.ensemble.RandomForestClassifier
The UPSILoN random forests classifier. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/datasets/base.py#L42-L71 | null | """
Base IO code for all datasets
"""
import sys
import numpy as np
from os.path import dirname
from os.path import join
def load_EROS_lc(filename='lm0010n22323.time'):
"""
Read an EROS light curve and return its data.
Parameters
----------
filename : str, optional
A light-curve filename.
Returns
-------
dates : numpy.ndarray
An array of dates.
magnitudes : numpy.ndarray
An array of magnitudes.
errors : numpy.ndarray
An array of magnitudes errors.
"""
module_path = dirname(__file__)
file_path = join(module_path, 'lightcurves', filename)
data = np.loadtxt(file_path)
date = data[:, 0]
mag = data[:, 1]
err = data[:, 2]
return date, mag, err
|
dwkim78/upsilon | upsilon/extract_features/period_LS_pyfftw.py | fasper | python | def fasper(x, y, ofac, hifac, n_threads, MACC=4):
#Check dimensions of input arrays
n = long(len(x))
if n != len(y):
print('Incompatible arrays.')
return
#print x, y, hifac, ofac
nout = int(0.5*ofac*hifac*n)
nfreqt = long(ofac*hifac*n*MACC) #Size the FFT as next power
nfreq = 64 # of 2 above nfreqt.
while nfreq < nfreqt:
nfreq = 2*nfreq
ndim = long(2*nfreq)
#Compute the mean, variance
ave = y.mean()
##sample variance because the divisor is N-1
var = ((y - y.mean())**2).sum()/(len(y) - 1)
# and range of the data.
xmin = x.min()
xmax = x.max()
xdif = xmax - xmin
#extrapolate the data into the workspaces
if is_pyfftw:
wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
else:
wk1 = zeros(ndim, dtype='complex')
wk2 = zeros(ndim, dtype='complex')
fac = ndim/(xdif*ofac)
fndim = ndim
ck = ((x - xmin)*fac) % fndim
ckk = (2.0*ck) % fndim
for j in range(0, n):
__spread__(y[j] - ave, wk1, ndim, ck[j], MACC)
__spread__(1.0, wk2, ndim, ckk[j], MACC)
#Take the Fast Fourier Transforms.
if is_pyfftw:
fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE',
threads=n_threads)
wk1 = fft_wk1() * len(wk1)
fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE',
threads=n_threads)
wk2 = fft_wk2() * len(wk2)
else:
wk1 = ifft(wk1)*len(wk1)
wk2 = ifft(wk2)*len(wk1)
wk1 = wk1[1:nout + 1]
wk2 = wk2[1:nout + 1]
rwk1 = wk1.real
iwk1 = wk1.imag
rwk2 = wk2.real
iwk2 = wk2.imag
df = 1.0/(xdif*ofac)
#Compute the Lomb value for each frequency
hypo2 = 2.0*abs(wk2)
hc2wt = rwk2/hypo2
hs2wt = iwk2/hypo2
cwt = sqrt(0.5 + hc2wt)
swt = sign(hs2wt)*(sqrt(0.5 - hc2wt))
den = 0.5*n + hc2wt*rwk2 + hs2wt*iwk2
cterm = (cwt*rwk1 + swt*iwk1)**2./den
sterm = (cwt*iwk1 - swt*rwk1)**2./(n - den)
wk1 = df*(arange(nout, dtype='float') + 1.)
wk2 = (cterm + sterm)/(2.0*var)
pmax = wk2.max()
jmax = wk2.argmax()
#Significance estimation
#expy = exp(-wk2)
#effm = 2.0*(nout)/ofac
#sig = effm*expy
#ind = (sig > 0.01).nonzero()
#sig[ind] = 1.0-(1.0-expy[ind])**effm
#Estimate significance of largest peak value
expy = exp(-pmax)
effm = 2.0*(nout)/ofac
prob = effm*expy
if prob > 0.01:
prob = 1.0 - (1.0 - expy)**effm
return wk1, wk2, nout, jmax, prob | Given abscissas x (which need not be equally spaced) and ordinates
y, and given a desired oversampling factor ofac (a typical value
being 4 or larger). this routine creates an array wk1 with a
sequence of nout increasing frequencies (not angular frequencies)
up to hifac times the "average" Nyquist frequency, and creates
an array wk2 with the values of the Lomb normalized periodogram at
those frequencies. The arrays x and y are not altered. This
routine also returns jmax such that wk2(jmax) is the maximum
element in wk2, and prob, an estimate of the significance of that
maximum against the hypothesis of random noise. A small value of prob
indicates that a significant periodic signal is present.
Reference:
Press, W. H. & Rybicki, G. B. 1989
ApJ vol. 338, p. 277-280.
Fast algorithm for spectral analysis of unevenly sampled data
(1989ApJ...338..277P)
Arguments:
X : Abscissas array, (e.g. an array of times).
Y : Ordinates array, (e.g. corresponding counts).
Ofac : Oversampling factor.
Hifac : Hifac * "average" Nyquist frequency = highest frequency
for which values of the Lomb normalized periodogram will
be calculated.
n_threads : number of threads to use.
Returns:
Wk1 : An array of Lomb periodogram frequencies.
Wk2 : An array of corresponding values of the Lomb periodogram.
Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
Jmax : The array index corresponding to the MAX( Wk2 ).
Prob : False Alarm Probability of the largest Periodogram value
MACC : Number of interpolation points per 1/4 cycle
of highest frequency
History:
02/23/2009, v1.0, MF
Translation of IDL code (orig. Numerical recipies) | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/period_LS_pyfftw.py#L95-L232 | null | """
Fast algorithm for spectral analysis of unevenly sampled data
-------------------------------------------------------------------------------
Modified by Dae-Won Kim.
The code is originally from:
http://www.astropython.org/snippet/2010/9/Fast-Lomb-Scargle-algorithm
28/04/2015 Performance improvement is pursued using the pyfftw.
Generally, it becomes ~40% faster than the original code
(tested on Mac OS X 10.9+). pyfftw also utilizes multiple cores.
04/07/2014 Performance improvement is pursued using Cython,
but there is almost no improvement. This is because
the most time-consuming part of the routine is numpy.ifft(),
which is already highly optimized. Consequently, I decide to
stick to the original code.
29/06/2014 Performance is compared with
AstroML.time_series.search_frequencies.
Given that Delta Scuti stars have very short period,
I need a very fine and wide range of frequency grid to calculate.
In such case, I observed this routine works better,
in terms of both speed and accuracy, mainly because the AstroML function
needs 'initial guess' and also 'limit_fractions' well defined.
-------------------------------------------------------------------------------
The Lomb-Scargle method performs spectral analysis on unevenly sampled
data and is known to be a powerful way to find, and test the
significance of, weak periodic signals. The method has previously been
thought to be 'slow', requiring of order 10(2)N(2) operations to analyze
N data points. We show that Fast Fourier Transforms (FFTs) can be used
in a novel way to make the computation of order 10(2)N log N. Despite
its use of the FFT, the algorithm is in no way equivalent to
conventional FFT periodogram analysis.
Keywords:
DATA SAMPLING, FAST FOURIER TRANSFORMATIONS,
SPECTRUM ANALYSIS, SIGNAL PROCESSING
Example:
> import numpy
> import lomb
> x = numpy.arange(10)
> y = numpy.sin(x)
> fx,fy, nout, jmax, prob = lomb.fasper(x,y, 6., 6.)
Reference:
Press, W. H. & Rybicki, G. B. 1989
ApJ vol. 338, p. 277-280.
Fast algorithm for spectral analysis of unevenly sampled data
bib code: 1989ApJ...338..277P
"""
from numpy import *
try:
import pyfftw
is_pyfftw = True
except:
from numpy.fft import *
is_pyfftw = False
def __spread__(y, yy, n, x, m):
"""
Given an array yy(0:n-1), extrapolate (spread) a value y into
m actual array elements that best approximate the "fictional"
(i.e., possible noninteger) array element number x. The weights
used are coefficients of the Lagrange interpolating polynomial
"""
nfac = [0, 1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
if m > 10. :
print('factorial table too small in spread')
return
ix = long(x)
if x == float(ix):
yy[ix] = yy[ix]+y
else:
ilo = long(x - 0.5*float(m) + 1.0)
ilo = min(max(ilo, 1), n - m + 1)
ihi = ilo + m - 1
nden = nfac[m]
fac = x - ilo
for j in range(ilo + 1, ihi + 1): fac = fac*(x - j)
yy[ihi] = yy[ihi] + y*fac/(nden*(x - ihi))
for j in range(ihi - 1, ilo - 1, -1):
nden = (nden/(j + 1 - ilo))*(j - ihi)
yy[j] = yy[j] + y*fac/(nden*(x - j))
def fasper(x, y, ofac, hifac, n_threads, MACC=4):
"""
Given abscissas x (which need not be equally spaced) and ordinates
y, and given a desired oversampling factor ofac (a typical value
being 4 or larger). this routine creates an array wk1 with a
sequence of nout increasing frequencies (not angular frequencies)
up to hifac times the "average" Nyquist frequency, and creates
an array wk2 with the values of the Lomb normalized periodogram at
those frequencies. The arrays x and y are not altered. This
routine also returns jmax such that wk2(jmax) is the maximum
element in wk2, and prob, an estimate of the significance of that
maximum against the hypothesis of random noise. A small value of prob
indicates that a significant periodic signal is present.
Reference:
Press, W. H. & Rybicki, G. B. 1989
ApJ vol. 338, p. 277-280.
Fast algorithm for spectral analysis of unevenly sampled data
(1989ApJ...338..277P)
Arguments:
X : Abscissas array, (e.g. an array of times).
Y : Ordinates array, (e.g. corresponding counts).
Ofac : Oversampling factor.
Hifac : Hifac * "average" Nyquist frequency = highest frequency
for which values of the Lomb normalized periodogram will
be calculated.
n_threads : number of threads to use.
Returns:
Wk1 : An array of Lomb periodogram frequencies.
Wk2 : An array of corresponding values of the Lomb periodogram.
Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
Jmax : The array index corresponding to the MAX( Wk2 ).
Prob : False Alarm Probability of the largest Periodogram value
MACC : Number of interpolation points per 1/4 cycle
of highest frequency
History:
02/23/2009, v1.0, MF
Translation of IDL code (orig. Numerical recipies)
"""
#Check dimensions of input arrays
n = long(len(x))
if n != len(y):
print('Incompatible arrays.')
return
#print x, y, hifac, ofac
nout = int(0.5*ofac*hifac*n)
nfreqt = long(ofac*hifac*n*MACC) #Size the FFT as next power
nfreq = 64 # of 2 above nfreqt.
while nfreq < nfreqt:
nfreq = 2*nfreq
ndim = long(2*nfreq)
#Compute the mean, variance
ave = y.mean()
##sample variance because the divisor is N-1
var = ((y - y.mean())**2).sum()/(len(y) - 1)
# and range of the data.
xmin = x.min()
xmax = x.max()
xdif = xmax - xmin
#extrapolate the data into the workspaces
if is_pyfftw:
wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
else:
wk1 = zeros(ndim, dtype='complex')
wk2 = zeros(ndim, dtype='complex')
fac = ndim/(xdif*ofac)
fndim = ndim
ck = ((x - xmin)*fac) % fndim
ckk = (2.0*ck) % fndim
for j in range(0, n):
__spread__(y[j] - ave, wk1, ndim, ck[j], MACC)
__spread__(1.0, wk2, ndim, ckk[j], MACC)
#Take the Fast Fourier Transforms.
if is_pyfftw:
fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE',
threads=n_threads)
wk1 = fft_wk1() * len(wk1)
fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE',
threads=n_threads)
wk2 = fft_wk2() * len(wk2)
else:
wk1 = ifft(wk1)*len(wk1)
wk2 = ifft(wk2)*len(wk1)
wk1 = wk1[1:nout + 1]
wk2 = wk2[1:nout + 1]
rwk1 = wk1.real
iwk1 = wk1.imag
rwk2 = wk2.real
iwk2 = wk2.imag
df = 1.0/(xdif*ofac)
#Compute the Lomb value for each frequency
hypo2 = 2.0*abs(wk2)
hc2wt = rwk2/hypo2
hs2wt = iwk2/hypo2
cwt = sqrt(0.5 + hc2wt)
swt = sign(hs2wt)*(sqrt(0.5 - hc2wt))
den = 0.5*n + hc2wt*rwk2 + hs2wt*iwk2
cterm = (cwt*rwk1 + swt*iwk1)**2./den
sterm = (cwt*iwk1 - swt*rwk1)**2./(n - den)
wk1 = df*(arange(nout, dtype='float') + 1.)
wk2 = (cterm + sterm)/(2.0*var)
pmax = wk2.max()
jmax = wk2.argmax()
#Significance estimation
#expy = exp(-wk2)
#effm = 2.0*(nout)/ofac
#sig = effm*expy
#ind = (sig > 0.01).nonzero()
#sig[ind] = 1.0-(1.0-expy[ind])**effm
#Estimate significance of largest peak value
expy = exp(-pmax)
effm = 2.0*(nout)/ofac
prob = effm*expy
if prob > 0.01:
prob = 1.0 - (1.0 - expy)**effm
return wk1, wk2, nout, jmax, prob
def getSignificance(wk1, wk2, nout, ofac):
"""
Returns the peak false alarm probabilities
Hence the lower is the probability and the more significant is the peak
"""
expy = exp(-wk2)
effm = 2.0*(nout)/ofac
sig = effm*expy
ind = (sig > 0.01).nonzero()
sig[ind] = 1.0 - (1.0 - expy[ind])**effm
return sig
|
dwkim78/upsilon | upsilon/predict/predict.py | predict | python | def predict(rf_model, features):
import numpy as np
from upsilon.extract_features.feature_set import get_feature_set
feature_set = get_feature_set()
# Grab only necessary features.
cols = [feature for feature in features if feature in feature_set]
cols = sorted(cols)
filtered_features = []
for i in range(len(cols)):
filtered_features.append(features[cols[i]])
filtered_features = np.array(filtered_features).reshape(1, -1)
# Classify.
classes = rf_model.classes_
# Note that we're classifying a single source, so [0] need tobe added.
probabilities = rf_model.predict_proba(filtered_features)[0]
# Classification flag.
flag = 0
if features['period_SNR'] < 20. or is_period_alias(features['period']):
flag = 1
# Return class, probability, and flag.
max_index = np.where(probabilities == np.max(probabilities))
return classes[max_index][0], probabilities[max_index][0], flag | Return label and probability estimated.
Parameters
----------
rf_model : sklearn.ensemble.RandomForestClassifier
The UPSILoN random forests model.
features : array_like
A list of features estimated by UPSILoN.
Returns
-------
label : str
A predicted label (i.e. class).
probability : float
Class probability.
flag : int
Classification flag. | train | https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/predict/predict.py#L4-L50 | [
"def is_period_alias(period):\n \"\"\"\n Check if a given period is possibly an alias.\n\n Parameters\n ----------\n period : float\n A period to test if it is a possible alias or not.\n\n Returns\n -------\n is_alias : boolean\n True if the given period is in a range of period alias.\n \"\"\"\n\n # Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014).\n # Period alias occurs mostly at ~1 and ~30.\n # Check each 1, 2, 3, 4, 5 factors.\n for i in range(1, 6):\n # One-day and one-month alias\n if (.99 / float(i)) < period < (1.004 / float(i)):\n return True\n if (1.03 / float(i)) < period < (1.04 / float(i)):\n return True\n if (29.2 / float(i)) < period < (29.9 / float(i)):\n return True\n\n # From candidates from the two fields 01, 08.\n # All of them are close to one day (or sidereal) alias.\n if (0.96465 / float(i)) < period < (0.96485 / float(i)):\n return True\n if (0.96725 / float(i)) < period < (0.96745 / float(i)):\n return True\n if (0.98190 / float(i)) < period < (0.98230 / float(i)):\n return True\n if (1.01034 / float(i)) < period < (1.01076 / float(i)):\n return True\n if (1.01568 / float(i)) < period < (1.01604 / float(i)):\n return True\n if (1.01718 / float(i)) < period < (1.01742 / float(i)):\n return True\n\n # From the all candidates from the entire LMC fields.\n # Some of these could be overlapped with the above cuts.\n if (0.50776 / float(i)) < period < (0.50861 / float(i)):\n return True\n if (0.96434 / float(i)) < period < (0.9652 / float(i)):\n return True\n if (0.96688 / float(i)) < period < (0.96731 / float(i)):\n return True\n if (1.0722 / float(i)) < period < (1.0729 / float(i)):\n return True\n if (27.1 / float(i)) < period < (27.5 / float(i)):\n return True\n\n # Not in the range of any alias.\n return False\n",
"def get_feature_set():\n \"\"\"\n Return a list of features' names.\n\n Features' name that are used to train a model and predict a class.\n Sorted by the names.\n\n Returns\n -------\n feature_names : list\n A list of features' names.\n \"\"\"\n\n features = ['amplitude', 'hl_amp_ratio', 'kurtosis', 'period',\n 'phase_cusum', 'phase_eta', 'phi21', 'phi31', 'quartile31',\n 'r21', 'r31', 'shapiro_w', 'skewness', 'slope_per10',\n 'slope_per90', 'stetson_k']\n features.sort()\n\n return features\n"
] | from upsilon.extract_features.is_period_alias import is_period_alias
|
unbit/sftpclone | sftpclone/sftpclone.py | configure_logging | python | def configure_logging(level=logging.DEBUG):
if level == logging.DEBUG:
# For debugging purposes, log from everyone!
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s'
)
return logging
logger = logging.getLogger(__name__)
logger.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger | Configure the module logging engine. | train | https://github.com/unbit/sftpclone/blob/1cc89478e680fc4e0d12b1a15b5bafd0390d05da/sftpclone/sftpclone.py#L33-L50 | null | #!/usr/bin/env python
# coding=utf-8
# Python 2.7 backward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import paramiko
import paramiko.py3compat
import os
import os.path
import sys
import errno
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_IMODE, S_IFMT
import argparse
import logging
from getpass import getuser, getpass
import glob
import socket
"""SFTPClone: sync local and remote directories."""
logger = None
try:
# Not available in Python 2.x
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def path_join(*args):
"""
Wrapper around `os.path.join`.
Makes sure to join paths of the same type (bytes).
"""
args = (paramiko.py3compat.u(arg) for arg in args)
return os.path.join(*args)
def parse_username_password_hostname(remote_url):
"""
Parse a command line string and return username, password, remote hostname and remote path.
:param remote_url: A command line string.
:return: A tuple, containing username, password, remote hostname and remote path.
"""
assert remote_url
assert ':' in remote_url
if '@' in remote_url:
username, hostname = remote_url.rsplit('@', 1)
else:
username, hostname = None, remote_url
hostname, remote_path = hostname.split(':', 1)
password = None
if username and ':' in username:
username, password = username.split(':', 1)
assert hostname
assert remote_path
return username, password, hostname, remote_path
def get_ssh_agent_keys(logger):
"""
Ask the SSH agent for a list of keys, and return it.
:return: A reference to the SSH agent and a list of keys.
"""
agent, agent_keys = None, None
try:
agent = paramiko.agent.Agent()
_agent_keys = agent.get_keys()
if not _agent_keys:
agent.close()
logger.error(
"SSH agent didn't provide any valid key. Trying to continue..."
)
else:
agent_keys = tuple(k for k in _agent_keys)
except paramiko.SSHException:
if agent:
agent.close()
agent = None
logger.error("SSH agent speaks a non-compatible protocol. Ignoring it.")
finally:
return agent, agent_keys
class SFTPClone(object):
"""The SFTPClone class."""
def __init__(self, local_path, remote_url,
identity_files=None, port=None, fix_symlinks=False,
ssh_config_path=None, ssh_agent=False,
exclude_file=None, known_hosts_path=None,
delete=True, allow_unknown=False,
create_remote_directory=False,
):
"""Init the needed parameters and the SFTPClient."""
self.local_path = os.path.realpath(os.path.expanduser(local_path))
self.logger = logger or configure_logging()
self.create_remote_directory = create_remote_directory
if not os.path.exists(self.local_path):
self.logger.error("Local path MUST exist. Exiting.")
sys.exit(1)
if exclude_file:
with open(exclude_file) as f:
# As in rsync's exclude from, ignore lines with leading ; and #
# and treat each path as relative (thus by removing the leading
# /)
exclude_list = [
line.rstrip().lstrip("/")
for line in f
if not line.startswith((";", "#"))
]
# actually, is a set of excluded files
self.exclude_list = {
g
for pattern in exclude_list
for g in glob.glob(path_join(self.local_path, pattern))
}
else:
self.exclude_list = set()
username, password, hostname, self.remote_path = parse_username_password_hostname(remote_url)
identity_files = identity_files or []
proxy_command = None
if ssh_config_path:
try:
with open(os.path.expanduser(ssh_config_path)) as c_file:
ssh_config = paramiko.SSHConfig()
ssh_config.parse(c_file)
c = ssh_config.lookup(hostname)
hostname = c.get("hostname", hostname)
username = c.get("user", username)
port = int(c.get("port", port))
identity_files = c.get("identityfile", identity_files)
proxy_command = c.get("proxycommand")
except Exception as e:
# it could be safe to continue anyway,
# because parameters could have been manually specified
self.logger.error(
"Error while parsing ssh_config file: %s. Trying to continue anyway...", e
)
# Set default values
if not username:
username = getuser() # defaults to current user
port = port or 22
allow_unknown = allow_unknown or False
self.chown = False
self.fix_symlinks = fix_symlinks or False
self.delete = delete if delete is not None else True
if ssh_agent:
agent, agent_keys = get_ssh_agent_keys(self.logger)
else:
agent, agent_keys = None, None
if not identity_files and not password and not agent_keys:
self.logger.error(
"You need to specify either a password, an identity or to enable the ssh-agent support."
)
sys.exit(1)
# only root can change file owner
if username == 'root':
self.chown = True
sock = (hostname, port)
if proxy_command is not None:
sock = paramiko.proxy.ProxyCommand(proxy_command)
try:
transport = paramiko.Transport(sock)
except socket.gaierror:
self.logger.error(
"Hostname not known. Are you sure you inserted it correctly?")
sys.exit(1)
try:
ssh_host = hostname if port == 22 else "[{}]:{}".format(hostname, port)
known_hosts = None
"""
Before starting the transport session, we have to configure it.
Specifically, we need to configure the preferred PK algorithm.
If the system already knows a public key of a specific kind for
a remote host, we have to peek its type as the preferred one.
"""
if known_hosts_path:
known_hosts = paramiko.HostKeys()
known_hosts_path = os.path.realpath(
os.path.expanduser(known_hosts_path))
try:
known_hosts.load(known_hosts_path)
except IOError:
self.logger.error(
"Error while loading known hosts file at {}. Exiting...".format(
known_hosts_path)
)
sys.exit(1)
known_keys = known_hosts.lookup(ssh_host)
if known_keys is not None:
# one or more keys are already known
# set their type as preferred
transport.get_security_options().key_types = \
tuple(known_keys.keys())
transport.start_client()
if not known_hosts:
self.logger.warning("Security warning: skipping known hosts check...")
else:
pubk = transport.get_remote_server_key()
if ssh_host in known_hosts.keys():
if not known_hosts.check(ssh_host, pubk):
self.logger.error(
"Security warning: "
"remote key fingerprint {} for hostname "
"{} didn't match the one in known_hosts {}. "
"Exiting...".format(
pubk.get_base64(),
ssh_host,
known_hosts.lookup(hostname),
)
)
sys.exit(1)
elif not allow_unknown:
prompt = ("The authenticity of host '{}' can't be established.\n"
"{} key is {}.\n"
"Are you sure you want to continue connecting? [y/n] ").format(
ssh_host, pubk.get_name(), pubk.get_base64())
try:
# Renamed to `input` in Python 3.x
response = raw_input(prompt)
except NameError:
response = input(prompt)
# Note: we do not modify the user's known_hosts file
if not (response == "y" or response == "yes"):
self.logger.error(
"Host authentication failed."
)
sys.exit(1)
def perform_key_auth(pkey):
try:
transport.auth_publickey(
username=username,
key=pkey
)
return True
except paramiko.SSHException:
self.logger.warning(
"Authentication with identity {}... failed".format(pkey.get_base64()[:10])
)
return False
if password: # Password auth, if specified.
transport.auth_password(
username=username,
password=password
)
elif agent_keys: # SSH agent keys have higher priority
for pkey in agent_keys:
if perform_key_auth(pkey):
break # Authentication worked.
else: # None of the keys worked.
raise paramiko.SSHException
elif identity_files: # Then follow identity file (specified from CL or ssh_config)
# Try identity files one by one, until one works
for key_path in identity_files:
key_path = os.path.expanduser(key_path)
try:
key = paramiko.RSAKey.from_private_key_file(key_path)
except paramiko.PasswordRequiredException:
pk_password = getpass(
"It seems that your identity from '{}' is encrypted. "
"Please enter your password: ".format(key_path)
)
try:
key = paramiko.RSAKey.from_private_key_file(key_path, pk_password)
except paramiko.SSHException:
self.logger.error(
"Incorrect passphrase. Cannot decode private key from '{}'.".format(key_path)
)
continue
except IOError or paramiko.SSHException:
self.logger.error(
"Something went wrong while opening '{}'. Skipping it.".format(key_path)
)
continue
if perform_key_auth(key):
break # Authentication worked.
else: # None of the keys worked.
raise paramiko.SSHException
else: # No authentication method specified, we shouldn't arrive here.
assert False
except paramiko.SSHException:
self.logger.error(
"None of the provided authentication methods worked. Exiting."
)
transport.close()
sys.exit(1)
finally:
if agent:
agent.close()
self.sftp = paramiko.SFTPClient.from_transport(transport)
if self.remote_path.startswith("~"):
# nasty hack to let getcwd work without changing dir!
self.sftp.chdir('.')
self.remote_path = self.remote_path.replace(
"~", self.sftp.getcwd()) # home is the initial sftp dir
@staticmethod
def _file_need_upload(l_st, r_st):
return True if \
l_st.st_size != r_st.st_size or int(l_st.st_mtime) != r_st.st_mtime \
else False
@staticmethod
def _must_be_deleted(local_path, r_st):
"""Return True if the remote correspondent of local_path has to be deleted.
i.e. if it doesn't exists locally or if it has a different type from the remote one."""
# if the file doesn't exists
if not os.path.lexists(local_path):
return True
# or if the file type is different
l_st = os.lstat(local_path)
if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode):
return True
return False
def _match_modes(self, remote_path, l_st):
"""Match mod, utime and uid/gid with locals one."""
self.sftp.chmod(remote_path, S_IMODE(l_st.st_mode))
self.sftp.utime(remote_path, (l_st.st_atime, l_st.st_mtime))
if self.chown:
self.sftp.chown(remote_path, l_st.st_uid, l_st.st_gid)
def file_upload(self, local_path, remote_path, l_st):
"""Upload local_path to remote_path and set permission and mtime."""
self.sftp.put(local_path, remote_path)
self._match_modes(remote_path, l_st)
def remote_delete(self, remote_path, r_st):
"""Remove the remote directory node."""
# If it's a directory, then delete content and directory
if S_ISDIR(r_st.st_mode):
for item in self.sftp.listdir_attr(remote_path):
full_path = path_join(remote_path, item.filename)
self.remote_delete(full_path, item)
self.sftp.rmdir(remote_path)
# Or simply delete files
else:
try:
self.sftp.remove(remote_path)
except FileNotFoundError as e:
self.logger.error(
"error while removing {}. trace: {}".format(remote_path, e)
)
def check_for_deletion(self, relative_path=None):
"""Traverse the entire remote_path tree.
Find files/directories that need to be deleted,
not being present in the local folder.
"""
if not relative_path:
relative_path = str() # root of shared directory tree
remote_path = path_join(self.remote_path, relative_path)
local_path = path_join(self.local_path, relative_path)
for remote_st in self.sftp.listdir_attr(remote_path):
r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename))
inner_remote_path = path_join(remote_path, remote_st.filename)
inner_local_path = path_join(local_path, remote_st.filename)
# check if remote_st is a symlink
# otherwise could delete file outside shared directory
if S_ISLNK(r_lstat.st_mode):
if self._must_be_deleted(inner_local_path, r_lstat):
self.remote_delete(inner_remote_path, r_lstat)
continue
if self._must_be_deleted(inner_local_path, remote_st):
self.remote_delete(inner_remote_path, remote_st)
elif S_ISDIR(remote_st.st_mode):
self.check_for_deletion(
path_join(relative_path, remote_st.filename)
)
def create_update_symlink(self, link_destination, remote_path):
"""Create a new link pointing to link_destination in remote_path position."""
try: # if there's anything, delete it
self.sftp.remove(remote_path)
except IOError: # that's fine, nothing exists there!
pass
finally: # and recreate the link
try:
self.sftp.symlink(link_destination, remote_path)
except OSError as e:
# Sometimes, if links are "too" different, symlink fails.
# Sadly, nothing we can do about it.
self.logger.error("error while symlinking {} to {}: {}".format(
remote_path, link_destination, e))
def node_check_for_upload_create(self, relative_path, f):
"""Check if the given directory tree node has to be uploaded/created on the remote folder."""
if not relative_path:
# we're at the root of the shared directory tree
relative_path = str()
# the (absolute) local address of f.
local_path = path_join(self.local_path, relative_path, f)
try:
l_st = os.lstat(local_path)
except OSError as e:
"""A little background here.
Sometimes, in big clusters configurations (mail, etc.),
files could disappear or be moved, suddenly.
There's nothing to do about it,
system should be stopped before doing backups.
Anyway, we log it, and skip it.
"""
self.logger.error("error while checking {}: {}".format(relative_path, e))
return
if local_path in self.exclude_list:
self.logger.info("Skipping excluded file %s.", local_path)
return
# the (absolute) remote address of f.
remote_path = path_join(self.remote_path, relative_path, f)
# First case: f is a directory
if S_ISDIR(l_st.st_mode):
# we check if the folder exists on the remote side
# it has to be a folder, otherwise it would have already been
# deleted
try:
self.sftp.stat(remote_path)
except IOError: # it doesn't exist yet on remote side
self.sftp.mkdir(remote_path)
self._match_modes(remote_path, l_st)
# now, we should traverse f too (recursion magic!)
self.check_for_upload_create(path_join(relative_path, f))
# Second case: f is a symbolic link
elif S_ISLNK(l_st.st_mode):
# read the local link
local_link = os.readlink(local_path)
absolute_local_link = os.path.realpath(local_link)
# is it absolute?
is_absolute = local_link.startswith("/")
# and does it point inside the shared directory?
# add trailing slash (security)
trailing_local_path = path_join(self.local_path, '')
relpath = os.path.commonprefix(
[absolute_local_link,
trailing_local_path]
) == trailing_local_path
if relpath:
relative_link = absolute_local_link[len(trailing_local_path):]
else:
relative_link = None
"""
# Refactor them all, be efficient!
# Case A: absolute link pointing outside shared directory
# (we can only update the remote part)
if is_absolute and not relpath:
self.create_update_symlink(local_link, remote_path)
# Case B: absolute link pointing inside shared directory
# (we can leave it as it is or fix the prefix to match the one of the remote server)
elif is_absolute and relpath:
if self.fix_symlinks:
self.create_update_symlink(
join(
self.remote_path,
relative_link,
),
remote_path
)
else:
self.create_update_symlink(local_link, remote_path)
# Case C: relative link pointing outside shared directory
# (all we can do is try to make the link anyway)
elif not is_absolute and not relpath:
self.create_update_symlink(local_link, remote_path)
# Case D: relative link pointing inside shared directory
# (we preserve the relativity and link it!)
elif not is_absolute and relpath:
self.create_update_symlink(local_link, remote_path)
"""
if is_absolute and relpath:
if self.fix_symlinks:
self.create_update_symlink(
path_join(
self.remote_path,
relative_link,
),
remote_path
)
else:
self.create_update_symlink(local_link, remote_path)
# Third case: regular file
elif S_ISREG(l_st.st_mode):
try:
r_st = self.sftp.lstat(remote_path)
if self._file_need_upload(l_st, r_st):
self.file_upload(local_path, remote_path, l_st)
except IOError as e:
if e.errno == errno.ENOENT:
self.file_upload(local_path, remote_path, l_st)
# Anything else.
else:
self.logger.warning("Skipping unsupported file %s.", local_path)
def check_for_upload_create(self, relative_path=None):
"""Traverse the relative_path tree and check for files that need to be uploaded/created.
Relativity here refers to the shared directory tree."""
for f in os.listdir(
path_join(
self.local_path, relative_path) if relative_path else self.local_path
):
self.node_check_for_upload_create(relative_path, f)
def run(self):
"""Run the sync.
Confront the local and the remote directories and perform the needed changes."""
# Check if remote path is present
try:
self.sftp.stat(self.remote_path)
except FileNotFoundError as e:
if self.create_remote_directory:
self.sftp.mkdir(self.remote_path)
self.logger.info(
"Created missing remote dir: '" + self.remote_path + "'")
else:
self.logger.error(
"Remote folder does not exists. "
"Add '-r' to create it if missing.")
sys.exit(1)
try:
if self.delete:
# First check for items to be removed
self.check_for_deletion()
# Now scan local for items to upload/create
self.check_for_upload_create()
except FileNotFoundError:
# If this happens, probably the remote folder doesn't exist.
self.logger.error(
"Error while opening remote folder. Are you sure it does exist?")
sys.exit(1)
def create_parser():
"""Create the CLI argument parser."""
parser = argparse.ArgumentParser(
description='Sync a local and a remote folder through SFTP.'
)
parser.add_argument(
"path",
type=str,
metavar="local-path",
help="the path of the local folder",
)
parser.add_argument(
"remote",
type=str,
metavar="user[:password]@hostname:remote-path",
help="the ssh-url ([user[:password]@]hostname:remote-path) of the remote folder. "
"The hostname can be specified as a ssh_config's hostname too. "
"Every missing information will be gathered from there",
)
parser.add_argument(
"-k",
"--key",
metavar="identity-path",
action="append",
help="private key identity path (defaults to ~/.ssh/id_rsa)"
)
parser.add_argument(
"-l",
"--logging",
choices=['CRITICAL',
'ERROR',
'WARNING',
'INFO',
'DEBUG',
'NOTSET'],
default='ERROR',
help="set logging level"
)
parser.add_argument(
"-p",
"--port",
default=22,
type=int,
help="SSH remote port (defaults to 22)"
)
parser.add_argument(
"-f",
"--fix-symlinks",
action="store_true",
help="fix symbolic links on remote side"
)
parser.add_argument(
"-a",
"--ssh-agent",
action="store_true",
help="enable ssh-agent support"
)
parser.add_argument(
"-c",
"--ssh-config",
metavar="ssh_config path",
default="~/.ssh/config",
type=str,
help="path to the ssh-configuration file (default to ~/.ssh/config)"
)
parser.add_argument(
"-n",
"--known-hosts",
metavar="known_hosts path",
default="~/.ssh/known_hosts",
type=str,
help="path to the openSSH known_hosts file"
)
parser.add_argument(
"-d",
"--disable-known-hosts",
action="store_true",
help="disable known_hosts fingerprint checking (security warning!)"
)
parser.add_argument(
"-e",
"--exclude-from",
metavar="exclude-from-file-path",
type=str,
help="exclude files matching pattern in exclude-from-file-path"
)
parser.add_argument(
"-t",
"--do-not-delete",
action="store_true",
help="do not delete remote files missing from local folder"
)
parser.add_argument(
"-o",
"--allow-unknown",
action="store_true",
help="allow connection to unknown hosts"
)
parser.add_argument(
"-r",
"--create-remote-directory",
action="store_true",
help="Create remote base directory if missing on remote"
)
return parser
def main(args=None):
"""The main."""
parser = create_parser()
args = vars(parser.parse_args(args))
log_mapping = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}
log_level = log_mapping[args['logging']]
del(args['logging'])
global logger
logger = configure_logging(log_level)
args_mapping = {
"path": "local_path",
"remote": "remote_url",
"ssh_config": "ssh_config_path",
"exclude_from": "exclude_file",
"known_hosts": "known_hosts_path",
"do_not_delete": "delete",
"key": "identity_files",
}
kwargs = { # convert the argument names to class constructor parameters
args_mapping[k]: v
for k, v in args.items()
if v and k in args_mapping
}
kwargs.update({
k: v
for k, v in args.items()
if v and k not in args_mapping
})
# Special case: disable known_hosts check
if args['disable_known_hosts']:
kwargs['known_hosts_path'] = None
del(kwargs['disable_known_hosts'])
# Toggle `do_not_delete` flag
if "delete" in kwargs:
kwargs["delete"] = not kwargs["delete"]
# Manually set the default identity file.
kwargs["identity_files"] = kwargs.get("identity_files", None) or ["~/.ssh/id_rsa"]
sync = SFTPClone(
**kwargs
)
sync.run()
if __name__ == '__main__':
main()
|
unbit/sftpclone | sftpclone/sftpclone.py | path_join | python | def path_join(*args):
args = (paramiko.py3compat.u(arg) for arg in args)
return os.path.join(*args) | Wrapper around `os.path.join`.
Makes sure to join paths of the same type (bytes). | train | https://github.com/unbit/sftpclone/blob/1cc89478e680fc4e0d12b1a15b5bafd0390d05da/sftpclone/sftpclone.py#L53-L59 | null | #!/usr/bin/env python
# coding=utf-8
# Python 2.7 backward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import paramiko
import paramiko.py3compat
import os
import os.path
import sys
import errno
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_IMODE, S_IFMT
import argparse
import logging
from getpass import getuser, getpass
import glob
import socket
"""SFTPClone: sync local and remote directories."""
logger = None
try:
# Not available in Python 2.x
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def configure_logging(level=logging.DEBUG):
    """Configure the module logging engine.

    At DEBUG level the root logger is set up through ``basicConfig`` and the
    ``logging`` module itself is returned, so that every logger in the
    process becomes verbose. For any other level, a dedicated module logger
    with a single stream handler at the requested level is returned.
    """
    fmt = '%(asctime)s - %(levelname)s - %(message)s'
    if level == logging.DEBUG:
        # For debugging purposes, log from everyone!
        logging.basicConfig(level=logging.DEBUG, format=fmt)
        return logging
    module_logger = logging.getLogger(__name__)
    module_logger.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(fmt))
    module_logger.addHandler(handler)
    return module_logger
def parse_username_password_hostname(remote_url):
    """
    Parse a command line string and return username, password, remote hostname and remote path.

    :param remote_url: A command line string of the form
        ``[user[:password]@]hostname:remote-path``.
    :return: A tuple ``(username, password, hostname, remote_path)``;
        username and password are ``None`` when not supplied.
    """
    assert remote_url
    assert ':' in remote_url

    username = password = None
    if '@' in remote_url:
        # Split on the LAST '@' so hostnames never swallow '@' in passwords.
        credentials, host_part = remote_url.rsplit('@', 1)
        if credentials and ':' in credentials:
            username, password = credentials.split(':', 1)
        else:
            username = credentials
    else:
        host_part = remote_url

    hostname, remote_path = host_part.split(':', 1)

    assert hostname
    assert remote_path
    return username, password, hostname, remote_path
def get_ssh_agent_keys(logger):
    """
    Ask the SSH agent for a list of keys, and return it.

    :return: A reference to the SSH agent and a tuple of keys.
        Either may be ``None`` when the agent is unusable or empty.
    """
    agent = None
    keys = None
    try:
        agent = paramiko.agent.Agent()
        available = agent.get_keys()
        if available:
            keys = tuple(available)
        else:
            # The agent answered, but holds nothing we can use.
            agent.close()
            logger.error(
                "SSH agent didn't provide any valid key. Trying to continue..."
            )
    except paramiko.SSHException:
        if agent:
            agent.close()
        agent = None
        logger.error("SSH agent speaks a non-compatible protocol. Ignoring it.")
    finally:
        # NOTE(review): returning from ``finally`` deliberately keeps this
        # best-effort — any in-flight exception is suppressed here.
        return agent, keys
class SFTPClone(object):
"""The SFTPClone class."""
def __init__(self, local_path, remote_url,
identity_files=None, port=None, fix_symlinks=False,
ssh_config_path=None, ssh_agent=False,
exclude_file=None, known_hosts_path=None,
delete=True, allow_unknown=False,
create_remote_directory=False,
):
"""Init the needed parameters and the SFTPClient."""
self.local_path = os.path.realpath(os.path.expanduser(local_path))
self.logger = logger or configure_logging()
self.create_remote_directory = create_remote_directory
if not os.path.exists(self.local_path):
self.logger.error("Local path MUST exist. Exiting.")
sys.exit(1)
if exclude_file:
with open(exclude_file) as f:
# As in rsync's exclude from, ignore lines with leading ; and #
# and treat each path as relative (thus by removing the leading
# /)
exclude_list = [
line.rstrip().lstrip("/")
for line in f
if not line.startswith((";", "#"))
]
# actually, is a set of excluded files
self.exclude_list = {
g
for pattern in exclude_list
for g in glob.glob(path_join(self.local_path, pattern))
}
else:
self.exclude_list = set()
username, password, hostname, self.remote_path = parse_username_password_hostname(remote_url)
identity_files = identity_files or []
proxy_command = None
if ssh_config_path:
try:
with open(os.path.expanduser(ssh_config_path)) as c_file:
ssh_config = paramiko.SSHConfig()
ssh_config.parse(c_file)
c = ssh_config.lookup(hostname)
hostname = c.get("hostname", hostname)
username = c.get("user", username)
port = int(c.get("port", port))
identity_files = c.get("identityfile", identity_files)
proxy_command = c.get("proxycommand")
except Exception as e:
# it could be safe to continue anyway,
# because parameters could have been manually specified
self.logger.error(
"Error while parsing ssh_config file: %s. Trying to continue anyway...", e
)
# Set default values
if not username:
username = getuser() # defaults to current user
port = port or 22
allow_unknown = allow_unknown or False
self.chown = False
self.fix_symlinks = fix_symlinks or False
self.delete = delete if delete is not None else True
if ssh_agent:
agent, agent_keys = get_ssh_agent_keys(self.logger)
else:
agent, agent_keys = None, None
if not identity_files and not password and not agent_keys:
self.logger.error(
"You need to specify either a password, an identity or to enable the ssh-agent support."
)
sys.exit(1)
# only root can change file owner
if username == 'root':
self.chown = True
sock = (hostname, port)
if proxy_command is not None:
sock = paramiko.proxy.ProxyCommand(proxy_command)
try:
transport = paramiko.Transport(sock)
except socket.gaierror:
self.logger.error(
"Hostname not known. Are you sure you inserted it correctly?")
sys.exit(1)
try:
ssh_host = hostname if port == 22 else "[{}]:{}".format(hostname, port)
known_hosts = None
"""
Before starting the transport session, we have to configure it.
Specifically, we need to configure the preferred PK algorithm.
If the system already knows a public key of a specific kind for
a remote host, we have to peek its type as the preferred one.
"""
if known_hosts_path:
known_hosts = paramiko.HostKeys()
known_hosts_path = os.path.realpath(
os.path.expanduser(known_hosts_path))
try:
known_hosts.load(known_hosts_path)
except IOError:
self.logger.error(
"Error while loading known hosts file at {}. Exiting...".format(
known_hosts_path)
)
sys.exit(1)
known_keys = known_hosts.lookup(ssh_host)
if known_keys is not None:
# one or more keys are already known
# set their type as preferred
transport.get_security_options().key_types = \
tuple(known_keys.keys())
transport.start_client()
if not known_hosts:
self.logger.warning("Security warning: skipping known hosts check...")
else:
pubk = transport.get_remote_server_key()
if ssh_host in known_hosts.keys():
if not known_hosts.check(ssh_host, pubk):
self.logger.error(
"Security warning: "
"remote key fingerprint {} for hostname "
"{} didn't match the one in known_hosts {}. "
"Exiting...".format(
pubk.get_base64(),
ssh_host,
known_hosts.lookup(hostname),
)
)
sys.exit(1)
elif not allow_unknown:
prompt = ("The authenticity of host '{}' can't be established.\n"
"{} key is {}.\n"
"Are you sure you want to continue connecting? [y/n] ").format(
ssh_host, pubk.get_name(), pubk.get_base64())
try:
# Renamed to `input` in Python 3.x
response = raw_input(prompt)
except NameError:
response = input(prompt)
# Note: we do not modify the user's known_hosts file
if not (response == "y" or response == "yes"):
self.logger.error(
"Host authentication failed."
)
sys.exit(1)
def perform_key_auth(pkey):
try:
transport.auth_publickey(
username=username,
key=pkey
)
return True
except paramiko.SSHException:
self.logger.warning(
"Authentication with identity {}... failed".format(pkey.get_base64()[:10])
)
return False
if password: # Password auth, if specified.
transport.auth_password(
username=username,
password=password
)
elif agent_keys: # SSH agent keys have higher priority
for pkey in agent_keys:
if perform_key_auth(pkey):
break # Authentication worked.
else: # None of the keys worked.
raise paramiko.SSHException
elif identity_files: # Then follow identity file (specified from CL or ssh_config)
# Try identity files one by one, until one works
for key_path in identity_files:
key_path = os.path.expanduser(key_path)
try:
key = paramiko.RSAKey.from_private_key_file(key_path)
except paramiko.PasswordRequiredException:
pk_password = getpass(
"It seems that your identity from '{}' is encrypted. "
"Please enter your password: ".format(key_path)
)
try:
key = paramiko.RSAKey.from_private_key_file(key_path, pk_password)
except paramiko.SSHException:
self.logger.error(
"Incorrect passphrase. Cannot decode private key from '{}'.".format(key_path)
)
continue
except IOError or paramiko.SSHException:
self.logger.error(
"Something went wrong while opening '{}'. Skipping it.".format(key_path)
)
continue
if perform_key_auth(key):
break # Authentication worked.
else: # None of the keys worked.
raise paramiko.SSHException
else: # No authentication method specified, we shouldn't arrive here.
assert False
except paramiko.SSHException:
self.logger.error(
"None of the provided authentication methods worked. Exiting."
)
transport.close()
sys.exit(1)
finally:
if agent:
agent.close()
self.sftp = paramiko.SFTPClient.from_transport(transport)
if self.remote_path.startswith("~"):
# nasty hack to let getcwd work without changing dir!
self.sftp.chdir('.')
self.remote_path = self.remote_path.replace(
"~", self.sftp.getcwd()) # home is the initial sftp dir
@staticmethod
def _file_need_upload(l_st, r_st):
return True if \
l_st.st_size != r_st.st_size or int(l_st.st_mtime) != r_st.st_mtime \
else False
@staticmethod
def _must_be_deleted(local_path, r_st):
"""Return True if the remote correspondent of local_path has to be deleted.
i.e. if it doesn't exists locally or if it has a different type from the remote one."""
# if the file doesn't exists
if not os.path.lexists(local_path):
return True
# or if the file type is different
l_st = os.lstat(local_path)
if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode):
return True
return False
def _match_modes(self, remote_path, l_st):
"""Match mod, utime and uid/gid with locals one."""
self.sftp.chmod(remote_path, S_IMODE(l_st.st_mode))
self.sftp.utime(remote_path, (l_st.st_atime, l_st.st_mtime))
if self.chown:
self.sftp.chown(remote_path, l_st.st_uid, l_st.st_gid)
def file_upload(self, local_path, remote_path, l_st):
"""Upload local_path to remote_path and set permission and mtime."""
self.sftp.put(local_path, remote_path)
self._match_modes(remote_path, l_st)
def remote_delete(self, remote_path, r_st):
"""Remove the remote directory node."""
# If it's a directory, then delete content and directory
if S_ISDIR(r_st.st_mode):
for item in self.sftp.listdir_attr(remote_path):
full_path = path_join(remote_path, item.filename)
self.remote_delete(full_path, item)
self.sftp.rmdir(remote_path)
# Or simply delete files
else:
try:
self.sftp.remove(remote_path)
except FileNotFoundError as e:
self.logger.error(
"error while removing {}. trace: {}".format(remote_path, e)
)
def check_for_deletion(self, relative_path=None):
"""Traverse the entire remote_path tree.
Find files/directories that need to be deleted,
not being present in the local folder.
"""
if not relative_path:
relative_path = str() # root of shared directory tree
remote_path = path_join(self.remote_path, relative_path)
local_path = path_join(self.local_path, relative_path)
for remote_st in self.sftp.listdir_attr(remote_path):
r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename))
inner_remote_path = path_join(remote_path, remote_st.filename)
inner_local_path = path_join(local_path, remote_st.filename)
# check if remote_st is a symlink
# otherwise could delete file outside shared directory
if S_ISLNK(r_lstat.st_mode):
if self._must_be_deleted(inner_local_path, r_lstat):
self.remote_delete(inner_remote_path, r_lstat)
continue
if self._must_be_deleted(inner_local_path, remote_st):
self.remote_delete(inner_remote_path, remote_st)
elif S_ISDIR(remote_st.st_mode):
self.check_for_deletion(
path_join(relative_path, remote_st.filename)
)
def create_update_symlink(self, link_destination, remote_path):
"""Create a new link pointing to link_destination in remote_path position."""
try: # if there's anything, delete it
self.sftp.remove(remote_path)
except IOError: # that's fine, nothing exists there!
pass
finally: # and recreate the link
try:
self.sftp.symlink(link_destination, remote_path)
except OSError as e:
# Sometimes, if links are "too" different, symlink fails.
# Sadly, nothing we can do about it.
self.logger.error("error while symlinking {} to {}: {}".format(
remote_path, link_destination, e))
def node_check_for_upload_create(self, relative_path, f):
"""Check if the given directory tree node has to be uploaded/created on the remote folder."""
if not relative_path:
# we're at the root of the shared directory tree
relative_path = str()
# the (absolute) local address of f.
local_path = path_join(self.local_path, relative_path, f)
try:
l_st = os.lstat(local_path)
except OSError as e:
"""A little background here.
Sometimes, in big clusters configurations (mail, etc.),
files could disappear or be moved, suddenly.
There's nothing to do about it,
system should be stopped before doing backups.
Anyway, we log it, and skip it.
"""
self.logger.error("error while checking {}: {}".format(relative_path, e))
return
if local_path in self.exclude_list:
self.logger.info("Skipping excluded file %s.", local_path)
return
# the (absolute) remote address of f.
remote_path = path_join(self.remote_path, relative_path, f)
# First case: f is a directory
if S_ISDIR(l_st.st_mode):
# we check if the folder exists on the remote side
# it has to be a folder, otherwise it would have already been
# deleted
try:
self.sftp.stat(remote_path)
except IOError: # it doesn't exist yet on remote side
self.sftp.mkdir(remote_path)
self._match_modes(remote_path, l_st)
# now, we should traverse f too (recursion magic!)
self.check_for_upload_create(path_join(relative_path, f))
# Second case: f is a symbolic link
elif S_ISLNK(l_st.st_mode):
# read the local link
local_link = os.readlink(local_path)
absolute_local_link = os.path.realpath(local_link)
# is it absolute?
is_absolute = local_link.startswith("/")
# and does it point inside the shared directory?
# add trailing slash (security)
trailing_local_path = path_join(self.local_path, '')
relpath = os.path.commonprefix(
[absolute_local_link,
trailing_local_path]
) == trailing_local_path
if relpath:
relative_link = absolute_local_link[len(trailing_local_path):]
else:
relative_link = None
"""
# Refactor them all, be efficient!
# Case A: absolute link pointing outside shared directory
# (we can only update the remote part)
if is_absolute and not relpath:
self.create_update_symlink(local_link, remote_path)
# Case B: absolute link pointing inside shared directory
# (we can leave it as it is or fix the prefix to match the one of the remote server)
elif is_absolute and relpath:
if self.fix_symlinks:
self.create_update_symlink(
join(
self.remote_path,
relative_link,
),
remote_path
)
else:
self.create_update_symlink(local_link, remote_path)
# Case C: relative link pointing outside shared directory
# (all we can do is try to make the link anyway)
elif not is_absolute and not relpath:
self.create_update_symlink(local_link, remote_path)
# Case D: relative link pointing inside shared directory
# (we preserve the relativity and link it!)
elif not is_absolute and relpath:
self.create_update_symlink(local_link, remote_path)
"""
if is_absolute and relpath:
if self.fix_symlinks:
self.create_update_symlink(
path_join(
self.remote_path,
relative_link,
),
remote_path
)
else:
self.create_update_symlink(local_link, remote_path)
# Third case: regular file
elif S_ISREG(l_st.st_mode):
try:
r_st = self.sftp.lstat(remote_path)
if self._file_need_upload(l_st, r_st):
self.file_upload(local_path, remote_path, l_st)
except IOError as e:
if e.errno == errno.ENOENT:
self.file_upload(local_path, remote_path, l_st)
# Anything else.
else:
self.logger.warning("Skipping unsupported file %s.", local_path)
def check_for_upload_create(self, relative_path=None):
"""Traverse the relative_path tree and check for files that need to be uploaded/created.
Relativity here refers to the shared directory tree."""
for f in os.listdir(
path_join(
self.local_path, relative_path) if relative_path else self.local_path
):
self.node_check_for_upload_create(relative_path, f)
def run(self):
"""Run the sync.
Confront the local and the remote directories and perform the needed changes."""
# Check if remote path is present
try:
self.sftp.stat(self.remote_path)
except FileNotFoundError as e:
if self.create_remote_directory:
self.sftp.mkdir(self.remote_path)
self.logger.info(
"Created missing remote dir: '" + self.remote_path + "'")
else:
self.logger.error(
"Remote folder does not exists. "
"Add '-r' to create it if missing.")
sys.exit(1)
try:
if self.delete:
# First check for items to be removed
self.check_for_deletion()
# Now scan local for items to upload/create
self.check_for_upload_create()
except FileNotFoundError:
# If this happens, probably the remote folder doesn't exist.
self.logger.error(
"Error while opening remote folder. Are you sure it does exist?")
sys.exit(1)
def create_parser():
"""Create the CLI argument parser."""
parser = argparse.ArgumentParser(
description='Sync a local and a remote folder through SFTP.'
)
parser.add_argument(
"path",
type=str,
metavar="local-path",
help="the path of the local folder",
)
parser.add_argument(
"remote",
type=str,
metavar="user[:password]@hostname:remote-path",
help="the ssh-url ([user[:password]@]hostname:remote-path) of the remote folder. "
"The hostname can be specified as a ssh_config's hostname too. "
"Every missing information will be gathered from there",
)
parser.add_argument(
"-k",
"--key",
metavar="identity-path",
action="append",
help="private key identity path (defaults to ~/.ssh/id_rsa)"
)
parser.add_argument(
"-l",
"--logging",
choices=['CRITICAL',
'ERROR',
'WARNING',
'INFO',
'DEBUG',
'NOTSET'],
default='ERROR',
help="set logging level"
)
parser.add_argument(
"-p",
"--port",
default=22,
type=int,
help="SSH remote port (defaults to 22)"
)
parser.add_argument(
"-f",
"--fix-symlinks",
action="store_true",
help="fix symbolic links on remote side"
)
parser.add_argument(
"-a",
"--ssh-agent",
action="store_true",
help="enable ssh-agent support"
)
parser.add_argument(
"-c",
"--ssh-config",
metavar="ssh_config path",
default="~/.ssh/config",
type=str,
help="path to the ssh-configuration file (default to ~/.ssh/config)"
)
parser.add_argument(
"-n",
"--known-hosts",
metavar="known_hosts path",
default="~/.ssh/known_hosts",
type=str,
help="path to the openSSH known_hosts file"
)
parser.add_argument(
"-d",
"--disable-known-hosts",
action="store_true",
help="disable known_hosts fingerprint checking (security warning!)"
)
parser.add_argument(
"-e",
"--exclude-from",
metavar="exclude-from-file-path",
type=str,
help="exclude files matching pattern in exclude-from-file-path"
)
parser.add_argument(
"-t",
"--do-not-delete",
action="store_true",
help="do not delete remote files missing from local folder"
)
parser.add_argument(
"-o",
"--allow-unknown",
action="store_true",
help="allow connection to unknown hosts"
)
parser.add_argument(
"-r",
"--create-remote-directory",
action="store_true",
help="Create remote base directory if missing on remote"
)
return parser
def main(args=None):
"""The main."""
parser = create_parser()
args = vars(parser.parse_args(args))
log_mapping = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}
log_level = log_mapping[args['logging']]
del(args['logging'])
global logger
logger = configure_logging(log_level)
args_mapping = {
"path": "local_path",
"remote": "remote_url",
"ssh_config": "ssh_config_path",
"exclude_from": "exclude_file",
"known_hosts": "known_hosts_path",
"do_not_delete": "delete",
"key": "identity_files",
}
kwargs = { # convert the argument names to class constructor parameters
args_mapping[k]: v
for k, v in args.items()
if v and k in args_mapping
}
kwargs.update({
k: v
for k, v in args.items()
if v and k not in args_mapping
})
# Special case: disable known_hosts check
if args['disable_known_hosts']:
kwargs['known_hosts_path'] = None
del(kwargs['disable_known_hosts'])
# Toggle `do_not_delete` flag
if "delete" in kwargs:
kwargs["delete"] = not kwargs["delete"]
# Manually set the default identity file.
kwargs["identity_files"] = kwargs.get("identity_files", None) or ["~/.ssh/id_rsa"]
sync = SFTPClone(
**kwargs
)
sync.run()
if __name__ == '__main__':
main()
|
unbit/sftpclone | sftpclone/sftpclone.py | parse_username_password_hostname | python | def parse_username_password_hostname(remote_url):
assert remote_url
assert ':' in remote_url
if '@' in remote_url:
username, hostname = remote_url.rsplit('@', 1)
else:
username, hostname = None, remote_url
hostname, remote_path = hostname.split(':', 1)
password = None
if username and ':' in username:
username, password = username.split(':', 1)
assert hostname
assert remote_path
return username, password, hostname, remote_path | Parse a command line string and return username, password, remote hostname and remote path.
:param remote_url: A command line string.
:return: A tuple, containing username, password, remote hostname and remote path. | train | https://github.com/unbit/sftpclone/blob/1cc89478e680fc4e0d12b1a15b5bafd0390d05da/sftpclone/sftpclone.py#L62-L85 | null | #!/usr/bin/env python
# coding=utf-8
# Python 2.7 backward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import paramiko
import paramiko.py3compat
import os
import os.path
import sys
import errno
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_IMODE, S_IFMT
import argparse
import logging
from getpass import getuser, getpass
import glob
import socket
"""SFTPClone: sync local and remote directories."""
logger = None
try:
# Not available in Python 2.x
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def configure_logging(level=logging.DEBUG):
    """Configure and return the module logging engine.

    At DEBUG level the root logger is configured through ``basicConfig``
    and the ``logging`` module itself is returned (making every logger
    verbose); at any other level a dedicated stream-handler logger for
    this module is built and returned.
    """
    fmt = '%(asctime)s - %(levelname)s - %(message)s'
    if level == logging.DEBUG:
        # Debug mode: make *every* logger chatty via the root configuration.
        logging.basicConfig(level=logging.DEBUG, format=fmt)
        return logging
    module_logger = logging.getLogger(__name__)
    module_logger.setLevel(level)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(logging.Formatter(fmt))
    module_logger.addHandler(stream_handler)
    return module_logger
def path_join(*args):
    """
    Wrapper around `os.path.join`.

    Coerces every component to text (via paramiko's py3compat helper) so
    that str and bytes segments are never mixed within a single join.
    """
    unicode_parts = [paramiko.py3compat.u(part) for part in args]
    return os.path.join(*unicode_parts)
def get_ssh_agent_keys(logger):
    """
    Ask the SSH agent for a list of keys, and return it.

    :param logger: logger used to report agent problems; failures are
        logged, never raised to the caller.
    :return: A reference to the SSH agent and a list of keys.
        Either element may be ``None``: the agent when it spoke an
        incompatible protocol, the keys when the agent offered none.
    """
    agent, agent_keys = None, None
    try:
        agent = paramiko.agent.Agent()
        _agent_keys = agent.get_keys()
        if not _agent_keys:
            agent.close()
            logger.error(
                "SSH agent didn't provide any valid key. Trying to continue..."
            )
        else:
            # Freeze the keys in a tuple so later iteration is independent
            # of the agent connection state.
            agent_keys = tuple(k for k in _agent_keys)
    except paramiko.SSHException:
        if agent:
            agent.close()
        agent = None
        logger.error("SSH agent speaks a non-compatible protocol. Ignoring it.")
    finally:
        # NOTE: returning from ``finally`` deliberately swallows any other
        # in-flight exception; callers always receive an (agent, keys) pair.
        return agent, agent_keys
class SFTPClone(object):
"""The SFTPClone class."""
def __init__(self, local_path, remote_url,
identity_files=None, port=None, fix_symlinks=False,
ssh_config_path=None, ssh_agent=False,
exclude_file=None, known_hosts_path=None,
delete=True, allow_unknown=False,
create_remote_directory=False,
):
"""Init the needed parameters and the SFTPClient."""
self.local_path = os.path.realpath(os.path.expanduser(local_path))
self.logger = logger or configure_logging()
self.create_remote_directory = create_remote_directory
if not os.path.exists(self.local_path):
self.logger.error("Local path MUST exist. Exiting.")
sys.exit(1)
if exclude_file:
with open(exclude_file) as f:
# As in rsync's exclude from, ignore lines with leading ; and #
# and treat each path as relative (thus by removing the leading
# /)
exclude_list = [
line.rstrip().lstrip("/")
for line in f
if not line.startswith((";", "#"))
]
# actually, is a set of excluded files
self.exclude_list = {
g
for pattern in exclude_list
for g in glob.glob(path_join(self.local_path, pattern))
}
else:
self.exclude_list = set()
username, password, hostname, self.remote_path = parse_username_password_hostname(remote_url)
identity_files = identity_files or []
proxy_command = None
if ssh_config_path:
try:
with open(os.path.expanduser(ssh_config_path)) as c_file:
ssh_config = paramiko.SSHConfig()
ssh_config.parse(c_file)
c = ssh_config.lookup(hostname)
hostname = c.get("hostname", hostname)
username = c.get("user", username)
port = int(c.get("port", port))
identity_files = c.get("identityfile", identity_files)
proxy_command = c.get("proxycommand")
except Exception as e:
# it could be safe to continue anyway,
# because parameters could have been manually specified
self.logger.error(
"Error while parsing ssh_config file: %s. Trying to continue anyway...", e
)
# Set default values
if not username:
username = getuser() # defaults to current user
port = port or 22
allow_unknown = allow_unknown or False
self.chown = False
self.fix_symlinks = fix_symlinks or False
self.delete = delete if delete is not None else True
if ssh_agent:
agent, agent_keys = get_ssh_agent_keys(self.logger)
else:
agent, agent_keys = None, None
if not identity_files and not password and not agent_keys:
self.logger.error(
"You need to specify either a password, an identity or to enable the ssh-agent support."
)
sys.exit(1)
# only root can change file owner
if username == 'root':
self.chown = True
sock = (hostname, port)
if proxy_command is not None:
sock = paramiko.proxy.ProxyCommand(proxy_command)
try:
transport = paramiko.Transport(sock)
except socket.gaierror:
self.logger.error(
"Hostname not known. Are you sure you inserted it correctly?")
sys.exit(1)
try:
ssh_host = hostname if port == 22 else "[{}]:{}".format(hostname, port)
known_hosts = None
"""
Before starting the transport session, we have to configure it.
Specifically, we need to configure the preferred PK algorithm.
If the system already knows a public key of a specific kind for
a remote host, we have to peek its type as the preferred one.
"""
if known_hosts_path:
known_hosts = paramiko.HostKeys()
known_hosts_path = os.path.realpath(
os.path.expanduser(known_hosts_path))
try:
known_hosts.load(known_hosts_path)
except IOError:
self.logger.error(
"Error while loading known hosts file at {}. Exiting...".format(
known_hosts_path)
)
sys.exit(1)
known_keys = known_hosts.lookup(ssh_host)
if known_keys is not None:
# one or more keys are already known
# set their type as preferred
transport.get_security_options().key_types = \
tuple(known_keys.keys())
transport.start_client()
if not known_hosts:
self.logger.warning("Security warning: skipping known hosts check...")
else:
pubk = transport.get_remote_server_key()
if ssh_host in known_hosts.keys():
if not known_hosts.check(ssh_host, pubk):
self.logger.error(
"Security warning: "
"remote key fingerprint {} for hostname "
"{} didn't match the one in known_hosts {}. "
"Exiting...".format(
pubk.get_base64(),
ssh_host,
known_hosts.lookup(hostname),
)
)
sys.exit(1)
elif not allow_unknown:
prompt = ("The authenticity of host '{}' can't be established.\n"
"{} key is {}.\n"
"Are you sure you want to continue connecting? [y/n] ").format(
ssh_host, pubk.get_name(), pubk.get_base64())
try:
# Renamed to `input` in Python 3.x
response = raw_input(prompt)
except NameError:
response = input(prompt)
# Note: we do not modify the user's known_hosts file
if not (response == "y" or response == "yes"):
self.logger.error(
"Host authentication failed."
)
sys.exit(1)
def perform_key_auth(pkey):
try:
transport.auth_publickey(
username=username,
key=pkey
)
return True
except paramiko.SSHException:
self.logger.warning(
"Authentication with identity {}... failed".format(pkey.get_base64()[:10])
)
return False
if password: # Password auth, if specified.
transport.auth_password(
username=username,
password=password
)
elif agent_keys: # SSH agent keys have higher priority
for pkey in agent_keys:
if perform_key_auth(pkey):
break # Authentication worked.
else: # None of the keys worked.
raise paramiko.SSHException
elif identity_files: # Then follow identity file (specified from CL or ssh_config)
# Try identity files one by one, until one works
for key_path in identity_files:
key_path = os.path.expanduser(key_path)
try:
key = paramiko.RSAKey.from_private_key_file(key_path)
except paramiko.PasswordRequiredException:
pk_password = getpass(
"It seems that your identity from '{}' is encrypted. "
"Please enter your password: ".format(key_path)
)
try:
key = paramiko.RSAKey.from_private_key_file(key_path, pk_password)
except paramiko.SSHException:
self.logger.error(
"Incorrect passphrase. Cannot decode private key from '{}'.".format(key_path)
)
continue
except IOError or paramiko.SSHException:
self.logger.error(
"Something went wrong while opening '{}'. Skipping it.".format(key_path)
)
continue
if perform_key_auth(key):
break # Authentication worked.
else: # None of the keys worked.
raise paramiko.SSHException
else: # No authentication method specified, we shouldn't arrive here.
assert False
except paramiko.SSHException:
self.logger.error(
"None of the provided authentication methods worked. Exiting."
)
transport.close()
sys.exit(1)
finally:
if agent:
agent.close()
self.sftp = paramiko.SFTPClient.from_transport(transport)
if self.remote_path.startswith("~"):
# nasty hack to let getcwd work without changing dir!
self.sftp.chdir('.')
self.remote_path = self.remote_path.replace(
"~", self.sftp.getcwd()) # home is the initial sftp dir
@staticmethod
def _file_need_upload(l_st, r_st):
return True if \
l_st.st_size != r_st.st_size or int(l_st.st_mtime) != r_st.st_mtime \
else False
@staticmethod
def _must_be_deleted(local_path, r_st):
"""Return True if the remote correspondent of local_path has to be deleted.
i.e. if it doesn't exists locally or if it has a different type from the remote one."""
# if the file doesn't exists
if not os.path.lexists(local_path):
return True
# or if the file type is different
l_st = os.lstat(local_path)
if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode):
return True
return False
    def _match_modes(self, remote_path, l_st):
        """Match mode, utime and uid/gid of *remote_path* with the local ones.

        Mirrors the local permission bits and access/modification times
        onto the remote node; ownership is copied only when ``self.chown``
        is set (i.e. when connected as root, the only user allowed to
        chown remote files).

        :param remote_path: remote node to adjust.
        :param l_st: local ``os.lstat`` result to copy attributes from.
        """
        self.sftp.chmod(remote_path, S_IMODE(l_st.st_mode))
        self.sftp.utime(remote_path, (l_st.st_atime, l_st.st_mtime))
        if self.chown:
            self.sftp.chown(remote_path, l_st.st_uid, l_st.st_gid)
    def file_upload(self, local_path, remote_path, l_st):
        """Upload local_path to remote_path and set permission and mtime.

        :param local_path: absolute path of the source file.
        :param remote_path: absolute destination path on the SFTP server.
        :param l_st: local ``os.lstat`` result used to mirror mode/times.
        """
        self.sftp.put(local_path, remote_path)
        self._match_modes(remote_path, l_st)
    def remote_delete(self, remote_path, r_st):
        """Remove the remote directory node.

        Directories are emptied recursively before being removed; anything
        else is unlinked directly.  A file that has already vanished is
        logged and skipped rather than aborting the sync.

        :param remote_path: remote node to delete.
        :param r_st: stat attributes of that node (used for the type check).
        """
        # If it's a directory, then delete content and directory
        if S_ISDIR(r_st.st_mode):
            for item in self.sftp.listdir_attr(remote_path):
                full_path = path_join(remote_path, item.filename)
                self.remote_delete(full_path, item)
            self.sftp.rmdir(remote_path)
        # Or simply delete files
        else:
            try:
                self.sftp.remove(remote_path)
            except FileNotFoundError as e:
                # Best-effort: the node may have disappeared concurrently.
                self.logger.error(
                    "error while removing {}. trace: {}".format(remote_path, e)
                )
    def check_for_deletion(self, relative_path=None):
        """Traverse the entire remote_path tree.

        Find files/directories that need to be deleted,
        not being present in the local folder.

        :param relative_path: position inside the shared tree; ``None``
            (the default) starts from the root.
        """
        if not relative_path:
            relative_path = str()  # root of shared directory tree
        remote_path = path_join(self.remote_path, relative_path)
        local_path = path_join(self.local_path, relative_path)
        for remote_st in self.sftp.listdir_attr(remote_path):
            r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename))
            inner_remote_path = path_join(remote_path, remote_st.filename)
            inner_local_path = path_join(local_path, remote_st.filename)
            # check if remote_st is a symlink
            # otherwise could delete file outside shared directory
            if S_ISLNK(r_lstat.st_mode):
                if self._must_be_deleted(inner_local_path, r_lstat):
                    self.remote_delete(inner_remote_path, r_lstat)
                # Never recurse through a symlink, even if it targets a dir.
                continue
            if self._must_be_deleted(inner_local_path, remote_st):
                self.remote_delete(inner_remote_path, remote_st)
            elif S_ISDIR(remote_st.st_mode):
                # Directory kept: descend and prune its contents too.
                self.check_for_deletion(
                    path_join(relative_path, remote_st.filename)
                )
    def create_update_symlink(self, link_destination, remote_path):
        """Create a new link pointing to link_destination in remote_path position.

        Any pre-existing node at *remote_path* is removed first, so the
        link is effectively (re)created from scratch.
        """
        try:  # if there's anything, delete it
            self.sftp.remove(remote_path)
        except IOError:  # that's fine, nothing exists there!
            pass
        finally:  # and recreate the link
            try:
                self.sftp.symlink(link_destination, remote_path)
            except OSError as e:
                # Sometimes, if links are "too" different, symlink fails.
                # Sadly, nothing we can do about it.
                self.logger.error("error while symlinking {} to {}: {}".format(
                    remote_path, link_destination, e))
    def node_check_for_upload_create(self, relative_path, f):
        """Check if the given directory tree node has to be uploaded/created on the remote folder.

        Handles, in order: directories (created remotely if missing, then
        recursed into), symbolic links (recreated remotely, with optional
        prefix fixing), and regular files (uploaded when size/mtime differ
        or the remote copy is missing).  Anything else is skipped.

        :param relative_path: directory of *f*, relative to the shared root
            (falsy means the root itself).
        :param f: basename of the node to examine.
        """
        if not relative_path:
            # we're at the root of the shared directory tree
            relative_path = str()
        # the (absolute) local address of f.
        local_path = path_join(self.local_path, relative_path, f)
        try:
            l_st = os.lstat(local_path)
        except OSError as e:
            """A little background here.
            Sometimes, in big clusters configurations (mail, etc.),
            files could disappear or be moved, suddenly.
            There's nothing to do about it,
            system should be stopped before doing backups.
            Anyway, we log it, and skip it.
            """
            self.logger.error("error while checking {}: {}".format(relative_path, e))
            return
        if local_path in self.exclude_list:
            self.logger.info("Skipping excluded file %s.", local_path)
            return
        # the (absolute) remote address of f.
        remote_path = path_join(self.remote_path, relative_path, f)
        # First case: f is a directory
        if S_ISDIR(l_st.st_mode):
            # we check if the folder exists on the remote side
            # it has to be a folder, otherwise it would have already been
            # deleted
            try:
                self.sftp.stat(remote_path)
            except IOError:  # it doesn't exist yet on remote side
                self.sftp.mkdir(remote_path)
            self._match_modes(remote_path, l_st)
            # now, we should traverse f too (recursion magic!)
            self.check_for_upload_create(path_join(relative_path, f))
        # Second case: f is a symbolic link
        elif S_ISLNK(l_st.st_mode):
            # read the local link
            local_link = os.readlink(local_path)
            absolute_local_link = os.path.realpath(local_link)
            # is it absolute?
            is_absolute = local_link.startswith("/")
            # and does it point inside the shared directory?
            # add trailing slash (security)
            trailing_local_path = path_join(self.local_path, '')
            relpath = os.path.commonprefix(
                [absolute_local_link,
                 trailing_local_path]
            ) == trailing_local_path
            if relpath:
                relative_link = absolute_local_link[len(trailing_local_path):]
            else:
                relative_link = None
            """
            # Refactor them all, be efficient!
            # Case A: absolute link pointing outside shared directory
            # (we can only update the remote part)
            if is_absolute and not relpath:
                self.create_update_symlink(local_link, remote_path)
            # Case B: absolute link pointing inside shared directory
            # (we can leave it as it is or fix the prefix to match the one of the remote server)
            elif is_absolute and relpath:
                if self.fix_symlinks:
                    self.create_update_symlink(
                        join(
                            self.remote_path,
                            relative_link,
                        ),
                        remote_path
                    )
                else:
                    self.create_update_symlink(local_link, remote_path)
            # Case C: relative link pointing outside shared directory
            # (all we can do is try to make the link anyway)
            elif not is_absolute and not relpath:
                self.create_update_symlink(local_link, remote_path)
            # Case D: relative link pointing inside shared directory
            # (we preserve the relativity and link it!)
            elif not is_absolute and relpath:
                self.create_update_symlink(local_link, remote_path)
            """
            # NOTE(review): only case B above (absolute link inside the
            # share) is special-cased here; the other cases fall through
            # to the shared call below — confirm this is intended.
            if is_absolute and relpath:
                if self.fix_symlinks:
                    self.create_update_symlink(
                        path_join(
                            self.remote_path,
                            relative_link,
                        ),
                        remote_path
                    )
                else:
                    self.create_update_symlink(local_link, remote_path)
        # Third case: regular file
        elif S_ISREG(l_st.st_mode):
            try:
                r_st = self.sftp.lstat(remote_path)
                if self._file_need_upload(l_st, r_st):
                    self.file_upload(local_path, remote_path, l_st)
            except IOError as e:
                # NOTE(review): IOErrors other than ENOENT are silently
                # swallowed here — the file is neither uploaded nor logged.
                if e.errno == errno.ENOENT:
                    self.file_upload(local_path, remote_path, l_st)
        # Anything else.
        else:
            self.logger.warning("Skipping unsupported file %s.", local_path)
    def check_for_upload_create(self, relative_path=None):
        """Traverse the relative_path tree and check for files that need to be uploaded/created.

        Relativity here refers to the shared directory tree.

        :param relative_path: subdirectory (relative to the shared root)
            to scan; ``None`` scans the root itself.
        """
        for f in os.listdir(
            path_join(
                self.local_path, relative_path) if relative_path else self.local_path
        ):
            self.node_check_for_upload_create(relative_path, f)
    def run(self):
        """Run the sync.

        Confront the local and the remote directories and perform the needed changes.
        Exits the process (status 1) when the remote base directory is
        missing and must not be created, or cannot be opened at all.
        """
        # Check if remote path is present
        try:
            self.sftp.stat(self.remote_path)
        except FileNotFoundError as e:
            if self.create_remote_directory:
                self.sftp.mkdir(self.remote_path)
                self.logger.info(
                    "Created missing remote dir: '" + self.remote_path + "'")
            else:
                self.logger.error(
                    "Remote folder does not exists. "
                    "Add '-r' to create it if missing.")
                sys.exit(1)
        try:
            if self.delete:
                # First check for items to be removed
                self.check_for_deletion()
            # Now scan local for items to upload/create
            self.check_for_upload_create()
        except FileNotFoundError:
            # If this happens, probably the remote folder doesn't exist.
            self.logger.error(
                "Error while opening remote folder. Are you sure it does exist?")
            sys.exit(1)
def create_parser():
"""Create the CLI argument parser."""
parser = argparse.ArgumentParser(
description='Sync a local and a remote folder through SFTP.'
)
parser.add_argument(
"path",
type=str,
metavar="local-path",
help="the path of the local folder",
)
parser.add_argument(
"remote",
type=str,
metavar="user[:password]@hostname:remote-path",
help="the ssh-url ([user[:password]@]hostname:remote-path) of the remote folder. "
"The hostname can be specified as a ssh_config's hostname too. "
"Every missing information will be gathered from there",
)
parser.add_argument(
"-k",
"--key",
metavar="identity-path",
action="append",
help="private key identity path (defaults to ~/.ssh/id_rsa)"
)
parser.add_argument(
"-l",
"--logging",
choices=['CRITICAL',
'ERROR',
'WARNING',
'INFO',
'DEBUG',
'NOTSET'],
default='ERROR',
help="set logging level"
)
parser.add_argument(
"-p",
"--port",
default=22,
type=int,
help="SSH remote port (defaults to 22)"
)
parser.add_argument(
"-f",
"--fix-symlinks",
action="store_true",
help="fix symbolic links on remote side"
)
parser.add_argument(
"-a",
"--ssh-agent",
action="store_true",
help="enable ssh-agent support"
)
parser.add_argument(
"-c",
"--ssh-config",
metavar="ssh_config path",
default="~/.ssh/config",
type=str,
help="path to the ssh-configuration file (default to ~/.ssh/config)"
)
parser.add_argument(
"-n",
"--known-hosts",
metavar="known_hosts path",
default="~/.ssh/known_hosts",
type=str,
help="path to the openSSH known_hosts file"
)
parser.add_argument(
"-d",
"--disable-known-hosts",
action="store_true",
help="disable known_hosts fingerprint checking (security warning!)"
)
parser.add_argument(
"-e",
"--exclude-from",
metavar="exclude-from-file-path",
type=str,
help="exclude files matching pattern in exclude-from-file-path"
)
parser.add_argument(
"-t",
"--do-not-delete",
action="store_true",
help="do not delete remote files missing from local folder"
)
parser.add_argument(
"-o",
"--allow-unknown",
action="store_true",
help="allow connection to unknown hosts"
)
parser.add_argument(
"-r",
"--create-remote-directory",
action="store_true",
help="Create remote base directory if missing on remote"
)
return parser
def main(args=None):
"""The main."""
parser = create_parser()
args = vars(parser.parse_args(args))
log_mapping = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}
log_level = log_mapping[args['logging']]
del(args['logging'])
global logger
logger = configure_logging(log_level)
args_mapping = {
"path": "local_path",
"remote": "remote_url",
"ssh_config": "ssh_config_path",
"exclude_from": "exclude_file",
"known_hosts": "known_hosts_path",
"do_not_delete": "delete",
"key": "identity_files",
}
kwargs = { # convert the argument names to class constructor parameters
args_mapping[k]: v
for k, v in args.items()
if v and k in args_mapping
}
kwargs.update({
k: v
for k, v in args.items()
if v and k not in args_mapping
})
# Special case: disable known_hosts check
if args['disable_known_hosts']:
kwargs['known_hosts_path'] = None
del(kwargs['disable_known_hosts'])
# Toggle `do_not_delete` flag
if "delete" in kwargs:
kwargs["delete"] = not kwargs["delete"]
# Manually set the default identity file.
kwargs["identity_files"] = kwargs.get("identity_files", None) or ["~/.ssh/id_rsa"]
sync = SFTPClone(
**kwargs
)
sync.run()
if __name__ == '__main__':
main()
|
unbit/sftpclone | sftpclone/sftpclone.py | get_ssh_agent_keys | python | def get_ssh_agent_keys(logger):
agent, agent_keys = None, None
try:
agent = paramiko.agent.Agent()
_agent_keys = agent.get_keys()
if not _agent_keys:
agent.close()
logger.error(
"SSH agent didn't provide any valid key. Trying to continue..."
)
else:
agent_keys = tuple(k for k in _agent_keys)
except paramiko.SSHException:
if agent:
agent.close()
agent = None
logger.error("SSH agent speaks a non-compatible protocol. Ignoring it.")
finally:
return agent, agent_keys | Ask the SSH agent for a list of keys, and return it.
:return: A reference to the SSH agent and a list of keys. | train | https://github.com/unbit/sftpclone/blob/1cc89478e680fc4e0d12b1a15b5bafd0390d05da/sftpclone/sftpclone.py#L88-L113 | null | #!/usr/bin/env python
# coding=utf-8
# Python 2.7 backward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import paramiko
import paramiko.py3compat
import os
import os.path
import sys
import errno
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_IMODE, S_IFMT
import argparse
import logging
from getpass import getuser, getpass
import glob
import socket
"""SFTPClone: sync local and remote directories."""
logger = None
try:
# Not available in Python 2.x
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def configure_logging(level=logging.DEBUG):
"""Configure the module logging engine."""
if level == logging.DEBUG:
# For debugging purposes, log from everyone!
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s'
)
return logging
logger = logging.getLogger(__name__)
logger.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def path_join(*args):
"""
Wrapper around `os.path.join`.
Makes sure to join paths of the same type (bytes).
"""
args = (paramiko.py3compat.u(arg) for arg in args)
return os.path.join(*args)
def parse_username_password_hostname(remote_url):
    """
    Parse a command line string and return username, password, remote hostname and remote path.

    :param remote_url: A command line string of the form
        ``[user[:password]@]hostname:remote-path``.
    :return: A tuple, containing username, password, remote hostname and
        remote path; username and password are ``None`` when absent.
    """
    assert remote_url
    assert ':' in remote_url
    username = password = None
    if '@' in remote_url:
        # Split on the *last* '@' so passwords containing '@' survive.
        username, hostname = remote_url.rsplit('@', 1)
    else:
        hostname = remote_url
    # Everything past the first ':' of the host part is the remote path.
    hostname, remote_path = hostname.split(':', 1)
    if username and ':' in username:
        username, password = username.split(':', 1)
    assert hostname
    assert remote_path
    return username, password, hostname, remote_path
class SFTPClone(object):
"""The SFTPClone class."""
def __init__(self, local_path, remote_url,
identity_files=None, port=None, fix_symlinks=False,
ssh_config_path=None, ssh_agent=False,
exclude_file=None, known_hosts_path=None,
delete=True, allow_unknown=False,
create_remote_directory=False,
):
"""Init the needed parameters and the SFTPClient."""
self.local_path = os.path.realpath(os.path.expanduser(local_path))
self.logger = logger or configure_logging()
self.create_remote_directory = create_remote_directory
if not os.path.exists(self.local_path):
self.logger.error("Local path MUST exist. Exiting.")
sys.exit(1)
if exclude_file:
with open(exclude_file) as f:
# As in rsync's exclude from, ignore lines with leading ; and #
# and treat each path as relative (thus by removing the leading
# /)
exclude_list = [
line.rstrip().lstrip("/")
for line in f
if not line.startswith((";", "#"))
]
# actually, is a set of excluded files
self.exclude_list = {
g
for pattern in exclude_list
for g in glob.glob(path_join(self.local_path, pattern))
}
else:
self.exclude_list = set()
username, password, hostname, self.remote_path = parse_username_password_hostname(remote_url)
identity_files = identity_files or []
proxy_command = None
if ssh_config_path:
try:
with open(os.path.expanduser(ssh_config_path)) as c_file:
ssh_config = paramiko.SSHConfig()
ssh_config.parse(c_file)
c = ssh_config.lookup(hostname)
hostname = c.get("hostname", hostname)
username = c.get("user", username)
port = int(c.get("port", port))
identity_files = c.get("identityfile", identity_files)
proxy_command = c.get("proxycommand")
except Exception as e:
# it could be safe to continue anyway,
# because parameters could have been manually specified
self.logger.error(
"Error while parsing ssh_config file: %s. Trying to continue anyway...", e
)
# Set default values
if not username:
username = getuser() # defaults to current user
port = port or 22
allow_unknown = allow_unknown or False
self.chown = False
self.fix_symlinks = fix_symlinks or False
self.delete = delete if delete is not None else True
if ssh_agent:
agent, agent_keys = get_ssh_agent_keys(self.logger)
else:
agent, agent_keys = None, None
if not identity_files and not password and not agent_keys:
self.logger.error(
"You need to specify either a password, an identity or to enable the ssh-agent support."
)
sys.exit(1)
# only root can change file owner
if username == 'root':
self.chown = True
sock = (hostname, port)
if proxy_command is not None:
sock = paramiko.proxy.ProxyCommand(proxy_command)
try:
transport = paramiko.Transport(sock)
except socket.gaierror:
self.logger.error(
"Hostname not known. Are you sure you inserted it correctly?")
sys.exit(1)
try:
ssh_host = hostname if port == 22 else "[{}]:{}".format(hostname, port)
known_hosts = None
"""
Before starting the transport session, we have to configure it.
Specifically, we need to configure the preferred PK algorithm.
If the system already knows a public key of a specific kind for
a remote host, we have to peek its type as the preferred one.
"""
if known_hosts_path:
known_hosts = paramiko.HostKeys()
known_hosts_path = os.path.realpath(
os.path.expanduser(known_hosts_path))
try:
known_hosts.load(known_hosts_path)
except IOError:
self.logger.error(
"Error while loading known hosts file at {}. Exiting...".format(
known_hosts_path)
)
sys.exit(1)
known_keys = known_hosts.lookup(ssh_host)
if known_keys is not None:
# one or more keys are already known
# set their type as preferred
transport.get_security_options().key_types = \
tuple(known_keys.keys())
transport.start_client()
if not known_hosts:
self.logger.warning("Security warning: skipping known hosts check...")
else:
pubk = transport.get_remote_server_key()
if ssh_host in known_hosts.keys():
if not known_hosts.check(ssh_host, pubk):
self.logger.error(
"Security warning: "
"remote key fingerprint {} for hostname "
"{} didn't match the one in known_hosts {}. "
"Exiting...".format(
pubk.get_base64(),
ssh_host,
known_hosts.lookup(hostname),
)
)
sys.exit(1)
elif not allow_unknown:
prompt = ("The authenticity of host '{}' can't be established.\n"
"{} key is {}.\n"
"Are you sure you want to continue connecting? [y/n] ").format(
ssh_host, pubk.get_name(), pubk.get_base64())
try:
# Renamed to `input` in Python 3.x
response = raw_input(prompt)
except NameError:
response = input(prompt)
# Note: we do not modify the user's known_hosts file
if not (response == "y" or response == "yes"):
self.logger.error(
"Host authentication failed."
)
sys.exit(1)
def perform_key_auth(pkey):
try:
transport.auth_publickey(
username=username,
key=pkey
)
return True
except paramiko.SSHException:
self.logger.warning(
"Authentication with identity {}... failed".format(pkey.get_base64()[:10])
)
return False
if password: # Password auth, if specified.
transport.auth_password(
username=username,
password=password
)
elif agent_keys: # SSH agent keys have higher priority
for pkey in agent_keys:
if perform_key_auth(pkey):
break # Authentication worked.
else: # None of the keys worked.
raise paramiko.SSHException
elif identity_files: # Then follow identity file (specified from CL or ssh_config)
# Try identity files one by one, until one works
for key_path in identity_files:
key_path = os.path.expanduser(key_path)
try:
key = paramiko.RSAKey.from_private_key_file(key_path)
except paramiko.PasswordRequiredException:
pk_password = getpass(
"It seems that your identity from '{}' is encrypted. "
"Please enter your password: ".format(key_path)
)
try:
key = paramiko.RSAKey.from_private_key_file(key_path, pk_password)
except paramiko.SSHException:
self.logger.error(
"Incorrect passphrase. Cannot decode private key from '{}'.".format(key_path)
)
continue
except IOError or paramiko.SSHException:
self.logger.error(
"Something went wrong while opening '{}'. Skipping it.".format(key_path)
)
continue
if perform_key_auth(key):
break # Authentication worked.
else: # None of the keys worked.
raise paramiko.SSHException
else: # No authentication method specified, we shouldn't arrive here.
assert False
except paramiko.SSHException:
self.logger.error(
"None of the provided authentication methods worked. Exiting."
)
transport.close()
sys.exit(1)
finally:
if agent:
agent.close()
self.sftp = paramiko.SFTPClient.from_transport(transport)
if self.remote_path.startswith("~"):
# nasty hack to let getcwd work without changing dir!
self.sftp.chdir('.')
self.remote_path = self.remote_path.replace(
"~", self.sftp.getcwd()) # home is the initial sftp dir
@staticmethod
def _file_need_upload(l_st, r_st):
return True if \
l_st.st_size != r_st.st_size or int(l_st.st_mtime) != r_st.st_mtime \
else False
@staticmethod
def _must_be_deleted(local_path, r_st):
"""Return True if the remote correspondent of local_path has to be deleted.
i.e. if it doesn't exists locally or if it has a different type from the remote one."""
# if the file doesn't exists
if not os.path.lexists(local_path):
return True
# or if the file type is different
l_st = os.lstat(local_path)
if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode):
return True
return False
def _match_modes(self, remote_path, l_st):
"""Match mod, utime and uid/gid with locals one."""
self.sftp.chmod(remote_path, S_IMODE(l_st.st_mode))
self.sftp.utime(remote_path, (l_st.st_atime, l_st.st_mtime))
if self.chown:
self.sftp.chown(remote_path, l_st.st_uid, l_st.st_gid)
def file_upload(self, local_path, remote_path, l_st):
"""Upload local_path to remote_path and set permission and mtime."""
self.sftp.put(local_path, remote_path)
self._match_modes(remote_path, l_st)
def remote_delete(self, remote_path, r_st):
"""Remove the remote directory node."""
# If it's a directory, then delete content and directory
if S_ISDIR(r_st.st_mode):
for item in self.sftp.listdir_attr(remote_path):
full_path = path_join(remote_path, item.filename)
self.remote_delete(full_path, item)
self.sftp.rmdir(remote_path)
# Or simply delete files
else:
try:
self.sftp.remove(remote_path)
except FileNotFoundError as e:
self.logger.error(
"error while removing {}. trace: {}".format(remote_path, e)
)
def check_for_deletion(self, relative_path=None):
"""Traverse the entire remote_path tree.
Find files/directories that need to be deleted,
not being present in the local folder.
"""
if not relative_path:
relative_path = str() # root of shared directory tree
remote_path = path_join(self.remote_path, relative_path)
local_path = path_join(self.local_path, relative_path)
for remote_st in self.sftp.listdir_attr(remote_path):
r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename))
inner_remote_path = path_join(remote_path, remote_st.filename)
inner_local_path = path_join(local_path, remote_st.filename)
# check if remote_st is a symlink
# otherwise could delete file outside shared directory
if S_ISLNK(r_lstat.st_mode):
if self._must_be_deleted(inner_local_path, r_lstat):
self.remote_delete(inner_remote_path, r_lstat)
continue
if self._must_be_deleted(inner_local_path, remote_st):
self.remote_delete(inner_remote_path, remote_st)
elif S_ISDIR(remote_st.st_mode):
self.check_for_deletion(
path_join(relative_path, remote_st.filename)
)
def create_update_symlink(self, link_destination, remote_path):
"""Create a new link pointing to link_destination in remote_path position."""
try: # if there's anything, delete it
self.sftp.remove(remote_path)
except IOError: # that's fine, nothing exists there!
pass
finally: # and recreate the link
try:
self.sftp.symlink(link_destination, remote_path)
except OSError as e:
# Sometimes, if links are "too" different, symlink fails.
# Sadly, nothing we can do about it.
self.logger.error("error while symlinking {} to {}: {}".format(
remote_path, link_destination, e))
def node_check_for_upload_create(self, relative_path, f):
    """Check if the given directory tree node has to be uploaded/created on the remote folder.

    :param relative_path: parent path relative to the shared directory root
        (falsy means the root itself).
    :param f: name of the node to examine inside that parent.
    Handles three node kinds: directories (mkdir + recurse), symlinks
    (recreate/fix on remote), and regular files (upload if needed).
    """
    if not relative_path:
        # we're at the root of the shared directory tree
        relative_path = str()
    # the (absolute) local address of f.
    local_path = path_join(self.local_path, relative_path, f)
    try:
        l_st = os.lstat(local_path)
    except OSError as e:
        """A little background here.
        Sometimes, in big clusters configurations (mail, etc.),
        files could disappear or be moved, suddenly.
        There's nothing to do about it,
        system should be stopped before doing backups.
        Anyway, we log it, and skip it.
        """
        self.logger.error("error while checking {}: {}".format(relative_path, e))
        return
    if local_path in self.exclude_list:
        self.logger.info("Skipping excluded file %s.", local_path)
        return
    # the (absolute) remote address of f.
    remote_path = path_join(self.remote_path, relative_path, f)
    # First case: f is a directory
    if S_ISDIR(l_st.st_mode):
        # we check if the folder exists on the remote side
        # it has to be a folder, otherwise it would have already been
        # deleted
        try:
            self.sftp.stat(remote_path)
        except IOError:
            # it doesn't exist yet on remote side: create it
            self.sftp.mkdir(remote_path)
        self._match_modes(remote_path, l_st)
        # now, we should traverse f too (recursion magic!)
        self.check_for_upload_create(path_join(relative_path, f))
    # Second case: f is a symbolic link
    elif S_ISLNK(l_st.st_mode):
        # read the local link
        local_link = os.readlink(local_path)
        absolute_local_link = os.path.realpath(local_link)
        # is it absolute?
        is_absolute = local_link.startswith("/")
        # and does it point inside the shared directory?
        # add trailing slash (security)
        trailing_local_path = path_join(self.local_path, '')
        relpath = os.path.commonprefix(
            [absolute_local_link,
             trailing_local_path]
        ) == trailing_local_path
        if relpath:
            # link target expressed relative to the shared root
            relative_link = absolute_local_link[len(trailing_local_path):]
        else:
            relative_link = None
        """
        # Refactor them all, be efficient!
        # Case A: absolute link pointing outside shared directory
        # (we can only update the remote part)
        if is_absolute and not relpath:
            self.create_update_symlink(local_link, remote_path)
        # Case B: absolute link pointing inside shared directory
        # (we can leave it as it is or fix the prefix to match the one of the remote server)
        elif is_absolute and relpath:
            if self.fix_symlinks:
                self.create_update_symlink(
                    join(
                        self.remote_path,
                        relative_link,
                    ),
                    remote_path
                )
            else:
                self.create_update_symlink(local_link, remote_path)
        # Case C: relative link pointing outside shared directory
        # (all we can do is try to make the link anyway)
        elif not is_absolute and not relpath:
            self.create_update_symlink(local_link, remote_path)
        # Case D: relative link pointing inside shared directory
        # (we preserve the relativity and link it!)
        elif not is_absolute and relpath:
            self.create_update_symlink(local_link, remote_path)
        """
        if is_absolute and relpath:
            if self.fix_symlinks:
                # Case B with -f: rewrite the prefix to the remote base.
                self.create_update_symlink(
                    path_join(
                        self.remote_path,
                        relative_link,
                    ),
                    remote_path
                )
        # NOTE(review): reconstructed dedent — this `else` pairs with the
        # outer `if is_absolute and relpath`, i.e. it covers cases A/C/D
        # from the commented-out analysis above. Confirm against upstream.
        else:
            self.create_update_symlink(local_link, remote_path)
    # Third case: regular file
    elif S_ISREG(l_st.st_mode):
        try:
            r_st = self.sftp.lstat(remote_path)
            if self._file_need_upload(l_st, r_st):
                self.file_upload(local_path, remote_path, l_st)
        except IOError as e:
            if e.errno == errno.ENOENT:
                # Missing on remote: first-time upload.
                self.file_upload(local_path, remote_path, l_st)
            # NOTE(review): IOErrors other than ENOENT are silently
            # swallowed here — confirm that is intended.
    # Anything else.
    else:
        self.logger.warning("Skipping unsupported file %s.", local_path)
def check_for_upload_create(self, relative_path=None):
    """Scan one level of the local tree (relative to the shared root) and
    dispatch every entry to node_check_for_upload_create."""
    if relative_path:
        scan_dir = path_join(self.local_path, relative_path)
    else:
        scan_dir = self.local_path
    for entry in os.listdir(scan_dir):
        self.node_check_for_upload_create(relative_path, entry)
def run(self):
    """Perform the synchronisation.

    Ensures the remote base directory exists (optionally creating it),
    then prunes stale remote entries and uploads local changes.
    """
    try:
        self.sftp.stat(self.remote_path)
    except FileNotFoundError:
        # Remote base directory is missing.
        if not self.create_remote_directory:
            self.logger.error(
                "Remote folder does not exists. "
                "Add '-r' to create it if missing.")
            sys.exit(1)
        self.sftp.mkdir(self.remote_path)
        self.logger.info(
            "Created missing remote dir: '" + self.remote_path + "'")
    try:
        if self.delete:
            # Remove remote items that vanished locally first...
            self.check_for_deletion()
        # ...then push new/updated local content.
        self.check_for_upload_create()
    except FileNotFoundError:
        # Most likely the remote folder is not actually reachable.
        self.logger.error(
            "Error while opening remote folder. Are you sure it does exist?")
        sys.exit(1)
def create_parser():
    """Build and return the command-line argument parser for the sync tool.

    Two positionals (local path, remote ssh-url) plus a set of optional
    flags controlling keys, logging, port, symlink fixing, ssh-agent,
    config/known_hosts paths, exclusions, deletion and remote creation.
    """
    parser = argparse.ArgumentParser(
        description='Sync a local and a remote folder through SFTP.'
    )
    parser.add_argument(
        "path",
        type=str,
        metavar="local-path",
        help="the path of the local folder",
    )
    parser.add_argument(
        "remote",
        type=str,
        metavar="user[:password]@hostname:remote-path",
        help="the ssh-url ([user[:password]@]hostname:remote-path) of the remote folder. "
             "The hostname can be specified as a ssh_config's hostname too. "
             "Every missing information will be gathered from there",
    )
    # Optional flags, declared as (names, options) pairs and added in bulk.
    flag_specs = [
        (("-k", "--key"),
         dict(metavar="identity-path", action="append",
              help="private key identity path (defaults to ~/.ssh/id_rsa)")),
        (("-l", "--logging"),
         dict(choices=['CRITICAL', 'ERROR', 'WARNING',
                       'INFO', 'DEBUG', 'NOTSET'],
              default='ERROR',
              help="set logging level")),
        (("-p", "--port"),
         dict(default=22, type=int,
              help="SSH remote port (defaults to 22)")),
        (("-f", "--fix-symlinks"),
         dict(action="store_true",
              help="fix symbolic links on remote side")),
        (("-a", "--ssh-agent"),
         dict(action="store_true",
              help="enable ssh-agent support")),
        (("-c", "--ssh-config"),
         dict(metavar="ssh_config path", default="~/.ssh/config", type=str,
              help="path to the ssh-configuration file (default to ~/.ssh/config)")),
        (("-n", "--known-hosts"),
         dict(metavar="known_hosts path", default="~/.ssh/known_hosts", type=str,
              help="path to the openSSH known_hosts file")),
        (("-d", "--disable-known-hosts"),
         dict(action="store_true",
              help="disable known_hosts fingerprint checking (security warning!)")),
        (("-e", "--exclude-from"),
         dict(metavar="exclude-from-file-path", type=str,
              help="exclude files matching pattern in exclude-from-file-path")),
        (("-t", "--do-not-delete"),
         dict(action="store_true",
              help="do not delete remote files missing from local folder")),
        (("-o", "--allow-unknown"),
         dict(action="store_true",
              help="allow connection to unknown hosts")),
        (("-r", "--create-remote-directory"),
         dict(action="store_true",
              help="Create remote base directory if missing on remote")),
    ]
    for names, options in flag_specs:
        parser.add_argument(*names, **options)
    return parser
def main(args=None):
    """The main.

    Parses the CLI arguments, configures logging, translates argparse
    names into SFTPClone constructor parameters and runs the sync.
    :param args: optional argv list (defaults to sys.argv via argparse).
    """
    parser = create_parser()
    args = vars(parser.parse_args(args))
    # Map CLI logging-level names to the logging module's constants.
    log_mapping = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
        'NOTSET': logging.NOTSET,
    }
    log_level = log_mapping[args['logging']]
    del(args['logging'])
    global logger
    logger = configure_logging(log_level)
    # CLI argument name -> SFTPClone constructor parameter name.
    args_mapping = {
        "path": "local_path",
        "remote": "remote_url",
        "ssh_config": "ssh_config_path",
        "exclude_from": "exclude_file",
        "known_hosts": "known_hosts_path",
        "do_not_delete": "delete",
        "key": "identity_files",
    }
    kwargs = {  # convert the argument names to class constructor parameters
        args_mapping[k]: v
        for k, v in args.items()
        if v and k in args_mapping
    }
    # Pass through remaining truthy options under their own names.
    kwargs.update({
        k: v
        for k, v in args.items()
        if v and k not in args_mapping
    })
    # Special case: disable known_hosts check
    if args['disable_known_hosts']:
        kwargs['known_hosts_path'] = None
        # The del must stay inside the `if`: when the flag is False it was
        # already filtered out of kwargs by the truthiness check above.
        del(kwargs['disable_known_hosts'])
    # Toggle `do_not_delete` flag
    if "delete" in kwargs:
        kwargs["delete"] = not kwargs["delete"]
    # Manually set the default identity file.
    kwargs["identity_files"] = kwargs.get("identity_files", None) or ["~/.ssh/id_rsa"]
    sync = SFTPClone(
        **kwargs
    )
    sync.run()
# Script entry point when executed directly (not imported).
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.