index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
57,362 | PX4/flight_review | refs/heads/main | /app/plot_app/events.py | """ Event parsing """
import json
import lzma
import os
import sys
import random
from helper import download_file_maybe
from config import get_events_url, get_events_filename, get_metadata_cache_path
from pyulog import ULog
#pylint: disable=wrong-import-position
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libevents/libs/python'))
from libevents_parse.parser import Parser
#pylint: disable=invalid-name,global-statement
class FileCache:
    """ Very simple file cache with a maximum number of files """

    def __init__(self, path, max_num_files=1000):
        """
        :param path: directory for the cached files (created if it does not exist)
        :param max_num_files: maximum number of files to keep in the cache
        """
        self._path = path
        self._max_num_files = max_num_files
        # exist_ok avoids a TOCTOU race when several server processes initialize
        # the cache concurrently (isdir + mkdir is not atomic), and makedirs
        # also creates missing parent directories
        os.makedirs(path, exist_ok=True)

    def access(self, file_name):
        """ check if a file exists in the cache """
        return os.path.isfile(os.path.join(self._path, file_name))

    def insert(self, file_name, data):
        """ insert data (bytes) """
        # check cache size
        cache_files = os.listdir(self._path)
        if len(cache_files) >= self._max_num_files:
            # select a random file (could be improved to LRU)
            # (if flight review runs in multi-process mode, there's a minimal chance we delete a
            # file that's trying to be accessed by another instance)
            os.remove(os.path.join(self._path, random.choice(cache_files)))
        # write
        with open(os.path.join(self._path, file_name), 'wb') as cache_file:
            cache_file.write(data)

    @property
    def path(self):
        """ Get configured path """
        return self._path
def get_event_definitions_from_log_file(ulog: ULog):
    """
    Get the event definitions json file from the log file.
    :return: path to json file or None
    """
    # both the (compressed) definitions and their hash must be in the log
    if 'metadata_events' not in ulog.msg_info_multiple_dict or \
            'metadata_events_sha256' not in ulog.msg_info_dict:
        return None
    file_hash = ulog.msg_info_dict['metadata_events_sha256']
    # sanity-check the hash so it is safe to embed in a file name
    if len(file_hash) > 64 or not file_hash.isalnum():
        return None

    file_cache = FileCache(get_metadata_cache_path())
    cached_file_name = 'events.{}.json'.format(file_hash)
    if not file_cache.access(cached_file_name):
        # not cached yet: decompress the definitions from the log & store them
        compressed = b''.join(ulog.msg_info_multiple_dict['metadata_events'][0])
        file_cache.insert(cached_file_name, lzma.decompress(compressed))
    return os.path.join(file_cache.path, cached_file_name)
__event_parser = None  # fallback event parser, used if the log doesn't contain the event definitions

def get_event_parser(ulog: ULog):
    """ get event parser instance or None on error """
    global __event_parser
    events_profile = 'dev'

    log_definitions = get_event_definitions_from_log_file(ulog)
    if log_definitions is not None:
        # the log carries its own definitions -> use those
        with open(log_definitions, 'r', encoding="utf8") as json_file:
            parser = Parser()
            parser.load_definitions(json.load(json_file))
            parser.set_profile(events_profile)
            return parser

    # No json definitions in the log -> use global definitions
    events_json_xz = get_events_filename()
    # check for cached file update
    downloaded = download_file_maybe(events_json_xz, get_events_url())
    file_changed = downloaded == 2
    first_load = downloaded == 1 and __event_parser is None
    if file_changed or first_load:
        # decompress
        with lzma.open(events_json_xz, 'rt') as json_file:
            parser = Parser()
            parser.load_definitions(json.load(json_file))
            parser.set_profile(events_profile)
            __event_parser = parser
    return __event_parser
def get_logged_events(ulog):
    """
    Get the events as list of messages
    :return: list of (timestamp, time str, log level str, message) tuples
    """
    try:
        event_parser = get_event_parser(ulog)
    except Exception as exception:  # pylint: disable=broad-except
        # event parsing is best-effort: a failure here must not break the page
        print('Failed to get event parser: {}'.format(exception))
        return []

    def event_log_level_str(log_level: int):
        """ convert a numeric event log level to its display string """
        return {0: 'EMERGENCY',
                1: 'ALERT',
                2: 'CRITICAL',
                3: 'ERROR',
                4: 'WARNING',
                5: 'NOTICE',
                6: 'INFO',
                7: 'DEBUG',
                8: 'PROTOCOL',
                9: 'DISABLED'}.get(log_level, 'UNKNOWN')

    def time_str(t):
        """ format a timestamp in [us] as h:mm:ss """
        m1, s1 = divmod(int(t/1e6), 60)
        h1, m1 = divmod(m1, 60)
        return "{:d}:{:02d}:{:02d}".format(h1, m1, s1)

    # parse events
    messages = []
    try:
        events = ulog.get_dataset('event')
        all_ids = events.data['id']
        for event_idx, event_id in enumerate(all_ids):
            # the external log level is stored in the upper 4 bits
            log_level = (events.data['log_levels'][event_idx] >> 4) & 0xf
            if log_level >= 8:
                # PROTOCOL/DISABLED events are not meant to be displayed
                continue
            # collect the argument bytes; the number of 'arguments[i]' fields
            # depends on the logged message format
            args = []
            i = 0
            while True:
                arg_str = 'arguments[{}]'.format(i)
                if arg_str not in events.data:
                    break
                args.append(events.data[arg_str][event_idx])
                i += 1
            log_level_str = event_log_level_str(log_level)
            t = events.data['timestamp'][event_idx]
            event = None
            if event_parser is not None:
                event = event_parser.parse(event_id, bytes(args))
            if event is None:
                messages.append((t, time_str(t), log_level_str,
                                 '[Unknown event with ID {:}]'.format(event_id)))
            elif event.group() == "default":
                # only show default group
                messages.append((t, time_str(t), log_level_str, event.message()))
            # we could expand this a bit for events:
            # - show the description too
            # - handle url's as link (currently it's shown as text, and all tags are escaped)
    except (KeyError, IndexError, ValueError):
        # no events in log
        pass
    return messages
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,363 | PX4/flight_review | refs/heads/main | /app/download_logs.py | #! /usr/bin/env python3
""" Script to download public logs """
import os
import glob
import argparse
import json
import datetime
import sys
import time
import requests
from plot_app.config_tables import *
def get_arguments():
    """ Get parsed CLI arguments """
    arg_parser = argparse.ArgumentParser(description='Python script for downloading public logs '
                                         'from the PX4/flight_review database.',
                                         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # general download behavior
    arg_parser.add_argument('--max-num', '-n', type=int, default=-1,
                            help='Maximum number of files to download that match the search criteria. '
                            'Default: download all files.')
    arg_parser.add_argument('-d', '--download-folder', type=str, default="data/downloaded/",
                            help='The folder to store the downloaded logfiles.')
    arg_parser.add_argument('--print', action='store_true', dest="print_entries",
                            help='Whether to only print (not download) the database entries.')
    arg_parser.add_argument('--overwrite', action='store_true', default=False,
                            help='Whether to overwrite already existing files in download folder.')
    # server endpoints
    arg_parser.add_argument('--db-info-api', type=str, default="https://review.px4.io/dbinfo",
                            help='The url at which the server provides the dbinfo API.')
    arg_parser.add_argument('--download-api', type=str, default="https://review.px4.io/download",
                            help='The url at which the server provides the download API.')
    # filter options
    arg_parser.add_argument('--mav-type', type=str, default=None, nargs='+',
                            help='Filter logs by mav type (case insensitive). Specifying multiple '
                            'mav types is possible. e.g. Quadrotor, Hexarotor')
    arg_parser.add_argument('--flight-modes', type=str, default=None, nargs='+',
                            help='Filter logs by flight modes. If multiple are provided, the log must '
                            'contain all modes. e.g. Mission')
    arg_parser.add_argument('--error-labels', default=None, nargs='+', type=str,
                            help='Filter logs by error labels. If multiple are provided, the log must '
                            'contain all labels. e.g. Vibration')
    arg_parser.add_argument('--rating', default=None, type=str, nargs='+',
                            help='Filter logs by rating. e.g. Good')
    arg_parser.add_argument('--uuid', default=None, type=str, nargs='+',
                            help='Filter logs by a particular vehicle uuid. e.g. 0123456789')
    arg_parser.add_argument('--log-id', default=None, type=str, nargs='+',
                            help='Filter logs by a particular log id')
    arg_parser.add_argument('--vehicle-name', default=None, type=str,
                            help='Filter logs by a particular vehicle name.')
    arg_parser.add_argument('--airframe-name', default=None, type=str,
                            help='Filter logs by a particular airframe name. e.g. Generic Quadrotor X')
    arg_parser.add_argument('--airframe-type', default=None, type=str,
                            help='Filter logs by a particular airframe type. e.g. Quadrotor X')
    arg_parser.add_argument('--latest-per-vehicle', action='store_true', dest="latest_per_vehicle",
                            help='Download only the latest log (by date) for each ' \
                            'unique vehicle (uuid).')
    arg_parser.add_argument('--source', default=None, type=str,
                            help='The source of the log upload. e.g. ["webui", "CI"]')
    arg_parser.add_argument('--git-hash', default=None, type=str,
                            help='The git hash of the PX4 Firmware version.')
    return arg_parser.parse_args()
def flight_modes_to_ids(flight_modes):
    """
    returns a list of mode ids for a list of mode labels
    """
    # the label is the first element of each table entry
    return [mode_id for mode_id, entry in flight_modes_table.items()
            if entry[0] in flight_modes]
def error_labels_to_ids(error_labels):
    """
    returns a list of error ids for a list of error labels

    (raises KeyError for an unknown label)
    """
    # invert the id -> label table so labels can be looked up directly
    # (renamed the loop variable: 'id' shadowed the builtin)
    label_to_id = {label: error_id for error_id, label in error_labels_table.items()}
    error_ids = [label_to_id[error_label] for error_label in error_labels]
    return error_ids
def main():
    """ main script entry point """
    args = get_arguments()

    try:
        # the db_info_api sends a json file with a list of all public database entries
        db_entries_list = requests.get(url=args.db_info_api, timeout=5*60).json()
    except:
        # deliberately broad: print a short hint, then re-raise so the script aborts
        print("Server request failed.")
        raise

    if args.print_entries:
        # only print the json output without downloading logs
        print(json.dumps(db_entries_list, indent=4, sort_keys=True))
    else:
        if not os.path.isdir(args.download_folder): # returns true if path is an existing directory
            print("creating download directory " + args.download_folder)
            os.makedirs(args.download_folder)

        # find already existing logs in download folder (they are skipped unless --overwrite)
        logfile_pattern = os.path.join(os.path.abspath(args.download_folder), "*.ulg")
        logfiles = glob.glob(os.path.join(os.getcwd(), logfile_pattern))
        logids = frozenset(os.path.splitext(os.path.basename(f))[0] for f in logfiles)

        # filter for mav types (case-insensitive match against any of the given types)
        if args.mav_type is not None:
            mav = [mav_type.lower() for mav_type in args.mav_type]
            db_entries_list = [entry for entry in db_entries_list
                               if entry["mav_type"].lower() in mav]

        # filter for rating
        if args.rating is not None:
            rate = [rating.lower() for rating in args.rating]
            db_entries_list = [entry for entry in db_entries_list
                               if entry["rating"].lower() in rate]

        # filter for error labels
        if args.error_labels is not None:
            err_labels = error_labels_to_ids(args.error_labels)
            db_entries_list = [entry for entry in db_entries_list
                               if set(err_labels).issubset(set(entry["error_labels"]))]
            # compares numbers, must contain all

        # filter for flight modes
        if args.flight_modes is not None:
            modes = flight_modes_to_ids(args.flight_modes)
            db_entries_list = [entry for entry in db_entries_list
                               if set(modes).issubset(set(entry["flight_modes"]))]
            # compares numbers, must contain all

        # filter for vehicle uuid
        if args.uuid is not None:
            db_entries_list = [
                entry for entry in db_entries_list if entry['vehicle_uuid'] in args.uuid]

        # filter log_id (compared without dashes, so both id formats are accepted on the CLI)
        if args.log_id is not None:
            arg_log_ids_without_dashes = [log_id.replace("-", "") for log_id in args.log_id]
            db_entries_list = [
                entry for entry in db_entries_list
                if entry['log_id'].replace("-", "") in arg_log_ids_without_dashes]

        # filter for vehicle name
        if args.vehicle_name is not None:
            db_entries_list = [
                entry for entry in db_entries_list if entry['vehicle_name'] == args.vehicle_name]

        # filter for airframe name
        if args.airframe_name is not None:
            db_entries_list = [
                entry for entry in db_entries_list if entry['airframe_name'] == args.airframe_name]

        # filter for airframe type
        if args.airframe_type is not None:
            db_entries_list = [
                entry for entry in db_entries_list if entry['airframe_type'] == args.airframe_type]

        if args.latest_per_vehicle:
            # first pass: find latest log_date for all different vehicles
            uuids = {}
            for entry in db_entries_list:
                if 'vehicle_uuid' in entry:
                    uuid = entry['vehicle_uuid']
                    date = datetime.datetime.strptime(entry['log_date'], '%Y-%m-%d')
                    if uuid in uuids:
                        if date > uuids[uuid]:
                            uuids[uuid] = date
                    else:
                        uuids[uuid] = date

            # second pass: use the latest log for each vehicle
            # (several logs can share the latest date; added_uuids keeps only the first)
            db_entries_list_filtered = []
            added_uuids = set()
            for entry in db_entries_list:
                if 'vehicle_uuid' in entry:
                    date = datetime.datetime.strptime(entry['log_date'], '%Y-%m-%d')
                    uuid = entry['vehicle_uuid']
                    if date == uuids[entry['vehicle_uuid']] and not uuid in added_uuids:
                        db_entries_list_filtered.append(entry)
                        added_uuids.add(uuid)
            db_entries_list = db_entries_list_filtered

        # filter for upload source (entries without a 'source' field are dropped)
        if args.source is not None:
            db_entries_list = [
                entry for entry in db_entries_list
                if 'source' in entry and entry['source'] == args.source]

        # filter for firmware git hash
        if args.git_hash is not None:
            db_entries_list = [
                entry for entry in db_entries_list if entry['ver_sw'] == args.git_hash]

        # sort list order to first download the newest log files
        db_entries_list = sorted(
            db_entries_list,
            key=lambda x: datetime.datetime.strptime(x['log_date'], '%Y-%m-%d'),
            reverse=True)

        # set number of files to download (--max-num caps it)
        n_en = len(db_entries_list)
        if args.max_num > 0:
            n_en = min(n_en, args.max_num)

        n_downloaded = 0
        n_skipped = 0
        for i in range(n_en):
            entry_id = db_entries_list[i]['log_id']
            num_tries = 0
            # retry each download up to 100 times; a successful (or skipped)
            # attempt breaks out of the retry loop before the num_tries check
            for num_tries in range(100):
                try:
                    if args.overwrite or entry_id not in logids:
                        file_path = os.path.join(args.download_folder, entry_id + ".ulg")
                        print('downloading {:}/{:} ({:})'.format(i + 1, n_en, entry_id))
                        request = requests.get(url=args.download_api +
                                               "?log=" + entry_id, stream=True,
                                               timeout=10*60)
                        # stream the file to disk in chunks to bound memory usage
                        with open(file_path, 'wb') as log_file:
                            for chunk in request.iter_content(chunk_size=1024):
                                if chunk: # filter out keep-alive new chunks
                                    log_file.write(chunk)
                        n_downloaded += 1
                    else:
                        n_skipped += 1
                    break
                except Exception as ex:
                    # network/IO error: wait and retry (only reached on failure)
                    print(ex)
                    print('Waiting for 30 seconds to retry')
                    time.sleep(30)
                if num_tries == 99:
                    # last retry also failed -> give up entirely
                    print('Retried', str(num_tries + 1), 'times without success, exiting.')
                    sys.exit(1)

        print('{:} logs downloaded to {:}, {:} logs skipped (already downloaded)'.format(
            n_downloaded, args.download_folder, n_skipped))
# standard script entry point (no-op when imported as a module)
if __name__ == '__main__':
    main()
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,364 | PX4/flight_review | refs/heads/main | /app/setup_db.py | #! /usr/bin/env python3
# Script to create or upgrade the SQLite DB
import sqlite3 as lite
import sys
import os
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plot_app'))
from plot_app.config import get_db_filename, get_log_filepath, \
get_cache_filepath, get_kml_filepath, get_overview_img_filepath
def _ensure_directory(path, label):
    """ Create directory 'path' if it does not exist.
        'label' only affects the printed message. """
    if not os.path.exists(path):
        print('creating '+label+' directory '+path)
        os.makedirs(path)

_ensure_directory(get_log_filepath(), 'log')
_ensure_directory(get_cache_filepath(), 'cache')
_ensure_directory(get_kml_filepath(), 'kml')
_ensure_directory(get_overview_img_filepath(), 'overview image')


def _add_column_if_missing(cur, column_names, table, column, definition):
    """ Schema upgrade helper: add 'column' (with SQL type & default in
        'definition') to 'table' unless it is already in 'column_names'. """
    if column not in column_names:
        print('Adding column '+column)
        cur.execute("ALTER TABLE "+table+" ADD COLUMN "+column+" "+definition)


print('creating DB at '+get_db_filename())
con = lite.connect(get_db_filename())
with con:  # commits on success, rolls back on exception
    cur = con.cursor()

    # Logs table (contains information not found in the log file)
    cur.execute("PRAGMA table_info('Logs')")
    columns = cur.fetchall()
    if len(columns) == 0:
        cur.execute("CREATE TABLE Logs("
                    "Id TEXT, " # log id (part of the file name)
                    "Title TEXT, "
                    "Description TEXT, "
                    "OriginalFilename TEXT, "
                    "Date TIMESTAMP, " # date & time when uploaded
                    "AllowForAnalysis INTEGER, " # if 1 allow for statistical analysis
                    "Obfuscated INTEGER, "
                    "Source TEXT, " # where it comes from: 'webui', 'CI', 'QGroundControl'
                    "Email TEXT, " # email (may be empty)
                    "WindSpeed INT, " # Wind speed in beaufort scale
                    "Rating TEXT, " # how the flight was rated
                    "Feedback TEXT, " # additional feedback
                    "Type TEXT, " # upload type: 'personal' (or '') or 'flightreport'
                    "VideoUrl TEXT, "
                    "ErrorLabels TEXT, " # the type of error (if any) that occurred during flight
                    "Public INT, " # if 1 this log can be publicly listed
                    "Token TEXT, " # Security token (currently used to delete the entry)
                    "CONSTRAINT Id_PK PRIMARY KEY (Id))")
    else:
        # table exists: try to upgrade from an older schema version
        column_names = [x[1] for x in columns]
        _add_column_if_missing(cur, column_names, 'Logs', 'Email', "TEXT DEFAULT ''")
        _add_column_if_missing(cur, column_names, 'Logs', 'WindSpeed', "INT DEFAULT -1")
        _add_column_if_missing(cur, column_names, 'Logs', 'Rating', "TEXT DEFAULT ''")
        _add_column_if_missing(cur, column_names, 'Logs', 'Feedback', "TEXT DEFAULT ''")
        _add_column_if_missing(cur, column_names, 'Logs', 'Type', "TEXT DEFAULT ''")
        _add_column_if_missing(cur, column_names, 'Logs', 'VideoUrl', "TEXT DEFAULT ''")
        _add_column_if_missing(cur, column_names, 'Logs', 'ErrorLabels', "TEXT DEFAULT ''")
        _add_column_if_missing(cur, column_names, 'Logs', 'Public', "INT DEFAULT 0")
        _add_column_if_missing(cur, column_names, 'Logs', 'Token', "TEXT DEFAULT ''")

    # LogsGenerated table (information from the log file, for faster access)
    cur.execute("PRAGMA table_info('LogsGenerated')")
    columns = cur.fetchall()
    if len(columns) == 0:
        cur.execute("CREATE TABLE LogsGenerated("
                    "Id TEXT, " # log id
                    "Duration INT, " # logging duration in [s]
                    "MavType TEXT, " # vehicle type
                    "Estimator TEXT, "
                    "AutostartId INT, " # airframe config
                    "Hardware TEXT, " # board
                    "Software TEXT, " # software (git tag)
                    "NumLoggedErrors INT, " # number of logged error messages (or more severe)
                    "NumLoggedWarnings INT, "
                    "FlightModes TEXT, " # all flight modes as comma-separated int's
                    "SoftwareVersion TEXT, " # release version
                    "UUID TEXT, " # vehicle UUID (sys_uuid in log)
                    "FlightModeDurations TEXT, " # comma-separated list of <flight_mode_int>:<duration_sec>
                    "StartTime INT, " #UTC Timestap from GPS log (useful when uploading multiple logs)
                    "CONSTRAINT Id_PK PRIMARY KEY (Id))")
    else:
        # table exists: try to upgrade from an older schema version
        column_names = [x[1] for x in columns]
        _add_column_if_missing(cur, column_names, 'LogsGenerated', 'SoftwareVersion',
                               "TEXT DEFAULT ''")
        _add_column_if_missing(cur, column_names, 'LogsGenerated', 'UUID', "TEXT DEFAULT ''")
        _add_column_if_missing(cur, column_names, 'LogsGenerated', 'FlightModeDurations',
                               "TEXT DEFAULT ''")
        _add_column_if_missing(cur, column_names, 'LogsGenerated', 'StartTime', "INT DEFAULT 0")

    # Vehicle table (contains information about a vehicle)
    cur.execute("PRAGMA table_info('Vehicle')")
    columns = cur.fetchall()
    if len(columns) == 0:
        cur.execute("CREATE TABLE Vehicle("
                    "UUID TEXT, " # vehicle UUID (sys_uuid in log)
                    "LatestLogId TEXT, " # log id of latest uploaded log file
                    "Name TEXT, " # vehicle Name (as provided by the uploader)
                    "FlightTime INTEGER, " # latest flight time in seconds
                    "CONSTRAINT UUID_PK PRIMARY KEY (UUID))")

con.close()
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,365 | PX4/flight_review | refs/heads/main | /app/plot_app/configured_plots.py | """ This contains the list of all drawn plots on the log plotting page """
from html import escape
from bokeh.layouts import column
from bokeh.models import Range1d
from bokeh.models.widgets import Button
from bokeh.io import curdoc
from config import *
from helper import *
from leaflet import ulog_to_polyline
from plotting import *
from plotted_tables import (
get_logged_messages, get_changed_parameters,
get_info_table_html, get_heading_html, get_error_labels_html,
get_hardfault_html, get_corrupt_log_html
)
from vtol_tailsitter import *
#pylint: disable=cell-var-from-loop, undefined-loop-variable,
#pylint: disable=consider-using-enumerate,too-many-statements
def generate_plots(ulog, px4_ulog, db_data, vehicle_data, link_to_3d_page,
link_to_pid_analysis_page):
""" create a list of bokeh plots (and widgets) to show """
plots = []
data = ulog.data_list
# COMPATIBILITY support for old logs
if any(elem.name in ('vehicle_air_data', 'vehicle_magnetometer') for elem in data):
baro_alt_meter_topic = 'vehicle_air_data'
magnetometer_ga_topic = 'vehicle_magnetometer'
else: # old
baro_alt_meter_topic = 'sensor_combined'
magnetometer_ga_topic = 'sensor_combined'
manual_control_sp_controls = ['roll', 'pitch', 'yaw', 'throttle']
manual_control_sp_throttle_range = '[-1, 1]'
vehicle_gps_position_altitude = None
for topic in data:
if topic.name == 'system_power':
# COMPATIBILITY: rename fields to new format
if 'voltage5V_v' in topic.data: # old (prior to PX4/Firmware:213aa93)
topic.data['voltage5v_v'] = topic.data.pop('voltage5V_v')
if 'voltage3V3_v' in topic.data: # old (prior to PX4/Firmware:213aa93)
topic.data['sensors3v3[0]'] = topic.data.pop('voltage3V3_v')
if 'voltage3v3_v' in topic.data:
topic.data['sensors3v3[0]'] = topic.data.pop('voltage3v3_v')
elif topic.name == 'tecs_status':
if 'airspeed_sp' in topic.data: # old (prior to PX4-Autopilot/pull/16585)
topic.data['true_airspeed_sp'] = topic.data.pop('airspeed_sp')
elif topic.name == 'manual_control_setpoint':
if 'throttle' not in topic.data: # old (prior to PX4-Autopilot/pull/15949)
manual_control_sp_controls = ['y', 'x', 'r', 'z']
manual_control_sp_throttle_range = '[0, 1]'
elif topic.name == 'vehicle_gps_position':
if ulog.msg_info_dict.get('ver_data_format', 0) >= 2:
vehicle_gps_position_altitude = topic.data['altitude_msl_m']
else: # COMPATIBILITY
vehicle_gps_position_altitude = topic.data['alt'] * 0.001
if any(elem.name == 'vehicle_angular_velocity' for elem in data):
rate_estimated_topic_name = 'vehicle_angular_velocity'
rate_groundtruth_topic_name = 'vehicle_angular_velocity_groundtruth'
rate_field_names = ['xyz[0]', 'xyz[1]', 'xyz[2]']
else: # old
rate_estimated_topic_name = 'vehicle_attitude'
rate_groundtruth_topic_name = 'vehicle_attitude_groundtruth'
rate_field_names = ['rollspeed', 'pitchspeed', 'yawspeed']
if any(elem.name == 'manual_control_switches' for elem in data):
manual_control_switches_topic = 'manual_control_switches'
else: # old
manual_control_switches_topic = 'manual_control_setpoint'
dynamic_control_alloc = any(elem.name in ('actuator_motors', 'actuator_servos')
for elem in data)
actuator_controls_0 = ActuatorControls(ulog, dynamic_control_alloc, 0)
actuator_controls_1 = ActuatorControls(ulog, dynamic_control_alloc, 1)
# initialize flight mode changes
flight_mode_changes = get_flight_mode_changes(ulog)
# VTOL state changes & vehicle type
vtol_states = None
is_vtol = False
is_vtol_tailsitter = False
try:
cur_dataset = ulog.get_dataset('vehicle_status')
if np.amax(cur_dataset.data['is_vtol']) == 1:
is_vtol = True
# check if is tailsitter
is_vtol_tailsitter = np.amax(cur_dataset.data['is_vtol_tailsitter']) == 1
# find mode after transitions (states: 1=transition, 2=FW, 3=MC)
if 'vehicle_type' in cur_dataset.data:
vehicle_type_field = 'vehicle_type'
vtol_state_mapping = {2: 2, 1: 3}
vehicle_type = cur_dataset.data['vehicle_type']
in_transition_mode = cur_dataset.data['in_transition_mode']
vtol_states = []
for i in range(len(vehicle_type)):
# a VTOL can change state also w/o in_transition_mode set
# (e.g. in Manual mode)
if i == 0 or in_transition_mode[i-1] != in_transition_mode[i] or \
vehicle_type[i-1] != vehicle_type[i]:
vtol_states.append((cur_dataset.data['timestamp'][i],
in_transition_mode[i]))
else: # COMPATIBILITY: old logs (https://github.com/PX4/Firmware/pull/11918)
vtol_states = cur_dataset.list_value_changes('in_transition_mode')
vehicle_type_field = 'is_rotary_wing'
vtol_state_mapping = {0: 2, 1: 3}
for i in range(len(vtol_states)):
if vtol_states[i][1] == 0:
t = vtol_states[i][0]
idx = np.argmax(cur_dataset.data['timestamp'] >= t) + 1
vtol_states[i] = (t, vtol_state_mapping[
cur_dataset.data[vehicle_type_field][idx]])
vtol_states.append((ulog.last_timestamp, -1))
except (KeyError, IndexError) as error:
vtol_states = None
# Heading
curdoc().template_variables['title_html'] = get_heading_html(
ulog, px4_ulog, db_data, link_to_3d_page,
additional_links=[("Open PID Analysis", link_to_pid_analysis_page)])
# info text on top (logging duration, max speed, ...)
curdoc().template_variables['info_table_html'] = \
get_info_table_html(ulog, px4_ulog, db_data, vehicle_data, vtol_states)
curdoc().template_variables['error_labels_html'] = get_error_labels_html()
hardfault_html = get_hardfault_html(ulog)
if hardfault_html is not None:
curdoc().template_variables['hardfault_html'] = hardfault_html
corrupt_log_html = get_corrupt_log_html(ulog)
if corrupt_log_html:
curdoc().template_variables['corrupt_log_html'] = corrupt_log_html
# Position plot
data_plot = DataPlot2D(data, plot_config, 'vehicle_local_position',
x_axis_label='[m]', y_axis_label='[m]', plot_height='large')
data_plot.add_graph('y', 'x', colors2[0], 'Estimated',
check_if_all_zero=True)
if not data_plot.had_error: # vehicle_local_position is required
data_plot.change_dataset('vehicle_local_position_setpoint')
data_plot.add_graph('y', 'x', colors2[1], 'Setpoint')
# groundtruth (SITL only)
data_plot.change_dataset('vehicle_local_position_groundtruth')
data_plot.add_graph('y', 'x', color_gray, 'Groundtruth')
# GPS + position setpoints
plot_map(ulog, plot_config, map_type='plain', setpoints=True,
bokeh_plot=data_plot.bokeh_plot)
if data_plot.finalize() is not None:
plots.append(data_plot.bokeh_plot)
if any(elem.name == 'vehicle_gps_position' for elem in ulog.data_list):
# Leaflet Map
try:
pos_datas, flight_modes = ulog_to_polyline(ulog, flight_mode_changes)
curdoc().template_variables['pos_datas'] = pos_datas
curdoc().template_variables['pos_flight_modes'] = flight_modes
except:
pass
curdoc().template_variables['has_position_data'] = True
# initialize parameter changes
changed_params = None
if not 'replay' in ulog.msg_info_dict: # replay can have many param changes
if len(ulog.changed_parameters) > 0:
changed_params = ulog.changed_parameters
plots.append(None) # save space for the param change button
### Add all data plots ###
x_range_offset = (ulog.last_timestamp - ulog.start_timestamp) * 0.05
x_range = Range1d(ulog.start_timestamp - x_range_offset, ulog.last_timestamp + x_range_offset)
# Altitude estimate
data_plot = DataPlot(data, plot_config, 'vehicle_gps_position',
y_axis_label='[m]', title='Altitude Estimate',
changed_params=changed_params, x_range=x_range)
data_plot.add_graph([lambda data: ('alt', vehicle_gps_position_altitude)],
colors8[0:1], ['GPS Altitude (MSL)'])
data_plot.change_dataset(baro_alt_meter_topic)
data_plot.add_graph(['baro_alt_meter'], colors8[1:2], ['Barometer Altitude'])
data_plot.change_dataset('vehicle_global_position')
data_plot.add_graph(['alt'], colors8[2:3], ['Fused Altitude Estimation'])
data_plot.change_dataset('position_setpoint_triplet')
data_plot.add_circle(['current.alt'], [plot_config['mission_setpoint_color']],
['Altitude Setpoint'])
data_plot.change_dataset(actuator_controls_0.thrust_sp_topic)
if actuator_controls_0.thrust_z_neg is not None:
data_plot.add_graph([lambda data: ('thrust', actuator_controls_0.thrust_z_neg*100)],
colors8[6:7], ['Thrust [0, 100]'])
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# VTOL tailistter orientation conversion, if relevant
if is_vtol_tailsitter:
[tailsitter_attitude, tailsitter_rates] = tailsitter_orientation(ulog, vtol_states)
# Roll/Pitch/Yaw angle & angular rate
for index, axis in enumerate(['roll', 'pitch', 'yaw']):
# angle
axis_name = axis.capitalize()
data_plot = DataPlot(data, plot_config, 'vehicle_attitude',
y_axis_label='[deg]', title=axis_name+' Angle',
plot_height='small', changed_params=changed_params,
x_range=x_range)
if is_vtol_tailsitter:
if tailsitter_attitude[axis] is not None:
data_plot.add_graph([lambda data: (axis+'_q',
np.rad2deg(tailsitter_attitude[axis]))],
colors3[0:1], [axis_name+' Estimated'], mark_nan=True)
else:
data_plot.add_graph([lambda data: (axis, np.rad2deg(data[axis]))],
colors3[0:1], [axis_name+' Estimated'], mark_nan=True)
data_plot.change_dataset('vehicle_attitude_setpoint')
data_plot.add_graph([lambda data: (axis+'_d', np.rad2deg(data[axis+'_d']))],
colors3[1:2], [axis_name+' Setpoint'],
use_step_lines=True)
# Yaw only: additionally plot the feed-forward setpoint move rate, converted
# rad/s -> deg/s to match the plot's y axis.
if axis == 'yaw':
data_plot.add_graph(
[lambda data: ('yaw_sp_move_rate', np.rad2deg(data['yaw_sp_move_rate']))],
colors3[2:3], [axis_name+' FF Setpoint [deg/s]'],
use_step_lines=True)
# Overlay simulation groundtruth attitude (topic only present in SITL logs).
data_plot.change_dataset('vehicle_attitude_groundtruth')
data_plot.add_graph([lambda data: (axis, np.rad2deg(data[axis]))],
[color_gray], [axis_name+' Groundtruth'])
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# rate: one plot per axis with estimated rate, setpoint, controller integral
# and (if available) groundtruth
data_plot = DataPlot(data, plot_config, rate_estimated_topic_name,
y_axis_label='[deg/s]', title=axis_name+' Angular Rate',
plot_height='small', changed_params=changed_params,
x_range=x_range)
# Tailsitter VTOLs: use the pre-rotated rates computed earlier
# (tailsitter_rates), since body rates are rotated w.r.t. the hover frame.
if is_vtol_tailsitter:
if tailsitter_rates[axis] is not None:
data_plot.add_graph([lambda data: (axis+'_q',
np.rad2deg(tailsitter_rates[axis]))],
colors3[0:1], [axis_name+' Rate Estimated'], mark_nan=True)
else:
# rate_field_names presumably maps the axis index to the topic field name
# (defined earlier in this function) — rad/s converted to deg/s here.
data_plot.add_graph([lambda data: (axis+'speed',
np.rad2deg(data[rate_field_names[index]]))],
colors3[0:1], [axis_name+' Rate Estimated'], mark_nan=True)
data_plot.change_dataset('vehicle_rates_setpoint')
data_plot.add_graph([lambda data: (axis, np.rad2deg(data[axis]))],
colors3[1:2], [axis_name+' Rate Setpoint'],
mark_nan=True, use_step_lines=True)
axis_letter = axis[0].upper()
# Fallback legend suffix if the integrator-limit param is not in the log.
rate_int_limit = '(*100)'
# this param is MC/VTOL only (it will not exist on FW)
rate_int_limit_param = 'MC_' + axis_letter + 'R_INT_LIM'
if rate_int_limit_param in ulog.initial_parameters:
rate_int_limit = '[-{0:.0f}, {0:.0f}]'.format(
ulog.initial_parameters[rate_int_limit_param]*100)
# Rate integral scaled by 100 so it is visible on the deg/s axis.
data_plot.change_dataset('rate_ctrl_status')
data_plot.add_graph([lambda data: (axis, data[axis+'speed_integ']*100)],
colors3[2:3], [axis_name+' Rate Integral '+rate_int_limit])
data_plot.change_dataset(rate_groundtruth_topic_name)
data_plot.add_graph([lambda data: (axis+'speed',
np.rad2deg(data[rate_field_names[index]]))],
[color_gray], [axis_name+' Rate Groundtruth'])
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# Local position: one small plot per axis, estimate vs. setpoint
for axis in ['x', 'y', 'z']:
data_plot = DataPlot(data, plot_config, 'vehicle_local_position',
y_axis_label='[m]', title='Local Position '+axis.upper(),
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph([axis], colors2[0:1], [axis.upper()+' Estimated'], mark_nan=True)
data_plot.change_dataset('vehicle_local_position_setpoint')
data_plot.add_graph([axis], colors2[1:2], [axis.upper()+' Setpoint'],
use_step_lines=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# Velocity: all three local-frame components in one plot
data_plot = DataPlot(data, plot_config, 'vehicle_local_position',
y_axis_label='[m/s]', title='Velocity',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['vx', 'vy', 'vz'], colors8[0:3], ['X', 'Y', 'Z'])
data_plot.change_dataset('vehicle_local_position_setpoint')
data_plot.add_graph(['vx', 'vy', 'vz'], [colors8[5], colors8[4], colors8[6]],
['X Setpoint', 'Y Setpoint', 'Z Setpoint'], use_step_lines=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# Visual Odometry (only if topic found)
if any(elem.name == 'vehicle_visual_odometry' for elem in data):
# Vision position, compared against groundtruth when logged (SITL)
data_plot = DataPlot(data, plot_config, 'vehicle_visual_odometry',
y_axis_label='[m]', title='Visual Odometry Position',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['x', 'y', 'z'], colors3, ['X', 'Y', 'Z'], mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
data_plot.change_dataset('vehicle_local_position_groundtruth')
data_plot.add_graph(['x', 'y', 'z'], colors8[2:5],
['Groundtruth X', 'Groundtruth Y', 'Groundtruth Z'])
if data_plot.finalize() is not None: plots.append(data_plot)
# Vision velocity
data_plot = DataPlot(data, plot_config, 'vehicle_visual_odometry',
y_axis_label='[m]', title='Visual Odometry Velocity',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['vx', 'vy', 'vz'], colors3, ['X', 'Y', 'Z'], mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
data_plot.change_dataset('vehicle_local_position_groundtruth')
data_plot.add_graph(['vx', 'vy', 'vz'], colors8[2:5],
['Groundtruth VX', 'Groundtruth VY', 'Groundtruth VZ'])
if data_plot.finalize() is not None: plots.append(data_plot)
# Vision attitude (roll/pitch/yaw in degrees)
data_plot = DataPlot(data, plot_config, 'vehicle_visual_odometry',
y_axis_label='[deg]', title='Visual Odometry Attitude',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph([lambda data: ('roll', np.rad2deg(data['roll'])),
lambda data: ('pitch', np.rad2deg(data['pitch'])),
lambda data: ('yaw', np.rad2deg(data['yaw']))],
colors3, ['Roll', 'Pitch', 'Yaw'], mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
data_plot.change_dataset('vehicle_attitude_groundtruth')
data_plot.add_graph([lambda data: ('roll', np.rad2deg(data['roll'])),
lambda data: ('pitch', np.rad2deg(data['pitch'])),
lambda data: ('yaw', np.rad2deg(data['yaw']))],
colors8[2:5],
['Roll Groundtruth', 'Pitch Groundtruth', 'Yaw Groundtruth'])
if data_plot.finalize() is not None: plots.append(data_plot)
# Vision attitude rate
data_plot = DataPlot(data, plot_config, 'vehicle_visual_odometry',
y_axis_label='[deg]', title='Visual Odometry Attitude Rate',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph([lambda data: ('rollspeed', np.rad2deg(data['rollspeed'])),
lambda data: ('pitchspeed', np.rad2deg(data['pitchspeed'])),
lambda data: ('yawspeed', np.rad2deg(data['yawspeed']))],
colors3, ['Roll Rate', 'Pitch Rate', 'Yaw Rate'], mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
data_plot.change_dataset(rate_groundtruth_topic_name)
data_plot.add_graph([lambda data: ('rollspeed', np.rad2deg(data[rate_field_names[0]])),
lambda data: ('pitchspeed', np.rad2deg(data[rate_field_names[1]])),
lambda data: ('yawspeed', np.rad2deg(data[rate_field_names[2]]))],
colors8[2:5],
['Roll Rate Groundtruth', 'Pitch Rate Groundtruth',
'Yaw Rate Groundtruth'])
if data_plot.finalize() is not None: plots.append(data_plot)
# Vision latency: difference between the logging timestamp and the
# sample timestamp, converted us -> ms (factor 1e-3)
data_plot = DataPlot(data, plot_config, 'vehicle_visual_odometry',
y_axis_label='[ms]', title='Visual Odometry Latency',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(
[lambda data: ('latency', 1e-3*(data['timestamp'] - data['timestamp_sample']))],
colors3, ['VIO Latency'], mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# Airspeed vs Ground speed: but only if there's valid airspeed data or a VTOL
try:
if is_vtol or ulog.get_dataset('airspeed') is not None:
data_plot = DataPlot(data, plot_config, 'vehicle_global_position',
y_axis_label='[m/s]', title='Airspeed',
plot_height='small',
changed_params=changed_params, x_range=x_range)
data_plot.add_graph([lambda data: ('groundspeed_estimated',
np.sqrt(data['vel_n']**2 + data['vel_e']**2))],
colors8[0:1], ['Ground Speed Estimated'])
if any(elem.name == 'airspeed_validated' for elem in data):
airspeed_validated = ulog.get_dataset('airspeed_validated')
data_plot.change_dataset('airspeed_validated')
if np.amax(airspeed_validated.data['airspeed_sensor_measurement_valid']) == 1:
data_plot.add_graph(['true_airspeed_m_s'], colors8[1:2],
['True Airspeed'])
else:
data_plot.add_graph(['true_ground_minus_wind_m_s'], colors8[1:2],
['True Airspeed (estimated)'])
else:
data_plot.change_dataset('airspeed')
data_plot.add_graph(['indicated_airspeed_m_s'], colors8[1:2],
['Indicated Airspeed'])
data_plot.change_dataset('vehicle_gps_position')
data_plot.add_graph(['vel_m_s'], colors8[2:3], ['Ground Speed (from GPS)'])
data_plot.change_dataset('tecs_status')
data_plot.add_graph(['true_airspeed_sp'], colors8[3:4], ['True Airspeed Setpoint'])
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
except (KeyError, IndexError) as error:
pass
# TECS (fixed-wing or VTOLs): total energy control height-rate tracking
data_plot = DataPlot(data, plot_config, 'tecs_status', y_start=0, title='TECS',
y_axis_label='[m/s]', plot_height='small',
changed_params=changed_params, x_range=x_range)
data_plot.add_graph(['height_rate', 'height_rate_setpoint'],
colors2, ['Height Rate', 'Height Rate Setpoint'],
mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# manual control inputs
# prefer the manual_control_setpoint topic. Old logs do not contain it
if any(elem.name == 'manual_control_setpoint' for elem in data):
data_plot = DataPlot(data, plot_config, 'manual_control_setpoint',
title='Manual Control Inputs (Radio or Joystick)',
plot_height='small', y_range=Range1d(-1.1, 1.1),
changed_params=changed_params, x_range=x_range)
# manual_control_sp_controls / throttle range are version-dependent
# field selections made earlier in this function.
data_plot.add_graph(manual_control_sp_controls + ['aux1', 'aux2'], colors8[0:6],
['Y / Roll', 'X / Pitch', 'Yaw',
'Throttle ' + manual_control_sp_throttle_range, 'Aux1', 'Aux2'])
data_plot.change_dataset(manual_control_switches_topic)
# mode_slot is divided by 6 to fit the [-1.1, 1.1] y range of this plot.
data_plot.add_graph([lambda data: ('mode_slot', data['mode_slot']/6),
lambda data: ('kill_switch', data['kill_switch'] == 1)],
colors8[6:8], ['Flight Mode', 'Kill Switch'])
# TODO: add RTL switch and others? Look at params which functions are mapped?
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
else: # it's an old log (COMPATIBILITY)
data_plot = DataPlot(data, plot_config, 'rc_channels',
title='Raw Radio Control Inputs',
plot_height='small', y_range=Range1d(-1.1, 1.1),
changed_params=changed_params, x_range=x_range)
num_rc_channels = 8
if data_plot.dataset:
# limit to the number of channels actually reported in the log
max_channels = np.amax(data_plot.dataset.data['channel_count'])
if max_channels < num_rc_channels: num_rc_channels = max_channels
legends = []
for i in range(num_rc_channels):
# Annotate each channel with its configured RC function(s), if known.
channel_names = px4_ulog.get_configured_rc_input_names(i)
if channel_names is None:
legends.append('Channel '+str(i))
else:
legends.append('Channel '+str(i)+' ('+', '.join(channel_names)+')')
data_plot.add_graph(['channels['+str(i)+']' for i in range(num_rc_channels)],
colors8[0:num_rc_channels], legends, mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# actuator controls 0: torque setpoints (roll/pitch/yaw) plus thrust.
# actuator_controls_0 is a version-abstraction object (topic + field names)
# resolved earlier in this function.
data_plot = DataPlot(data, plot_config, actuator_controls_0.torque_sp_topic,
y_start=0, title='Actuator Controls',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(actuator_controls_0.torque_axes_field_names,
colors8[0:3], ['Roll', 'Pitch', 'Yaw'], mark_nan=True)
data_plot.change_dataset(actuator_controls_0.thrust_sp_topic)
# thrust_z_neg: upward thrust (negative body z); thrust_x: forward thrust
if actuator_controls_0.thrust_z_neg is not None:
data_plot.add_graph([lambda data: ('thrust', actuator_controls_0.thrust_z_neg)],
colors8[3:4], ['Thrust (up)'], mark_nan=True)
if actuator_controls_0.thrust_x is not None:
data_plot.add_graph([lambda data: ('thrust', actuator_controls_0.thrust_x)],
colors8[4:5], ['Thrust (forward)'], mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# actuator controls (Main) FFT (for filter & output noise analysis)
data_plot = DataPlotFFT(data, plot_config, actuator_controls_0.torque_sp_topic,
title='Actuator Controls FFT', y_range = Range1d(0, 0.01))
data_plot.add_graph(actuator_controls_0.torque_axes_field_names,
colors3, ['Roll', 'Pitch', 'Yaw'])
# Mark the configured filter cutoff frequencies in the spectrum, if set.
if not data_plot.had_error:
if 'MC_DTERM_CUTOFF' in ulog.initial_parameters: # COMPATIBILITY
data_plot.mark_frequency(
ulog.initial_parameters['MC_DTERM_CUTOFF'],
'MC_DTERM_CUTOFF')
if 'IMU_DGYRO_CUTOFF' in ulog.initial_parameters:
data_plot.mark_frequency(
ulog.initial_parameters['IMU_DGYRO_CUTOFF'],
'IMU_DGYRO_CUTOFF')
if 'IMU_GYRO_CUTOFF' in ulog.initial_parameters:
data_plot.mark_frequency(
ulog.initial_parameters['IMU_GYRO_CUTOFF'],
'IMU_GYRO_CUTOFF', 20)
if data_plot.finalize() is not None: plots.append(data_plot)
# angular_velocity FFT (for filter & output noise analysis)
data_plot = DataPlotFFT(data, plot_config, 'vehicle_angular_velocity',
title='Angular Velocity FFT', y_range = Range1d(0, 0.01))
data_plot.add_graph(['xyz[0]', 'xyz[1]', 'xyz[2]'],
colors3, ['Rollspeed', 'Pitchspeed', 'Yawspeed'])
if not data_plot.had_error:
if 'IMU_GYRO_CUTOFF' in ulog.initial_parameters:
data_plot.mark_frequency(
ulog.initial_parameters['IMU_GYRO_CUTOFF'],
'IMU_GYRO_CUTOFF', 20)
# notch filter marker only meaningful if the notch is enabled (freq > 0)
if 'IMU_GYRO_NF_FREQ' in ulog.initial_parameters:
if ulog.initial_parameters['IMU_GYRO_NF_FREQ'] > 0:
data_plot.mark_frequency(
ulog.initial_parameters['IMU_GYRO_NF_FREQ'],
'IMU_GYRO_NF_FREQ', 70)
if data_plot.finalize() is not None: plots.append(data_plot)
# angular_acceleration FFT (for filter & output noise analysis)
data_plot = DataPlotFFT(data, plot_config, 'vehicle_angular_acceleration',
title='Angular Acceleration FFT')
data_plot.add_graph(['xyz[0]', 'xyz[1]', 'xyz[2]'],
colors3, ['Roll accel', 'Pitch accel', 'Yaw accel'])
if not data_plot.had_error:
if 'IMU_DGYRO_CUTOFF' in ulog.initial_parameters:
data_plot.mark_frequency(
ulog.initial_parameters['IMU_DGYRO_CUTOFF'],
'IMU_DGYRO_CUTOFF')
if 'IMU_GYRO_NF_FREQ' in ulog.initial_parameters:
if ulog.initial_parameters['IMU_GYRO_NF_FREQ'] > 0:
data_plot.mark_frequency(
ulog.initial_parameters['IMU_GYRO_NF_FREQ'],
'IMU_GYRO_NF_FREQ', 70)
if data_plot.finalize() is not None: plots.append(data_plot)
# actuator controls 1 (torque + thrust)
# (only present on VTOL, Fixed-wing config)
data_plot = DataPlot(data, plot_config, actuator_controls_1.torque_sp_topic,
y_start=0, title='Actuator Controls 1 (VTOL in Fixed-Wing mode)',
plot_height='small', changed_params=changed_params, topic_instance=1,
x_range=x_range)
data_plot.add_graph(actuator_controls_1.torque_axes_field_names,
colors8[0:3], ['Roll', 'Pitch', 'Yaw'], mark_nan=True)
data_plot.change_dataset(actuator_controls_1.thrust_sp_topic,
actuator_controls_1.topic_instance)
if actuator_controls_1.thrust_x is not None:
data_plot.add_graph([lambda data: ('thrust', actuator_controls_1.thrust_x)],
colors8[3:4], ['Thrust (forward)'], mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
if dynamic_control_alloc:
# Newer logs (dynamic control allocation): separate motor/servo topics
# actuator motors, actuator servos
actuator_output_plots = [("actuator_motors", "Motor"), ("actuator_servos", "Servo")]
for topic_name, plot_name in actuator_output_plots:
data_plot = DataPlot(data, plot_config, topic_name,
y_range=Range1d(-1, 1), title=plot_name+' Outputs',
plot_height='small', changed_params=changed_params,
x_range=x_range)
num_actuator_outputs = 12
if data_plot.dataset:
# Count the used outputs: stop at the first missing or all-NaN field.
for i in range(num_actuator_outputs):
try:
output_data = data_plot.dataset.data['control['+str(i)+']']
except KeyError:
num_actuator_outputs = i
break
if np.isnan(output_data).all():
num_actuator_outputs = i
break
if num_actuator_outputs > 0:
data_plot.add_graph(['control['+str(i)+']'
for i in range(num_actuator_outputs)],
[colors8[i % 8] for i in range(num_actuator_outputs)],
[plot_name+' '+str(i+1)
for i in range(num_actuator_outputs)])
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
else:
# Older logs: actuator_outputs topic with multiple instances
actuator_output_plots = [(0, "Actuator Outputs (Main)"), (1, "Actuator Outputs (AUX)"),
(2, "Actuator Outputs (EXTRA)")]
for topic_instance, plot_name in actuator_output_plots:
data_plot = DataPlot(data, plot_config, 'actuator_outputs',
y_start=0, title=plot_name, plot_height='small',
changed_params=changed_params, topic_instance=topic_instance,
x_range=x_range)
num_actuator_outputs = 16
# only plot if at least one of the outputs is not constant
all_constant = True
if data_plot.dataset:
max_outputs = np.amax(data_plot.dataset.data['noutputs'])
if max_outputs < num_actuator_outputs: num_actuator_outputs = max_outputs
for i in range(num_actuator_outputs):
output_data = data_plot.dataset.data['output['+str(i)+']']
if not np.all(output_data == output_data[0]):
all_constant = False
if not all_constant:
data_plot.add_graph(['output['+str(i)+']' for i in range(num_actuator_outputs)],
[colors8[i % 8] for i in range(num_actuator_outputs)],
['Output '+str(i) for i in range(num_actuator_outputs)],
mark_nan=True)
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# raw acceleration
data_plot = DataPlot(data, plot_config, 'sensor_combined',
y_axis_label='[m/s^2]', title='Raw Acceleration',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['accelerometer_m_s2[0]', 'accelerometer_m_s2[1]',
'accelerometer_m_s2[2]'], colors3, ['X', 'Y', 'Z'])
if data_plot.finalize() is not None: plots.append(data_plot)
# Vibration Metrics: accel vibration level per IMU instance (up to 4)
data_plot = DataPlot(data, plot_config, 'vehicle_imu_status',
title='Vibration Metrics',
plot_height='small', changed_params=changed_params,
x_range=x_range, y_start=0, topic_instance=0)
data_plot.add_graph(['accel_vibration_metric'], colors8[0:1],
['Accel 0 Vibration Level [m/s^2]'])
data_plot.change_dataset('vehicle_imu_status', 1)
data_plot.add_graph(['accel_vibration_metric'], colors8[1:2],
['Accel 1 Vibration Level [m/s^2]'])
data_plot.change_dataset('vehicle_imu_status', 2)
data_plot.add_graph(['accel_vibration_metric'], colors8[2:3],
['Accel 2 Vibration Level [m/s^2]'])
data_plot.change_dataset('vehicle_imu_status', 3)
# Fix: unit was wrongly labeled [rad/s] — accel_vibration_metric is in m/s^2
# like the other three instances.
data_plot.add_graph(['accel_vibration_metric'], colors8[3:4],
['Accel 3 Vibration Level [m/s^2]'])
# green / orange / red severity bands at 4.905 and 9.81 m/s^2
data_plot.add_horizontal_background_boxes(
['green', 'orange', 'red'], [4.905, 9.81])
if data_plot.finalize() is not None: plots.append(data_plot)
# Acceleration Spectrogram (power spectral density over time)
data_plot = DataPlotSpec(data, plot_config, 'sensor_combined',
y_axis_label='[Hz]', title='Acceleration Power Spectral Density',
plot_height='small', x_range=x_range)
data_plot.add_graph(['accelerometer_m_s2[0]', 'accelerometer_m_s2[1]', 'accelerometer_m_s2[2]'],
['X', 'Y', 'Z'])
if data_plot.finalize() is not None: plots.append(data_plot)
# Filtered Gyro (angular velocity) Spectrogram
data_plot = DataPlotSpec(data, plot_config, 'vehicle_angular_velocity',
y_axis_label='[Hz]', title='Angular velocity Power Spectral Density',
plot_height='small', x_range=x_range)
data_plot.add_graph(['xyz[0]', 'xyz[1]', 'xyz[2]'],
['rollspeed', 'pitchspeed', 'yawspeed'])
if data_plot.finalize() is not None: plots.append(data_plot)
# Filtered angular acceleration Spectrogram
data_plot = DataPlotSpec(data, plot_config, 'vehicle_angular_acceleration',
y_axis_label='[Hz]',
title='Angular acceleration Power Spectral Density',
plot_height='small', x_range=x_range)
data_plot.add_graph(['xyz[0]', 'xyz[1]', 'xyz[2]'],
['roll accel', 'pitch accel', 'yaw accel'])
if data_plot.finalize() is not None: plots.append(data_plot)
# raw angular speed, converted rad/s -> deg/s
data_plot = DataPlot(data, plot_config, 'sensor_combined',
y_axis_label='[deg/s]', title='Raw Angular Speed (Gyroscope)',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph([
lambda data: ('gyro_rad[0]', np.rad2deg(data['gyro_rad[0]'])),
lambda data: ('gyro_rad[1]', np.rad2deg(data['gyro_rad[1]'])),
lambda data: ('gyro_rad[2]', np.rad2deg(data['gyro_rad[2]']))],
colors3, ['X', 'Y', 'Z'])
if data_plot.finalize() is not None: plots.append(data_plot)
# FIFO accel
for instance in range(3):
if add_virtual_fifo_topic_data(ulog, 'sensor_accel_fifo', instance):
# Raw data
data_plot = DataPlot(data, plot_config, 'sensor_accel_fifo_virtual',
y_axis_label='[m/s^2]',
title=f'Raw Acceleration (FIFO, IMU{instance})',
plot_height='small', changed_params=changed_params,
x_range=x_range, topic_instance=instance)
data_plot.add_graph(['x', 'y', 'z'], colors3, ['X', 'Y', 'Z'])
if data_plot.finalize() is not None: plots.append(data_plot)
# power spectral density
data_plot = DataPlotSpec(data, plot_config, 'sensor_accel_fifo_virtual',
y_axis_label='[Hz]',
title=(f'Acceleration Power Spectral Density'
f'(FIFO, IMU{instance})'),
plot_height='normal', x_range=x_range, topic_instance=instance)
data_plot.add_graph(['x', 'y', 'z'], ['X', 'Y', 'Z'])
if data_plot.finalize() is not None: plots.append(data_plot)
# sampling regularity
data_plot = DataPlot(data, plot_config, 'sensor_accel_fifo', y_range=Range1d(0, 25e3),
y_axis_label='[us]',
title=f'Sampling Regularity of Sensor Data (FIFO, IMU{instance})',
plot_height='small',
changed_params=changed_params,
x_range=x_range, topic_instance=instance)
sensor_accel_fifo = ulog.get_dataset('sensor_accel_fifo').data
sampling_diff = np.diff(sensor_accel_fifo['timestamp'])
min_sampling_diff = np.amin(sampling_diff)
plot_dropouts(data_plot.bokeh_plot, ulog.dropouts, min_sampling_diff)
data_plot.add_graph([lambda data: ('timediff', np.append(sampling_diff, 0))],
[colors3[2]], ['delta t (between 2 logged samples)'])
if data_plot.finalize() is not None: plots.append(data_plot)
# FIFO gyro
for instance in range(3):
if add_virtual_fifo_topic_data(ulog, 'sensor_gyro_fifo', instance):
# Raw data
data_plot = DataPlot(data, plot_config, 'sensor_gyro_fifo_virtual',
y_axis_label='[deg/s]', title=f'Raw Gyro (FIFO, IMU{instance})',
plot_height='small', changed_params=changed_params,
x_range=x_range, topic_instance=instance)
data_plot.add_graph(['x', 'y', 'z'], colors3, ['X', 'Y', 'Z'])
data_plot.add_graph([
lambda data: ('x', np.rad2deg(data['x'])),
lambda data: ('y', np.rad2deg(data['y'])),
lambda data: ('z', np.rad2deg(data['z']))],
colors3, ['X', 'Y', 'Z'])
if data_plot.finalize() is not None: plots.append(data_plot)
# power spectral density
data_plot = DataPlotSpec(data, plot_config, 'sensor_gyro_fifo_virtual',
y_axis_label='[Hz]',
title=f'Gyro Power Spectral Density (FIFO, IMU{instance})',
plot_height='normal', x_range=x_range, topic_instance=instance)
data_plot.add_graph(['x', 'y', 'z'], ['X', 'Y', 'Z'])
if data_plot.finalize() is not None: plots.append(data_plot)
# magnetic field strength (magnetometer_ga_topic selected earlier, version-dependent)
data_plot = DataPlot(data, plot_config, magnetometer_ga_topic,
y_axis_label='[gauss]', title='Raw Magnetic Field Strength',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['magnetometer_ga[0]', 'magnetometer_ga[1]',
'magnetometer_ga[2]'], colors3,
['X', 'Y', 'Z'])
if data_plot.finalize() is not None: plots.append(data_plot)
# distance sensor (e.g. lidar/sonar): measured distance and its variance
data_plot = DataPlot(data, plot_config, 'distance_sensor',
y_start=0, y_axis_label='[m]', title='Distance Sensor',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['current_distance', 'variance'], colors3[0:2],
['Distance', 'Variance'])
if data_plot.finalize() is not None: plots.append(data_plot)
# gps uncertainty
# the accuracy values can be really large if there is no fix, so we limit the
# y axis range to some sane values
data_plot = DataPlot(data, plot_config, 'vehicle_gps_position',
title='GPS Uncertainty', y_range=Range1d(0, 40),
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['eph', 'epv', 'satellites_used', 'fix_type'], colors8[::2],
['Horizontal position accuracy [m]', 'Vertical position accuracy [m]',
'Num Satellites used', 'GPS Fix'])
if data_plot.finalize() is not None: plots.append(data_plot)
# gps noise & jamming indicators (receiver-reported)
data_plot = DataPlot(data, plot_config, 'vehicle_gps_position',
y_start=0, title='GPS Noise & Jamming',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['noise_per_ms', 'jamming_indicator'], colors3[0:2],
['Noise per ms', 'Jamming Indicator'])
if data_plot.finalize() is not None: plots.append(data_plot)
# thrust and magnetic field
data_plot = DataPlot(data, plot_config, magnetometer_ga_topic,
y_start=0, title='Thrust and Magnetic Field', plot_height='small',
changed_params=changed_params, x_range=x_range)
data_plot.add_graph(
[lambda data: ('len_mag', np.sqrt(data['magnetometer_ga[0]']**2 +
data['magnetometer_ga[1]']**2 +
data['magnetometer_ga[2]']**2))],
colors3[0:1], ['Norm of Magnetic Field'])
data_plot.change_dataset(actuator_controls_0.thrust_sp_topic)
if actuator_controls_0.thrust is not None:
data_plot.add_graph([lambda data: ('thrust', actuator_controls_0.thrust)],
colors3[1:2], ['Thrust'])
if is_vtol and not dynamic_control_alloc:
data_plot.change_dataset(actuator_controls_1.thrust_sp_topic)
if actuator_controls_1.thrust_x is not None:
data_plot.add_graph([lambda data: ('thrust', actuator_controls_1.thrust_x)],
colors3[2:3], ['Thrust (Fixed-wing'])
if data_plot.finalize() is not None: plots.append(data_plot)
# power: battery state plus board voltage rails if logged
data_plot = DataPlot(data, plot_config, 'battery_status',
y_start=0, title='Power',
plot_height='small', changed_params=changed_params,
x_range=x_range)
# discharged_mah is scaled by 1/100 and remaining by 10 so all series share
# one axis (see legend labels).
data_plot.add_graph(['voltage_v', 'voltage_filtered_v',
'current_a', lambda data: ('discharged_mah', data['discharged_mah']/100),
lambda data: ('remaining', data['remaining']*10)],
colors8[::2]+colors8[1:2],
['Battery Voltage [V]', 'Battery Voltage filtered [V]',
'Battery Current [A]', 'Discharged Amount [mAh / 100]',
'Battery remaining [0=empty, 10=full]'])
data_plot.change_dataset('system_power')
if data_plot.dataset:
# Only plot the rails if present and not all-zero (sensor not connected).
if 'voltage5v_v' in data_plot.dataset.data and \
np.amax(data_plot.dataset.data['voltage5v_v']) > 0.0001:
data_plot.add_graph(['voltage5v_v'], colors8[7:8], ['5 V'])
if 'sensors3v3[0]' in data_plot.dataset.data and \
np.amax(data_plot.dataset.data['sensors3v3[0]']) > 0.0001:
data_plot.add_graph(['sensors3v3[0]'], colors8[5:6], ['3.3 V'])
if data_plot.finalize() is not None: plots.append(data_plot)
# Temperature: collect temperature readings from several sensor topics
data_plot = DataPlot(data, plot_config, 'sensor_baro',
y_start=0, y_axis_label='[C]', title='Temperature',
plot_height='small', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['temperature'], colors8[0:1],
['Baro temperature'])
data_plot.change_dataset('sensor_accel')
data_plot.add_graph(['temperature'], colors8[2:3],
['Accel temperature'])
data_plot.change_dataset('airspeed')
data_plot.add_graph(['air_temperature_celsius'], colors8[4:5],
['Airspeed temperature'])
data_plot.change_dataset('battery_status')
data_plot.add_graph(['temperature'], colors8[6:7],
['Battery temperature'])
if data_plot.finalize() is not None: plots.append(data_plot)
# estimator flags: decode the EKF innovation-check bitfield into named series
try:
data_plot = DataPlot(data, plot_config, 'estimator_status',
y_start=0, title='Estimator Flags',
plot_height='small', changed_params=changed_params,
x_range=x_range)
estimator_status = ulog.get_dataset('estimator_status').data
plot_data = []
plot_labels = []
# (label, extracted bit(s)) pairs; bit positions follow the
# estimator_status.innovation_check_flags encoding
input_data = [
('Health Flags (vel, pos, hgt)', estimator_status['health_flags']),
('Timeout Flags (vel, pos, hgt)', estimator_status['timeout_flags']),
('Velocity Check Bit', (estimator_status['innovation_check_flags'])&0x1),
('Horizontal Position Check Bit', (estimator_status['innovation_check_flags']>>1)&1),
('Vertical Position Check Bit', (estimator_status['innovation_check_flags']>>2)&1),
('Mag X, Y, Z Check Bits', (estimator_status['innovation_check_flags']>>3)&0x7),
('Yaw Check Bit', (estimator_status['innovation_check_flags']>>6)&1),
('Airspeed Check Bit', (estimator_status['innovation_check_flags']>>7)&1),
('Synthetic Sideslip Check Bit', (estimator_status['innovation_check_flags']>>8)&1),
('Height to Ground Check Bit', (estimator_status['innovation_check_flags']>>9)&1),
('Optical Flow X, Y Check Bits', (estimator_status['innovation_check_flags']>>10)&0x3),
]
# filter: show only the flags that have non-zero samples
for cur_label, cur_data in input_data:
if np.amax(cur_data) > 0.1:
data_label = 'flags_'+str(len(plot_data)) # just some unique string
# bind cur_data/data_label as defaults to avoid late-binding of the
# loop variables in the lambda
plot_data.append(lambda d, data=cur_data, label=data_label: (label, data))
plot_labels.append(cur_label)
if len(plot_data) >= 8: # cannot add more than that
break
if len(plot_data) == 0:
# add the plot even in the absence of any problem, so that the user
# can validate that (otherwise it's ambiguous: it could be that the
# estimator_status topic is not logged)
plot_data = [lambda d: ('flags', input_data[0][1])]
plot_labels = [input_data[0][0]]
data_plot.add_graph(plot_data, colors8[0:len(plot_data)], plot_labels)
if data_plot.finalize() is not None: plots.append(data_plot)
except (KeyError, IndexError) as error:
print('Error in estimator plot: '+str(error))
# Failsafe flags: failsafe state plus every failsafe_flags field that was
# ever set during the flight
try:
data_plot = DataPlot(data, plot_config, 'vehicle_status',
y_start=0, title='Failsafe Flags',
plot_height='normal', changed_params=changed_params,
x_range=x_range)
data_plot.add_graph(['failsafe', 'failsafe_and_user_took_over'], [colors8[0], colors8[1]],
['In Failsafe', 'User Took Over'])
num_graphs = 2
# flags that are expected to be set for the whole flight on some setups —
# hide them if they never change
skip_if_always_set = ['auto_mission_missing', 'offboard_control_signal_lost']
data_plot.change_dataset('failsafe_flags')
if data_plot.dataset is not None:
failsafe_flags = data_plot.dataset.data
for failsafe_field in failsafe_flags:
# skip the timestamp and the mode-requirement bitfields
if failsafe_field == 'timestamp' or failsafe_field.startswith('mode_req_'):
continue
cur_data = failsafe_flags[failsafe_field]
# filter: show only the flags that are set at some point
if np.amax(cur_data) >= 1:
if failsafe_field in skip_if_always_set and np.amin(cur_data) >= 1:
continue
data_plot.add_graph([failsafe_field], [colors8[num_graphs % 8]],
[failsafe_field.replace('_', ' ')])
num_graphs += 1
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
except (KeyError, IndexError) as error:
print('Error in failsafe plot: '+str(error))
# cpu load
data_plot = DataPlot(data, plot_config, 'cpuload',
title='CPU & RAM', plot_height='small', y_range=Range1d(0, 1),
changed_params=changed_params, x_range=x_range)
data_plot.add_graph(['ram_usage', 'load'], [colors3[1], colors3[2]],
['RAM Usage', 'CPU Load'])
data_plot.add_span('load', line_color=colors3[2])
data_plot.add_span('ram_usage', line_color=colors3[1])
plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states)
if data_plot.finalize() is not None: plots.append(data_plot)
# sampling: time difference
try:
data_plot = DataPlot(data, plot_config, 'sensor_combined', y_range=Range1d(0, 25e3),
y_axis_label='[us]',
title='Sampling Regularity of Sensor Data', plot_height='small',
changed_params=changed_params, x_range=x_range)
sensor_combined = ulog.get_dataset('sensor_combined').data
sampling_diff = np.diff(sensor_combined['timestamp'])
min_sampling_diff = np.amin(sampling_diff)
plot_dropouts(data_plot.bokeh_plot, ulog.dropouts, min_sampling_diff)
data_plot.add_graph([lambda data: ('timediff', np.append(sampling_diff, 0))],
[colors3[2]], ['delta t (between 2 logged samples)'])
data_plot.change_dataset('estimator_status')
data_plot.add_graph([lambda data: ('time_slip', data['time_slip']*1e6)],
[colors3[1]], ['Estimator time slip (cumulative)'])
if data_plot.finalize() is not None: plots.append(data_plot)
except:
pass
# exchange all DataPlot's with the bokeh_plot and handle parameter changes
param_changes_button = Button(label="Hide Parameter Changes", width=170)
param_change_labels = []
# FIXME: this should be a CustomJS callback, not on the server. However this
# did not work for me.
def param_changes_button_clicked():
""" callback to show/hide parameter changes (toggles all labels at once) """
for label in param_change_labels:
if label.visible:
param_changes_button.label = 'Show Parameter Changes'
label.visible = False
label.text_alpha = 0 # label.visible does not work, so we use this instead
else:
param_changes_button.label = 'Hide Parameter Changes'
label.visible = True
label.text_alpha = 1
param_changes_button.on_click(param_changes_button_clicked)
# Collect per-plot metadata (model id, nav fragment, title) for the Jinja
# template, and replace each DataPlot wrapper with its underlying bokeh plot.
jinja_plot_data = []
for i in range(len(plots)):
# A None placeholder marks where the parameter-changes button goes.
if plots[i] is None:
plots[i] = column(param_changes_button, width=int(plot_width * 0.99))
if isinstance(plots[i], DataPlot):
if plots[i].param_change_label is not None:
param_change_labels.append(plots[i].param_change_label)
plot_title = plots[i].title
plots[i] = plots[i].bokeh_plot
# build an HTML-anchor-safe fragment id from the title
fragment = 'Nav-'+plot_title.replace(' ', '-') \
.replace('&', '_').replace('(', '').replace(')', '')
jinja_plot_data.append({
'model_id': plots[i].ref['id'],
'fragment': fragment,
'title': plot_title
})
# changed parameters
plots.append(get_changed_parameters(ulog, plot_width))
# information about which messages are contained in the log
# TODO: need to load all topics for this (-> log loading will take longer)
# but if we load all topics and the log contains some (external) topics
# with buggy timestamps, it will affect the plotting.
# data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id))
# table_text = []
# for d in data_list_sorted:
# message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data])
# num_data_points = len(d.data['timestamp'])
# table_text.append((d.name, str(d.multi_id), str(message_size), str(num_data_points),
# str(message_size * num_data_points)))
# topics_info = '<table><tr><th>Name</th><th>Topic instance</th><th>Message Size</th>' \
# '<th>Number of data points</th><th>Total bytes</th></tr>' + ''.join(
# ['<tr><td>'+'</td><td>'.join(list(x))+'</td></tr>' for x in table_text]) + '</table>'
# topics_div = Div(text=topics_info, width=int(plot_width*0.9))
# plots.append(column(topics_div, width=int(plot_width*0.9)))
# log messages
plots.append(get_logged_messages(ulog, plot_width))
# console messages, perf & top output: assembled into escaped HTML snippets
top_data = ''
perf_data = ''
console_messages = ''
if 'boot_console_output' in ulog.msg_info_multiple_dict:
console_output = ulog.msg_info_multiple_dict['boot_console_output'][0]
console_output = escape(''.join(console_output))
console_messages = '<p><pre>'+console_output+'</pre></p>'
for state in ['pre', 'post']:
if 'perf_top_'+state+'flight' in ulog.msg_info_multiple_dict:
current_top_data = ulog.msg_info_multiple_dict['perf_top_'+state+'flight'][0]
flight_data = escape('\n'.join(current_top_data))
top_data += '<p>'+state.capitalize()+' Flight:<br/><pre>'+flight_data+'</pre></p>'
if 'perf_counter_'+state+'flight' in ulog.msg_info_multiple_dict:
current_perf_data = ulog.msg_info_multiple_dict['perf_counter_'+state+'flight'][0]
flight_data = escape('\n'.join(current_perf_data))
perf_data += '<p>'+state.capitalize()+' Flight:<br/><pre>'+flight_data+'</pre></p>'
if 'perf_top_watchdog' in ulog.msg_info_multiple_dict:
current_top_data = ulog.msg_info_multiple_dict['perf_top_watchdog'][0]
flight_data = escape('\n'.join(current_top_data))
top_data += '<p>Watchdog:<br/><pre>'+flight_data+'</pre></p>'
additional_data_html = ''
if len(console_messages) > 0:
additional_data_html += '<h5>Console Output</h5>'+console_messages
if len(top_data) > 0:
additional_data_html += '<h5>Processes</h5>'+top_data
if len(perf_data) > 0:
additional_data_html += '<h5>Performance Counters</h5>'+perf_data
if len(additional_data_html) > 0:
# hide by default & use a button to expand
additional_data_html = '''
<button id="show-additional-data-btn" class="btn btn-secondary" data-toggle="collapse" style="min-width:0;"
data-target="#show-additional-data">Show additional Data</button>
<div id="show-additional-data" class="collapse">
{:}
</div>
'''.format(additional_data_html)
curdoc().template_variables['additional_info'] = additional_data_html
curdoc().template_variables['plots'] = jinja_plot_data
return plots
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,366 | PX4/flight_review | refs/heads/main | /app/plot_app/pid_analysis.py | """ PID response analysis """
import colorsys
import numpy as np
from bokeh.models import Range1d, Span, LinearColorMapper, ColumnDataSource, LabelSet
from scipy.interpolate import interp1d
from scipy.ndimage.filters import gaussian_filter1d
from config import colors3
from plotting import DataPlot
# keep the same formatting as the original code
# pylint: skip-file
# Source: https://github.com/Plasmatree/PID-Analyzer
# "THE BEER-WARE LICENSE" (Revision 42):
# <florian.melsheimer@gmx.de> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. Florian Melsheimer
class Trace:
    """ PID response analysis based on a deconvolution using a
    setpoint and the measured process variable as inputs.
    It computes an average, stdev and a 2D histogram.

    Source: https://github.com/Plasmatree/PID-Analyzer (kept close to the
    original formatting on purpose; see file header).
    """
    framelen = 1. # length of each single frame over which to compute response [s]
    resplen = 0.5 # length of response window [s]
    cutfreq = 25. # cut frequency of what is considered as input [Hz]
    tuk_alpha = 1.0 # alpha of tukey window, if used
    superpos = 16 # sub windowing (superpos windows in framelen)
    threshold = 500. # threshold for 'high input rate' [deg/s]
    noise_framelen = 0.3 # window width for noise analysis [s]
    noise_superpos = 16 # subsampling for noise analysis windows
    def __init__(self, name, time, gyro_rate, gyro_setpoint, throttle,
                 d_err=None, debug=None):
        """Initialize a Trace object, that does the analysis for a single axis.
        Note: all data arrays must have the same length as time
        :param name: axis name (e.g. roll)
        :param time: np array with sampling times [s]
        :param gyro_rate: np array with the gyro rates [deg/s]
        :param gyro_setpoint: np array with the gyro rate setpoints [deg/s]
        :param throttle: np array with the throttle input [0, 100]
        :param d_err: np array with D term error (optional)
        :param debug: optional np array; used as the reference (pre-filter)
                      signal for the filter transfer estimate (TODO in original)
        """
        # equally space samples in time
        data = {
            'gyro': gyro_rate,
            'input': gyro_setpoint,
            'throttle': throttle
        }
        if d_err is not None: data['d_err'] = d_err
        if debug is not None: data['debug'] = debug
        self.time, self.data = self.equalize_data(time, data)
        self.gyro = self.data['gyro']
        self.input = self.data['input']
        self.throttle = self.data['throttle']
        # NOTE(review): this is the *negative* sample period (time[0]-time[1]);
        # it is only consumed via np.abs(np.fft.fftfreq(...)) below, where the
        # sign cancels — confirm before using self.dt elsewhere
        self.dt = self.time[0]-self.time[1]
        self.data['time'] = self.time
        self.name = name
        #enable this to generate artificial gyro trace with known system response
        #self.gyro=self.toy_out(self.input, delay=0.01, mode='normal')####
        self.flen = self.stepcalc(self.time, Trace.framelen) # array len corresponding to framelen in s
        self.rlen = self.stepcalc(self.time, Trace.resplen) # array len corresponding to resplen in s
        self.time_resp = self.time[0:self.rlen]-self.time[0]
        # stack of overlapping windows over the whole flight; one response
        # estimate is computed per window
        self.stacks = self.winstacker({'time':[],'input':[],'gyro':[], 'throttle':[]}, self.flen, Trace.superpos) # [[time, input, output],]
        self.window = np.hanning(self.flen) #self.tukeywin(self.flen, self.tuk_alpha)
        self.spec_sm, self.avr_t, self.avr_in, self.max_in, self.max_thr = self.stack_response(self.stacks, self.window)
        self.low_mask, self.high_mask = self.low_high_mask(self.max_in, self.threshold) #calcs masks for high and low inputs according to threshold
        self.toolow_mask = self.low_high_mask(self.max_in, 20)[1] #mask for ignoring noisy low input
        # commented, because it's unused
        # self.resp_sm = self.weighted_mode_avr(self.spec_sm, self.toolow_mask, [-1.5,3.5], 1000)
        # self.resp_quality = -self.to_mask((np.abs(self.spec_sm -self.resp_sm[0]).mean(axis=1)).clip(0.5-1e-9,0.5))+1.
        # # masking by setting trottle of unwanted traces to neg
        # self.thr_response = self.hist2d(self.max_thr * (2. * (self.toolow_mask*self.resp_quality) - 1.), self.time_resp,
        #                                 (self.spec_sm.transpose() * self.toolow_mask).transpose(), [101, self.rlen])
        self.resp_low = self.weighted_mode_avr(self.spec_sm, self.low_mask*self.toolow_mask, [-1.5,3.5], 1000)
        if self.high_mask.sum()>0:
            self.resp_high = self.weighted_mode_avr(self.spec_sm, self.high_mask*self.toolow_mask, [-1.5,3.5], 1000)
        # noise/spectrogram analysis is only done if the D term error was logged
        if 'd_err' in self.data:
            self.noise_winlen = self.stepcalc(self.time, Trace.noise_framelen)
            self.noise_stack = self.winstacker({'time':[], 'gyro':[], 'throttle':[], 'd_err':[], 'debug':[]},
                                               self.noise_winlen, Trace.noise_superpos)
            self.noise_win = np.hanning(self.noise_winlen)
            self.noise_gyro = self.stackspectrum(self.noise_stack['time'],self.noise_stack['throttle'],self.noise_stack['gyro'], self.noise_win)
            self.noise_d = self.stackspectrum(self.noise_stack['time'], self.noise_stack['throttle'], self.noise_stack['d_err'], self.noise_win)
            self.noise_debug = self.stackspectrum(self.noise_stack['time'], self.noise_stack['throttle'], self.noise_stack['debug'], self.noise_win)
            if self.noise_debug['hist2d'].sum()>0:
                ## mask 0 entries
                thr_mask = self.noise_gyro['throt_hist_avr'].clip(0,1)
                # filter transmission: ratio of the gyro spectrum to the debug
                # (reference) spectrum, throttle-weighted average per frequency
                self.filter_trans = np.average(self.noise_gyro['hist2d'], axis=1, weights=thr_mask)/\
                                    np.average(self.noise_debug['hist2d'], axis=1, weights=thr_mask)
            else:
                self.filter_trans = self.noise_gyro['hist2d'].mean(axis=1)*0.
    @staticmethod
    def low_high_mask(signal, threshold):
        """ split signal into a low (<= threshold) mask and its complement """
        low = np.copy(signal)
        low[low <=threshold] = 1.
        low[low > threshold] = 0.
        high = -low+1.
        if high.sum() < 10: # ignore high input that is too short
            high *= 0.
        return low, high
    def to_mask(self, clipped):
        """ normalize an array into [0, 1] (modifies the input in place) """
        clipped-=clipped.min()
        clipped_max = clipped.max()
        if clipped_max > 1e-10: # avoid division by zero
            clipped/=clipped_max
        return clipped
    def rate_curve(self, rcin, inmax=500., outmax=800., rate=160.):
        ### an estimated rate curve. not used.
        expoin = (np.exp((rcin - inmax) / rate) - np.exp((-rcin - inmax) / rate)) * outmax
        return expoin
    def tukeywin(self, len, alpha=0.5):
        ### makes tukey window for enveloping
        # NOTE(review): the parameter name shadows the builtin len(); kept as-is
        # to preserve the upstream code
        M = len
        n = np.arange(M - 1.) #
        if alpha <= 0:
            return np.ones(M) # rectangular window
        elif alpha >= 1:
            return np.hanning(M)
        # Normal case
        x = np.linspace(0, 1, M, dtype=np.float64)
        w = np.ones(x.shape)
        # first condition 0 <= x < alpha/2
        first_condition = x < alpha / 2
        w[first_condition] = 0.5 * (1 + np.cos(2 * np.pi / alpha * (x[first_condition] - alpha / 2)))
        # second condition already taken care of
        # third condition 1 - alpha / 2 <= x <= 1
        third_condition = x >= (1 - alpha / 2)
        w[third_condition] = 0.5 * (1 + np.cos(2 * np.pi / alpha * (x[third_condition] - 1 + alpha / 2)))
        return w
    def toy_out(self, inp, delay=0.01, length=0.01, noise=5., mode='normal', sinfreq=100.):
        # generates artificial output for benchmarking
        freq= 1./(self.time[1]-self.time[0])
        toyresp = np.zeros(int((delay+length)*freq))
        toyresp[int((delay)*freq):]=1.
        toyresp/=toyresp.sum()
        toyout = np.convolve(inp, toyresp, mode='full')[:len(inp)]#*0.9
        if mode=='normal':
            noise_sig = (np.random.random_sample(len(toyout))-0.5)*noise
        elif mode=='sin':
            noise_sig = (np.sin(2.*np.pi*self.time*sinfreq)) * noise
        else:
            noise_sig=0.
        return toyout+noise_sig
    @staticmethod
    def equalize_data(time, data):
        """Resample & interpolate all dict elements in data for equal sampling in time
        :return: tuple of (time, data)
        """
        newtime = np.linspace(time[0], time[-1], len(time), dtype=np.float64)
        output = {}
        for key in data:
            output[key] = interp1d(time, data[key])(newtime)
        return (newtime, output)
    def stepcalc(self, time, duration):
        ### calculates frequency and resulting windowlength (in samples)
        tstep = (time[-1]-time[0])/len(time)
        freq = 1./tstep
        arr_len = duration * freq
        return int(arr_len)
    def winstacker(self, stackdict, flen, superpos):
        ### makes stack of windows for deconvolution
        # each key of stackdict collects overlapping slices (length flen,
        # shifted by flen/superpos) of the corresponding self.data array
        tlen = len(self.time)
        shift = int(flen/superpos)
        wins = int((tlen-flen)/shift)
        for i in np.arange(wins):
            for key in stackdict.keys():
                stackdict[key].append(self.data[key][i * shift:i * shift + flen])
        for k in stackdict.keys():
            #print('key',k)
            #print(len(stackdict[k]))
            stackdict[k]=np.array(stackdict[k], dtype=np.float64)
        return stackdict
    def wiener_deconvolution(self, input, output, cutfreq): # input/output are two-dimensional
        """ estimate the impulse response per window row via Wiener deconvolution,
        with a frequency-dependent noise weighting derived from cutfreq """
        pad = 1024 - (len(input[0]) % 1024) # padding to power of 2, increases transform speed
        input = np.pad(input, [[0,0],[0,pad]], mode='constant')
        output = np.pad(output, [[0, 0], [0, pad]], mode='constant')
        H = np.fft.fft(input, axis=-1)
        G = np.fft.fft(output,axis=-1)
        freq = np.abs(np.fft.fftfreq(len(input[0]), self.dt))
        sn = self.to_mask(np.clip(np.abs(freq), cutfreq-1e-9, cutfreq))
        len_lpf=np.sum(np.ones_like(sn)-sn)
        sn=self.to_mask(gaussian_filter1d(sn,len_lpf/6.))
        sn= 10.*(-sn+1.+1e-9) # +1e-9 to prohibit 0/0 situations
        Hcon = np.conj(H)
        deconvolved_sm = np.real(np.fft.ifft(G * Hcon / (H * Hcon + 1./sn),axis=-1))
        return deconvolved_sm
    def stack_response(self, stacks, window):
        """ deconvolve each window and return the cumulated (step) responses
        plus per-window statistics (avg time, avg/max input, max throttle) """
        inp = stacks['input'] * window
        outp = stacks['gyro'] * window
        thr = stacks['throttle'] * window
        deconvolved_sm = self.wiener_deconvolution(inp, outp, self.cutfreq)[:, :self.rlen]
        # cumulative sum of the impulse response gives the step response
        delta_resp = deconvolved_sm.cumsum(axis=1)
        max_thr = np.abs(np.abs(thr)).max(axis=1)
        avr_in = np.abs(np.abs(inp)).mean(axis=1)
        max_in = np.max(np.abs(inp), axis=1)
        avr_t = stacks['time'].mean(axis=1)
        return delta_resp, avr_t, avr_in, max_in, max_thr
    def spectrum(self, time, traces):
        ### fouriertransform for noise analysis. returns frequencies and spectrum.
        pad = 1024 - (len(traces[0]) % 1024) # padding to power of 2, increases transform speed
        traces = np.pad(traces, [[0, 0], [0, pad]], mode='constant')
        trspec = np.fft.rfft(traces, axis=-1, norm='ortho')
        trfreq = np.fft.rfftfreq(len(traces[0]), time[1] - time[0])
        return trfreq, trspec
    def stackfilter(self, time, trace_ref, trace_filt, window):
        ### calculates filter transmission and phaseshift from stack of windows. Not in use, maybe later.
        # NOTE(review): dead code — computes locals but returns nothing; kept
        # verbatim from upstream for possible later use
        # slicing off last 2s to get rid of landing
        #maybe pass throttle for further analysis...
        filt = trace_filt[:-int(Trace.noise_superpos * 2. / Trace.noise_framelen), :] * window
        ref = trace_ref[:-int(Trace.noise_superpos * 2. / Trace.noise_framelen), :] * window
        time = time[:-int(Trace.noise_superpos * 2. / Trace.noise_framelen), :]
        full_freq_f, full_spec_f = self.spectrum(self.time, [self.data['gyro']])
        full_freq_r, full_spec_r = self.spectrum(self.time, [self.data['debug']])
        f_amp_freq, f_amp_hist =np.histogram(full_freq_f, weights=np.abs(full_spec_f.real).flatten(), bins=int(full_freq_f[-1]))
        r_amp_freq, r_amp_hist = np.histogram(full_freq_r, weights=np.abs(full_spec_r.real).flatten(), bins=int(full_freq_r[-1]))
    def hist2d(self, x, y, weights, bins): #bins[nx,ny]
        ### generates a 2d hist from input 1d axis for x,y. repeats them to match shape of weights X*Y (data points)
        ### x will be 0-100%
        # NOTE(review): the 'normed' keyword was removed from numpy's
        # histogram2d in recent versions — confirm the pinned numpy supports it
        freqs = np.repeat(np.array([y], dtype=np.float64), len(x), axis=0)
        throts = np.repeat(np.array([x], dtype=np.float64), len(y), axis=0).transpose()
        throt_hist_avr, throt_scale_avr = np.histogram(x, 101, [0, 100])
        hist2d = np.histogram2d(throts.flatten(), freqs.flatten(),
                                range=[[0, 100], [y[0], y[-1]]],
                                bins=bins, weights=weights.flatten(), normed=False)[0].transpose()
        hist2d = np.array(abs(hist2d), dtype=np.float64)
        hist2d_norm = np.copy(hist2d)
        hist2d_norm /= (throt_hist_avr + 1e-9)
        return {'hist2d_norm':hist2d_norm, 'hist2d':hist2d, 'throt_hist':throt_hist_avr,'throt_scale':throt_scale_avr}
    def stackspectrum(self, time, throttle, trace, window):
        ### calculates spectrogram from stack of windows against throttle.
        # slicing off last 2s to get rid of landing
        gyro = trace[:-int(Trace.noise_superpos*2./Trace.noise_framelen),:] * window
        thr = throttle[:-int(Trace.noise_superpos*2./Trace.noise_framelen),:] * window
        time = time[:-int(Trace.noise_superpos*2./Trace.noise_framelen),:]
        freq, spec = self.spectrum(time[0], gyro)
        weights = abs(spec.real)
        avr_thr = np.abs(thr).max(axis=1)
        # NOTE(review): len(freq)/4 is a float bin count; modern numpy requires
        # an int here — confirm against the runtime numpy version
        hist2d=self.hist2d(avr_thr, freq,weights,[101,len(freq)/4])
        filt_width = 3 # width of gaussian smoothing for hist data
        hist2d_sm = gaussian_filter1d(hist2d['hist2d_norm'], filt_width, axis=1, mode='constant')
        # get max value in histogram >100hz
        thresh = 100.
        mask = self.to_mask(freq[:-1:4].clip(thresh-1e-9,thresh))
        maxval = np.max(hist2d_sm.transpose()*mask)
        return {'throt_hist_avr':hist2d['throt_hist'],'throt_axis':hist2d['throt_scale'],'freq_axis':freq[::4],
                'hist2d_norm':hist2d['hist2d_norm'], 'hist2d_sm':hist2d_sm, 'hist2d':hist2d['hist2d'], 'max':maxval}
    def weighted_mode_avr(self, values, weights, vertrange, vertbins):
        ### finds the most common trace and std
        threshold = 0.5 # threshold for std calculation
        filt_width = 7 # width of gaussian smoothing for hist data
        resp_y = np.linspace(vertrange[0], vertrange[-1], vertbins, dtype=np.float64)
        times = np.repeat(np.array([self.time_resp],dtype=np.float64), len(values), axis=0)
        weights = np.repeat(weights, len(values[0]))
        hist2d = np.histogram2d(times.flatten(), values.flatten(),
                                range=[[self.time_resp[0], self.time_resp[-1]], vertrange],
                                bins=[len(times[0]), vertbins], weights=weights.flatten())[0].transpose()
        ### shift outer edges by +-1e-5 (10us) because of dtype32. Otherwise different precisions lead to artifacting.
        ### solution to this --> something's strange here. In outer most edges some bins are doubled, some are empty.
        ### Hence sometimes produces "divide by 0 error" in "/=" operation.
        if hist2d.sum():
            hist2d_sm = gaussian_filter1d(hist2d, filt_width, axis=0, mode='constant')
            hist2d_sm /= np.max(hist2d_sm, 0)
            # weighted average of the bin centers, weights = smoothed hist^2
            pixelpos = np.repeat(resp_y.reshape(len(resp_y), 1), len(times[0]), axis=1)
            avr = np.average(pixelpos, 0, weights=hist2d_sm * hist2d_sm)
        else:
            hist2d_sm = hist2d
            avr = np.zeros_like(self.time_resp)
        # only used for monochrome error width
        hist2d[hist2d <= threshold] = 0.
        hist2d[hist2d > threshold] = 0.5 / (vertbins / (vertrange[-1] - vertrange[0]))
        std = np.sum(hist2d, 0)
        return avr, std, [self.time_resp, resp_y, hist2d_sm]
    ### calculates weighted average and resulting errors
    def weighted_avg_and_std(self, values, weights):
        """ :return: tuple of (weighted average, weighted stddev) along axis 0 """
        average = np.average(values, axis=0, weights=weights)
        variance = np.average((values - average) ** 2, axis=0, weights=weights)
        return (average, np.sqrt(variance))
def plot_pid_response(trace, data, plot_config, label='Rate'):
    """Create the step-response plot for a single axis.
    :param trace: Trace object with the analysis results
    :param data: ULog.data_list
    :param plot_config: plot configuration dict
    :param label: y-quantity name used in the plot title (default 'Rate')
    :return: DataPlot instance
    """
    def _make_palette(hue, N=20):
        """ rgba palette (N entries) for the 2D histogram image: constant hue,
        fading from transparent dark to more opaque bright """
        saturation = 0.75
        vmin = 0.5
        def _saturation_at(i, N, s):
            """ s for the first half of the ramp, then linearly towards 1 """
            if i * 2 < N:
                return s
            return s + (i - N / 2) / (N / 2) * (1 - s)
        palette = []
        alpha_max = 0.5
        for idx, x in enumerate(reversed(range(N))):
            value = vmin + x * (1 - vmin) / N
            r, g, b = colorsys.hsv_to_rgb(
                hue, _saturation_at(N / 2 - x, N, saturation), value)
            alpha = idx * alpha_max / N
            palette.append('rgba({:.0f},{:.0f},{:.0f},{:.3f})'.format(
                int(r * 255), int(g * 255), int(b * 255), alpha))
        return palette
    plot_title = 'Step Response for {:} {:}'.format(trace.name.capitalize(), label)
    data_plot = DataPlot(data, plot_config, 'sensor_combined',
                         y_axis_label='strength', x_axis_label='[s]',
                         title=plot_title,
                         x_range=Range1d(0, trace.resplen),
                         y_range=Range1d(0, 2))
    bokeh_plot = data_plot.bokeh_plot
    # low-rate responses: 2D histogram as image + mode-average line
    low_mapper = LinearColorMapper(palette=_make_palette(0.55), low=0, high=1)
    # y start and range come from weighted_mode_avr(, , [-1.5, 3.5])
    bokeh_plot.image([trace.resp_low[2][2]], x=0, y=-1.5, dw=trace.resplen, dh=5,
                     color_mapper=low_mapper)
    has_high_rates = trace.high_mask.sum() > 0
    low_rates_label = ' (<500 deg/s)' if has_high_rates else ''
    bokeh_plot.line(x=trace.time_resp, y=trace.resp_low[0],
                    legend_label=trace.name.capitalize() + low_rates_label,
                    line_width=4, line_color=colors3[2])
    # Intentionally no marker at the first crossing of 1: the average response
    # time depends on the distribution of rate setpoint jumps in the particular
    # flight (e.g. acro vs. stabilized), so it is not comparable across flights.
    if has_high_rates:
        high_mapper = LinearColorMapper(palette=_make_palette(0.95), low=0, high=1)
        # same y placement as the low-rate histogram
        bokeh_plot.image([trace.resp_high[2][2]], x=0, y=-1.5, dw=trace.resplen, dh=5,
                         color_mapper=high_mapper)
        bokeh_plot.line(x=trace.time_resp, y=trace.resp_high[0],
                        legend_label=trace.name.capitalize() + ' (>500 deg/s)',
                        line_width=4, line_color=colors3[0])
    # horizontal marker line at 1 (the ideal steady-state response)
    unit_line = Span(location=1,
                     dimension='width', line_color='black',
                     line_alpha=0.5, line_width=1.5)
    bokeh_plot.add_layout(unit_line)
    data_plot.set_use_time_formatter(False)
    data_plot.finalize()
    return data_plot
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,367 | PX4/flight_review | refs/heads/main | /app/generate_overview_img.py | #! /usr/bin/env python3
import os
import sys
import sqlite3
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plot_app'))
from plot_app.overview_generator import generate_overview_img_from_id
from plot_app.config import get_db_filename
# Query all public log ids (newest first) and regenerate each overview image.
connection = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
cursor = connection.cursor()
cursor.execute('SELECT Id FROM Logs WHERE Public = 1 ORDER BY Date DESC')
for (log_id,) in cursor.fetchall():
    generate_overview_img_from_id(log_id)
cursor.close()
connection.close()
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,368 | PX4/flight_review | refs/heads/main | /app/plot_app/statistics_plots.py | """ Class for statistics plots page """
import functools
import re
import sqlite3
import datetime
import numpy as np
from bokeh.plotting import figure
from bokeh.palettes import viridis # alternatives: magma, inferno
from bokeh.models import (
DatetimeTickFormatter, FixedTicker, FuncTickFormatter,
HoverTool, ColumnDataSource, LabelSet #, CustomJS
)
from plotting import TOOLS, ACTIVE_SCROLL_TOOLS
from config import get_db_filename
from helper import get_airframe_data, flight_modes_table, get_sw_releases
#pylint: disable=too-few-public-methods,invalid-name,unused-argument,consider-using-enumerate
#pylint: disable=unsubscriptable-object
class _VersionData:
"""
class that contains various information for a single version
"""
def __init__(self):
self.boards = {} # flight durations per board
self.boards_num_logs = {} # num logs/flights per board
self.airframes = {} # flight durations per airframes
self.airframes_num_logs = {} # num logs/flights per airframes
self.ratings = {}
self.flight_mode_durations = {} # flight durations per flight mode
class _Log:
"""
container class containing a DB entry for one log
"""
re_version_extract = re.compile(r'v([0-9]+)\.([0-9]+)\.?([0-9]*)')
def __init__(self, db_tuple):
self.log_id = db_tuple[0]
self.date = db_tuple[1]
self.source = db_tuple[2]
self.is_public = db_tuple[3]
self.rating = db_tuple[4]
self.duration = 0
self.autostart_id = 0
self.hardware = ""
self.uuid = ""
self.sw_version = ""
self.flight_mode_durations = []
def set_generated(self, db_tuple):
""" set from a LogsGenerated DB tuple """
self.duration = db_tuple[1]
self.autostart_id = db_tuple[4]
self.hardware = db_tuple[5]
self.uuid = db_tuple[11]
# the version has typically the form 'v<i>.<j>.<k> <l>', where <l>
# indicates whether it's a development version (most of the time it's 0)
self.sw_version = db_tuple[10].split(' ')[0]
self.flight_mode_durations = \
[tuple(map(int, x.split(':'))) for x in db_tuple[12].split(',') if len(x) > 0]
@staticmethod
def compare_version(ver_a, ver_b):
"""
compare version strings
"""
# if the version is not set, it should be last
if ver_a == '': return 1
if ver_b == '': return -1
versions = [ver_a, ver_b]
version_tuples = []
for version in versions:
m = _Log.re_version_extract.match(version)
if m:
patch = 0
if len(m.groups()) == 3:
patch = m.group(3)
version_tuples.append((m.group(1), m.group(2), patch))
if len(version_tuples) != 2:
return -1
for i in range(3):
if version_tuples[0][i] < version_tuples[1][i]:
return -1
if version_tuples[0][i] > version_tuples[1][i]:
return 1
return 0
class StatisticsPlots:
"""
Class to generate statistics plots from Database entries
"""
def __init__(self, plot_config, verbose_output=False):
self._config = plot_config
self._verbose_output = verbose_output
# lists of dates when a _log was uploaded, one list per type
self._public_logs_dates = []
self._private_logs_dates = []
self._ci_logs_dates = []
self._all_logs_dates = []
self._public_logs = []
# read from the DB
con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
with con:
cur = con.cursor()
cur.execute('select Id, Date, Source, Public, Rating from Logs')
db_tuples = cur.fetchall()
for db_tuple in db_tuples:
log = _Log(db_tuple)
self._all_logs_dates.append(log.date)
if log.is_public == 1:
if log.source == 'CI':
self._ci_logs_dates.append(log.date)
else:
self._public_logs_dates.append(log.date)
else:
if log.source == 'CI':
self._ci_logs_dates.append(log.date)
else:
self._private_logs_dates.append(log.date)
# LogsGenerated: public only
if log.is_public != 1 or log.source == 'CI':
continue
cur.execute('select * from LogsGenerated where Id = ?', [log.log_id])
db_tuple = cur.fetchone()
if db_tuple is None:
print("Error: no generated data")
continue
log.set_generated(db_tuple)
# filter bogus entries
if log.sw_version == 'v0.0.0':
if self._verbose_output:
print('Warning: %s with version=v0.0.0' % log.log_id)
continue
if log.duration > 7*24*3600: # probably bogus timestamp(s)
if self._verbose_output:
print('Warning: %s with very high duration %i' %
(log.log_id, log.duration))
continue
if log.sw_version == '':
# FIXME: does that still occur and if so why?
if self._verbose_output:
print('Warning: %s version not set' % log.log_id)
continue
if log.autostart_id == 0:
print('Warning: %s with autostart_id=0' % log.log_id)
continue
try:
ver_major = int(log.sw_version[1:].split('.')[0])
if ver_major >= 2 or ver_major == 0:
print('Warning: %s with large/small version %s' %
(log.log_id, log.sw_version))
continue
except:
continue
self._public_logs.append(log)
self._version_data = {} # dict of _VersionData items
self._all_airframes = set()
self._all_boards = set()
self._all_ratings = set()
self._all_flight_modes = set()
self._total_duration = 0 # in hours, public logs only
self._total_last_version_duration = 0 # in hours, public logs only
self._latest_major_release = ""
for log in self._public_logs:
if not log.sw_version in self._version_data:
self._version_data[log.sw_version] = _VersionData()
self._all_airframes.add(str(log.autostart_id))
self._all_boards.add(log.hardware)
self._all_ratings.add(log.rating)
cur_version_data = self._version_data[log.sw_version]
boards = cur_version_data.boards
boards_num_logs = cur_version_data.boards_num_logs
airframes = cur_version_data.airframes
airframes_num_logs = cur_version_data.airframes_num_logs
ratings = cur_version_data.ratings
flight_modes = cur_version_data.flight_mode_durations
if not log.hardware in boards:
boards[log.hardware] = 0
boards_num_logs[log.hardware] = 0
boards[log.hardware] += log.duration / 3600.
boards_num_logs[log.hardware] += 1
for flight_mode, duration in log.flight_mode_durations:
flight_mode_str = str(flight_mode)
self._all_flight_modes.add(flight_mode_str)
if not flight_mode_str in flight_modes:
flight_modes[flight_mode_str] = 0.
flight_modes[flight_mode_str] += duration / 3600.
autostart_str = str(log.autostart_id)
if not autostart_str in airframes:
airframes[autostart_str] = 0
airframes_num_logs[autostart_str] = 0
airframes[autostart_str] += log.duration / 3600.
airframes_num_logs[autostart_str] += 1
if not log.rating in ratings:
ratings[log.rating] = 0
ratings[log.rating] += 1
self._total_duration += log.duration / 3600.
if len(self._version_data) > 0:
latest_version = sorted(
self._version_data, key=functools.cmp_to_key(_Log.compare_version))[-1]
latest_major_version = latest_version.split('.')[0:2]
self._latest_major_release = '.'.join(latest_major_version)
for log in self._public_logs:
if log.sw_version.split('.')[0:2] == latest_major_version:
self._total_last_version_duration += log.duration / 3600.
def num_logs_total(self):
""" get the total number of logs on the server """
return len(self._all_logs_dates)
def num_logs_ci(self):
""" get the total number of CI logs on the server """
return len(self._ci_logs_dates)
    def plot_log_upload_statistics(self, colors):
        """
        plot upload statistics for different upload types. Each type is a list of
        datetime of a single upload
        :param colors: list of 5 colors
        :return: bokeh plot
        """
        title = 'Number of Log Files on the Server'
        p = figure(title=title, x_axis_label=None,
                   y_axis_label=None, tools=TOOLS,
                   active_scroll=ACTIVE_SCROLL_TOOLS)
        def plot_dates(p, dates_list, last_date, legend, color):
            """ plot a single line from a list of dates """
            # cumulative count: the i-th upload brings the total to i+1
            counts = np.arange(1, len(dates_list)+1)
            # subsample
            dates_list_subsampled = []
            counts_subsampled = []
            previous_timestamp = 0
            for date, count in zip(dates_list, counts):
                t = int(date.timestamp()/(3600*4)) # use a granularity of 4 hours
                if t != previous_timestamp:
                    previous_timestamp = t
                    dates_list_subsampled.append(date)
                    counts_subsampled.append(count)
            if len(counts_subsampled) > 0:
                if dates_list_subsampled[-1] < last_date:
                    # make sure the plot line extends to the last date
                    counts_subsampled.append(counts_subsampled[-1])
                    dates_list_subsampled.append(last_date)
                p.line(dates_list_subsampled, counts_subsampled,
                       legend_label=legend, line_width=2, line_color=color)
        if len(self._all_logs_dates) > 0:
            last_date = self._all_logs_dates[-1]
            # compared to the others, there are many more CI logs, making it hard to
            # see the others
            #plot_dates(p, self._all_logs_dates, last_date, 'Total', colors[0])
            #plot_dates(p, self._ci_logs_dates, last_date,
            #           'Continuous Integration (Simulation Tests)', colors[1])
            plot_dates(p, self._private_logs_dates, last_date, 'Private', colors[2])
            plot_dates(p, self._public_logs_dates, last_date, 'Public', colors[4])
        p.xaxis.formatter = DatetimeTickFormatter(
            hours=["%d %b %Y %H:%M"],
            days=["%d %b %Y"],
            months=["%d %b %Y"],
            years=["%d %b %Y"],
            )
        # show the release versions as text markers
        release_dict = {'dates': [], 'tags': [], 'y': [], 'y_offset': []}
        max_logs_dates = self._public_logs_dates # defines range limits of the plot
        if len(max_logs_dates) > 0:
            first_date = max_logs_dates[0]
            y_max = max(len(max_logs_dates), len(self._private_logs_dates))
            # place the markers slightly below the x axis
            y_pos = -y_max*0.08
            releases = get_sw_releases()
            if releases:
                # alternate the label offset so neighboring labels don't overlap
                y_offset = True
                for release in reversed(releases):
                    tag = release['tag_name']
                    release_date_str = release['published_at']
                    release_date = datetime.datetime.strptime(release_date_str,
                                                              "%Y-%m-%dT%H:%M:%SZ")
                    # only show stable releases within the plotted time range
                    if release_date > first_date and not 'rc' in tag.lower() \
                        and not 'beta' in tag.lower():
                        release_dict['dates'].append(release_date)
                        release_dict['tags'].append(tag)
                        release_dict['y'].append(y_pos)
                        if y_offset:
                            release_dict['y_offset'].append(5)
                        else:
                            release_dict['y_offset'].append(-18)
                        y_offset = not y_offset
            if len(release_dict['dates']) > 0:
                source = ColumnDataSource(data=release_dict)
                x = p.scatter(x='dates', y='y', size=4, source=source, color='#000000')
                labels = LabelSet(x='dates', y='y',
                                  text='tags', level='glyph',
                                  x_offset=2, y_offset='y_offset', source=source,
                                  text_font_size="10pt")
                p.add_layout(labels)
                # fixate the y position within the graph (screen coordinates).
                # the y_units='screen' does not work for p.scatter
                jscode = """
                var data = source.get('data');
                var start = cb_obj.get('start');
                var end = cb_obj.get('end');
                data_start = start + (end - start) * 0.05;
                for (var i = 0; i < data['y'].length; ++i) {
                    data['y'][i] = data_start;
                }
                source.trigger('change');
                """
                # FIXME: this is broken on bokeh 0.12.12
                # p.y_range.callback = CustomJS(args={'source': source}, code=jscode)
        self._setup_plot(p, 'large')
        return p
def total_public_flight_duration(self):
""" get total public flight hours """
return self._total_duration
def total_public_flight_duration_latest_release(self):
""" get total public flight hours for the latest major release (includes
all minor releases & RC candidates. """
return self._total_last_version_duration
def latest_major_release(self):
""" get the version of the latest major release in the form 'v1.2'. """
return self._latest_major_release
def plot_public_boards_statistics(self):
"""
plot board flight hour statistics for each version, for public logs
:return: bokeh plot
"""
return self._plot_public_data_statistics(
self._all_boards, 'boards', 'Board', lambda x, short: x)
def plot_public_boards_num_flights_statistics(self):
"""
plot board number of flights statistics for each version, for public logs
:return: bokeh plot
"""
return self._plot_public_data_statistics(
self._all_boards, 'boards_num_logs', 'Board', lambda x, short: x, False)
def plot_public_airframe_statistics(self):
"""
plot airframe flight hour statistics for each version, for public logs
:return: bokeh plot
"""
def label_callback(airframe_id, short):
""" get the airframe label for the sys_autostart id """
if short:
return airframe_id
airframe_data = get_airframe_data(airframe_id)
if airframe_data is None:
airframe_label = airframe_id
else:
airframe_type = ''
if 'type' in airframe_data:
airframe_type = ', '+airframe_data['type']
airframe_label = airframe_data.get('name')+ \
airframe_type+' ('+airframe_id+')'
return airframe_label
return self._plot_public_data_statistics(
self._all_airframes, 'airframes', 'Airframe', label_callback)
def plot_public_flight_mode_statistics(self):
"""
plot flight mode statistics for each version, for public logs
:return: bokeh plot
"""
def label_callback(flight_mode, short):
""" get flight mode as string from an integer value """
try:
return flight_modes_table[int(flight_mode)][0]
except:
return 'Unknown'
return self._plot_public_data_statistics(
self._all_flight_modes, 'flight_mode_durations', 'Flight Mode', label_callback)
def _plot_public_data_statistics(self, all_data, version_attr_name,
                                 title_name, label_cb, is_flight_hours=True):
    """
    generic method to plot flight hours one data type
    :param all_data: list with all types as string
    :param version_attr_name: attribute name of _VersionData
    :param title_name: name of the data for the title (and hover tool)
    :param label_cb: callback to create the label
    :param is_flight_hours: if True, this shows the flight hours, nr of flights otherwise
    :return: bokeh plot
    """
    if is_flight_hours:
        title_prefix = 'Flight hours'
    else:
        title_prefix = 'Number of Flights'

    # change data structure
    data_hours = {}  # key=data id, value=list of hours for each version
    for d in all_data:
        data_hours[d] = []

    versions = []  # sorted list of all versions
    for ver in sorted(self._version_data, key=functools.cmp_to_key(_Log.compare_version)):
        versions.append(ver)
        # all data points of the requested type for this version
        version_type_data = getattr(self._version_data[ver],
                                    version_attr_name)
        for d in all_data:
            # missing entries get a 0 so every series has one value per version
            if not d in version_type_data:
                version_type_data[d] = 0.
            data_hours[d].append(version_type_data[d])

    # cumulative over each version
    for key in all_data:
        data_hours[key] = np.array(data_hours[key])
        data_hours[key+"_cum"] = np.cumsum(data_hours[key])

    # create a 2D numpy array. We could directly pass the dict to the bokeh
    # plot, but then we don't have control over the sorting order
    X = np.zeros((len(all_data), len(versions)))
    i = 0
    all_data_sorted = []
    # sort by the final cumulative value (ascending), so the smallest
    # contributors come first in the stack
    for key in sorted(all_data, key=lambda data_key: data_hours[data_key+"_cum"][-1]):
        X[i, :] = data_hours[key+"_cum"]
        all_data_sorted.append(key)
        i += 1
    all_data = all_data_sorted

    colors = viridis(len(all_data))
    area = figure(title=title_prefix+" per "+title_name, tools=TOOLS,
                  active_scroll=ACTIVE_SCROLL_TOOLS,
                  x_axis_label='version (including development states)',
                  y_axis_label='')

    # stack the data: we'll need it for the hover tool & the patches
    last = np.zeros(len(versions))
    stacked_patches = []  # polygon y positions: one per data item
    for i in range(len(all_data)):
        next_data = last + X[i, :]
        # for the stacked patches, we store a polygon: left-to-right, then right-to-left
        stacked_patches.append(np.hstack((last[::-1], next_data)))
        data_hours[all_data[i]+'_stacked'] = next_data
        last = next_data

    data_hours['x'] = np.arange(len(versions))

    # group minor versions closer together by manipulating the x-position
    # (we could use the release dates but we don't have that information for
    # all versions)
    grouping_factor = 3  # higher=stronger grouping, 0=disabled
    versions_spaced = []
    if len(versions) > 0:
        prev_version = versions[0]
        for i in range(len(versions)):
            version = versions[i]
            if prev_version.split('.')[0:2] == version.split('.')[0:2]:
                # same major.minor as the previous version: show only the patch part
                version_display = 'x.'+version.split('.')[2]
            else:
                # new major.minor: insert empty tick labels to create a visual gap
                versions_spaced.extend(['']*grouping_factor)
                version_display = version
            data_hours['x'][i] = len(versions_spaced)
            versions_spaced.append(version_display)
            prev_version = version

    # hover tool (number formats of the hover tooltip values)
    if is_flight_hours:
        str_format = '{0,0.0}'
    else:
        str_format = '{0,0}'

    source = ColumnDataSource(data=data_hours)
    for d in all_data:
        # invisible circles on the stack boundaries: they only act as hover targets
        renderer = area.circle(x='x', y=d+'_stacked', source=source,
                               size=10, alpha=0, name=d)
        g1_hover = HoverTool(
            renderers=[renderer],
            tooltips=[(title_name, label_cb(d, True)),
                      (title_prefix+' (only this version)', '@'+d+str_format),
                      (title_prefix+' (up to this version)', '@'+d+'_cum'+str_format)])
        area.add_tools(g1_hover)

    # now plot the patches (polygons)
    x = data_hours['x']
    x2 = np.hstack((x[::-1], x))
    for i in range(len(all_data)):
        area.patch(x2, stacked_patches[i], color=colors[i],  # pylint: disable=too-many-function-args
                   legend_label=label_cb(all_data[i], False), alpha=0.8, line_color=None)

    # reverse the legend so it matches the visual stacking order (top first)
    if area.legend:
        area.legend[0].items.reverse()

    # map the (spaced) integer x positions back to version labels
    area.xaxis.formatter = FuncTickFormatter(code="""
        var versions = """ + str(versions_spaced) + """;
        return versions[Math.floor(tick)]
        """)
    area.xaxis.ticker = FixedTicker(ticks=list(data_hours['x']))

    # decrease size a bit to fit all items
    area.legend.label_text_font_size = '8pt'
    area.legend.label_height = 8
    area.legend.glyph_height = 10

    self._setup_plot(area)
    return area
def _setup_plot(self, p, plot_height='normal'):
""" apply layout options to a bokeh plot """
plots_width = self._config['plot_width']
plots_height = self._config['plot_height'][plot_height]
p.plot_width = plots_width
p.plot_height = plots_height
p.xgrid.grid_line_color = 'navy'
p.xgrid.grid_line_alpha = 0.13
p.ygrid.grid_line_color = 'navy'
p.ygrid.grid_line_alpha = 0.13
p.legend.location = "top_left"
p.toolbar.logo = None
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,369 | PX4/flight_review | refs/heads/main | /app/plot_app/colors.py | """ color helper functions """
#pylint: disable=invalid-name
import colorsys
def get_N_colors(N, s=0.8, v=0.9):
    """ get N distinct colors as a list of hex strings """
    # spread the hues evenly around the HSV color wheel at fixed
    # saturation/value, then convert each color to a '#rrggbb' string
    hex_colors = []
    for idx in range(N):
        r, g, b = colorsys.hsv_to_rgb(idx * 1.0 / N, s, v)
        hex_colors.append('#{:02x}{:02x}{:02x}'.format(
            int(r * 255), int(g * 255), int(b * 255)))
    return hex_colors
def HTML_color_to_RGB(html_color):
    """ convert a HTML string color (eg. '#4422aa') into an RGB list (range 0-255)
    """
    # strip the optional leading '#', then parse the three hex components
    hex_digits = html_color[1:] if html_color[0] == '#' else html_color
    components = (hex_digits[:2], hex_digits[2:4], hex_digits[4:])
    return [int(component, 16) for component in components]
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,370 | PX4/flight_review | refs/heads/main | /app/plot_app/config_tables.py | """ configuration tables """
#pylint: disable=invalid-name
# flight mode id -> (display name, plot color); indexed with the mode values
# read from the log (callers unpack entries as "mode_name, color")
flight_modes_table = {
    0: ('Manual', '#cc0000'), # red
    1: ('Altitude', '#eecc00'), # yellow
    2: ('Position', '#00cc33'), # green
    10: ('Acro', '#66cc00'), # olive
    14: ('Offboard', '#00cccc'), # light blue
    15: ('Stabilized', '#0033cc'), # dark blue
    16: ('Rattitude', '#ee9900'), # orange

    # all AUTO-modes use the same color
    3: ('Mission', '#6600cc'), # purple
    4: ('Loiter', '#6600cc'), # purple
    5: ('Return to Land', '#6600cc'), # purple
    6: ('RC Recovery', '#6600cc'), # purple
    7: ('Return to groundstation', '#6600cc'), # purple
    8: ('Land (engine fail)', '#6600cc'), # purple
    9: ('Land (GPS fail)', '#6600cc'), # purple
    12: ('Descend', '#6600cc'), # purple
    13: ('Terminate', '#6600cc'), # purple
    17: ('Takeoff', '#6600cc'), # purple
    18: ('Land', '#6600cc'), # purple
    19: ('Follow Target', '#6600cc'), # purple
    20: ('Precision Land', '#6600cc'), # purple
    21: ('Orbit', '#6600cc'), # purple
    }
# VTOL state id -> (display name, plot color)
# states: 1=transition, 2=fixed-wing, 3=multicopter
vtol_modes_table = {
    1: ('Transition', '#cc0000'), # red
    2: ('Fixed-Wing', '#eecc00'), # yellow
    3: ('Multicopter', '#0033cc'), # dark blue
    }
# error label id -> label text (used when classifying logs,
# see 'validate_error_labels_and_get_ids')
error_labels_table = {
    # the labels (values) have to be capitalized!
    # 'validate_error_labels_and_get_ids' will return an error otherwise
    1: 'Other',
    2: 'Vibration',
    3: 'Airframe-design',
    4: 'Sensor-error',
    5: 'Component-failure',
    6: 'Software',
    7: 'Human-error',
    8: 'External-conditions'
    # Note: when adding new labels, always increase the id, never re-use a lower value
    }
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,371 | PX4/flight_review | refs/heads/main | /app/plot_app/vtol_tailsitter.py | """ VTOL tailsitter attitude and rate correction code """
from scipy.spatial.transform import Rotation as Rot
import numpy as np
def _wrap_yaw_deg_to_rad(euler_x_deg):
    """ remap the euler x-component (degrees) to yaw: offset by -180 deg,
        wrap into [-180, 180] and convert to radians """
    yaw = -180 - euler_x_deg
    yaw[yaw > 180] -= 360
    yaw[yaw < -180] += 360
    return np.deg2rad(yaw)


def _fw_segment_masks(timestamps, vtol_states):
    """ yield a boolean mask over 'timestamps' for every fixed-wing segment
        in 'vtol_states' (states: 1=transition, 2=FW, 3=MC) """
    is_vtol_fw = False
    fw_start = np.nan
    for state_time, state in vtol_states:
        if is_vtol_fw:
            # the FW segment ended at this state change
            yield np.logical_and(timestamps > fw_start, timestamps < state_time)
            is_vtol_fw = False
        if state == 2:
            fw_start = state_time
            is_vtol_fw = True
    if is_vtol_fw:
        # flight ended while still in FW mode: the final segment is FW too
        yield timestamps > fw_start


def tailsitter_orientation(ulog, vtol_states):
    """
    corrections for VTOL tailsitter attitude and rates

    tailsitter uses 90 degree rotation in pitch which is hardcoded in
    rather than consistently reported in estimated and setpoint
    use setpoint values as ground truth here and correct estimated by 90 degrees
    rates also need yaw and roll swapped with a -1 on roll axis

    :param ulog: pyulog.ULog object with the log data
    :param vtol_states: list of (timestamp, state) tuples
        (states: 1=transition, 2=FW, 3=MC)
    :return: [attitude, rates], each a dict with 'roll'/'pitch'/'yaw' numpy
        arrays, or None values if the corresponding topic is missing
    """
    # correct attitudes for VTOL tailsitter in FW mode
    try:
        att_dataset = ulog.get_dataset('vehicle_attitude')
        quat_t = att_dataset.data['timestamp']
        quats = np.transpose(np.asarray([att_dataset.data['q[0]'],
                                         att_dataset.data['q[1]'],
                                         att_dataset.data['q[2]'],
                                         att_dataset.data['q[3]']]))
        rotations = Rot.from_quat(quats)
        rpy = rotations.as_euler('xyz', degrees=True)
        # rotate by -90 degrees pitch in quaternion form to avoid singularity
        fw_rotation = Rot.from_euler('y', -90, degrees=True)
        rpy_fw = (fw_rotation*rotations).as_euler('xyz', degrees=True)
        # convert out into separate variables
        # (axes remapped: euler index 2 -> roll, 1 -> pitch (negated), 0 -> yaw)
        roll = np.deg2rad(rpy[:, 2])
        pitch = np.deg2rad(-1*rpy[:, 1])
        yaw = _wrap_yaw_deg_to_rad(rpy[:, 0])
        roll_fw = np.deg2rad(rpy_fw[:, 2])
        pitch_fw = np.deg2rad(-1*rpy_fw[:, 1])
        yaw_fw = _wrap_yaw_deg_to_rad(rpy_fw[:, 0])
        # overwrite the FW segments with the corrected values
        for in_fw in _fw_segment_masks(quat_t, vtol_states):
            roll[in_fw] = roll_fw[in_fw]
            pitch[in_fw] = pitch_fw[in_fw]
            yaw[in_fw] = yaw_fw[in_fw]
        vtol_attitude = {'roll': roll, 'pitch': pitch, 'yaw': yaw}
    except (KeyError, IndexError):
        vtol_attitude = {'roll': None, 'pitch': None, 'yaw': None}

    # correct angular rates for VTOL tailsitter in FW mode
    try:
        rates_dataset = ulog.get_dataset('vehicle_angular_velocity')
        w_r = rates_dataset.data['xyz[0]']
        w_p = rates_dataset.data['xyz[1]']
        w_y = rates_dataset.data['xyz[2]']
        w_t = rates_dataset.data['timestamp']
        # fw rates (roll and yaw swap, roll is negative axis)
        w_r_fw = w_y*-1
        w_y_fw = np.copy(w_r)
        # BUGFIX: the final FW segment was previously masked with the attitude
        # timestamps (quat_t) instead of the rate timestamps (w_t), which gave
        # wrong results when the two topics were sampled differently (and a
        # NameError when the attitude topic was missing)
        for in_fw in _fw_segment_masks(w_t, vtol_states):
            w_r[in_fw] = w_r_fw[in_fw]
            w_y[in_fw] = w_y_fw[in_fw]
        vtol_rates = {'roll': w_r, 'pitch': w_p, 'yaw': w_y}
    except (KeyError, IndexError):
        vtol_rates = {'roll': None, 'pitch': None, 'yaw': None}

    return [vtol_attitude, vtol_rates]
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,372 | PX4/flight_review | refs/heads/main | /app/tornado_handlers/send_email.py | """ Methods for sending notification emails """
from __future__ import print_function
import sys
import os
from smtplib import SMTP_SSL as SMTP # this invokes the secure SMTP protocol
# (port 465, uses SSL)
# from smtplib import SMTP # use this for standard SMTP protocol
# (port 25, no encryption)
from email.mime.text import MIMEText
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plot_app'))
from config import *
def send_notification_email(email_address, plot_url, delete_url, info):
    """ send a notification email after uploading a plot

    :param email_address: recipient address; an empty string disables sending
    :param plot_url: public URL of the uploaded plot
    :param delete_url: URL that allows deleting the log again
    :param info: dictionary with additional info
    :return: True on success (or nothing to send), False on failure
    """
    if email_address == '':
        return True
    description = info['description']
    if description == '':
        # fall back to the airframe name if no description was given
        description = info['airframe']
    if 'vehicle_name' in info:
        description = "{:} - {:}".format(description, info['vehicle_name'])
    subject = "Log File uploaded ({:})".format(description)
    if len(subject) > 78: # subject should not be longer than that
        subject = subject[:78]
    destination = [email_address]
    content = """\
Hi there!
Your uploaded log file is available under:
{plot_url}
Description: {description}
Feedback: {feedback}
Vehicle type: {type}
Airframe: {airframe}
Hardware: {hardware}
Vehicle UUID: {uuid}
Software git hash: {software}
Upload file name: {upload_filename}
Use the following link to delete the log:
{delete_url}
""".format(plot_url=plot_url, delete_url=delete_url, **info)
    return _send_email(destination, subject, content)
def send_flightreport_email(destination, plot_url, rating_description,
                            wind_speed, delete_url, uploader_email, info):
    """ send notification email for a flight report upload

    :param destination: list of recipient addresses (may be empty)
    :param rating_description: human-readable flight rating
    :param info: dictionary with additional log info
    :return: True on success (or nothing to send), False on failure
    """
    if len(destination) == 0:
        return True
    content = """\
Hi
A new flight report just got uploaded:
{plot_url}
Description: {description}
Feedback: {feedback}
Rating: {rating_description}
Wind Speed: {wind_speed}
Uploader: {uploader_email}
Vehicle type: {type}
Airframe: {airframe}
Hardware: {hardware}
Vehicle UUID: {uuid}
Software git hash: {software}
Use the following link to delete the log:
{delete_url}
""".format(plot_url=plot_url,
           rating_description=rating_description, wind_speed=wind_speed,
           delete_url=delete_url, uploader_email=uploader_email, **info)
    description = info['description']
    if description == '':
        # fall back to the airframe name if no description was given
        description = info['airframe']
    if 'vehicle_name' in info:
        description = "{:} - {:}".format(description, info['vehicle_name'])
    subject = "Flight Report uploaded ({:})".format(description)
    if info['rating'] == 'crash_sw_hw':
        # make crash reports stand out in the inbox
        subject = '[CRASH] '+subject
    if len(subject) > 78: # subject should not be longer than that
        subject = subject[:78]
    return _send_email(destination, subject, content)
def _send_email(destination, subject, content):
    """ common method for sending an email to one or more destinations """
    try:
        # typical values for the text subtype are plain, html, xml
        message = MIMEText(content, 'plain')
        message['Subject'] = subject
        sender = email_config['sender']
        # some SMTP servers will do this automatically
        message['From'] = sender
        connection = SMTP(email_config['smtpserver'], timeout=15)
        connection.set_debuglevel(False)
        connection.login(email_config['user_name'], email_config['password'])
        try:
            connection.sendmail(sender, destination, message.as_string())
        finally:
            # always close the connection, even when sending fails
            connection.quit()
    except Exception as exc:
        print("mail failed; {:}".format(str(exc)))
        return False
    return True
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,373 | PX4/flight_review | refs/heads/main | /app/tornado_handlers/three_d.py | """
Tornado handler for the 3D page
"""
from __future__ import print_function
import datetime
import os
import sys
import tornado.web
import numpy as np
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../plot_app'))
from config import get_bing_maps_api_key, get_cesium_api_key
from helper import validate_log_id, get_log_filename, load_ulog_file, \
get_flight_mode_changes, flight_modes_table, get_lat_lon_alt_deg
#pylint: disable=relative-beyond-top-level
from .common import get_jinja_env, CustomHTTPError, TornadoRequestHandlerBase
THREED_TEMPLATE = '3d.html'
#pylint: disable=abstract-method, unused-argument
class ThreeDHandler(TornadoRequestHandlerBase):
    """ Tornado Request Handler to render the 3D Cesium.js page """

    def get(self, *args, **kwargs):
        """ GET request callback

        Renders the 3D view template for one log: the position, attitude,
        flight modes and stick inputs are serialized as JS array strings.
        Raises an HTTP 400 error for an invalid log id or missing topics.
        """
        # load the log file
        log_id = self.get_argument('log')
        if not validate_log_id(log_id):
            raise tornado.web.HTTPError(400, 'Invalid Parameter')
        log_file_name = get_log_filename(log_id)
        ulog = load_ulog_file(log_file_name)

        # extract the necessary information from the log
        try:
            # required topics: none of these are optional
            gps_pos = ulog.get_dataset('vehicle_gps_position')
            attitude = ulog.get_dataset('vehicle_attitude').data
        except (KeyError, IndexError, ValueError) as error:
            raise CustomHTTPError(
                400,
                'The log does not contain all required topics<br />'
                '(vehicle_gps_position, vehicle_global_position, '
                'vehicle_attitude)') from error

        # manual control setpoint is optional
        manual_control_setpoint = None
        try:
            manual_control_setpoint = ulog.get_dataset('manual_control_setpoint').data
        except (KeyError, IndexError, ValueError) as error:
            pass

        lat, lon, alt = get_lat_lon_alt_deg(ulog, gps_pos)

        # Get the takeoff location. We use the first position with a valid fix,
        # and assume that the vehicle is not in the air already at that point
        takeoff_index = 0
        gps_indices = np.nonzero(gps_pos.data['fix_type'] > 2)
        if len(gps_indices[0]) > 0:
            takeoff_index = gps_indices[0][0]
        takeoff_altitude = '{:.3f}'.format(alt[takeoff_index])
        takeoff_latitude = '{:.10f}'.format(lat[takeoff_index])
        takeoff_longitude = '{:.10f}'.format(lon[takeoff_index])

        # calculate UTC time offset (assume there's no drift over the entire log)
        utc_offset = int(gps_pos.data['time_utc_usec'][takeoff_index]) - \
            int(gps_pos.data['timestamp'][takeoff_index])

        # flight modes, serialized as [[iso timestamp, mode name], ...]
        flight_mode_changes = get_flight_mode_changes(ulog)
        flight_modes_str = '[ '
        for t, mode in flight_mode_changes:
            t += utc_offset
            utctimestamp = datetime.datetime.utcfromtimestamp(t/1.e6).replace(
                tzinfo=datetime.timezone.utc)
            if mode in flight_modes_table:
                mode_name, color = flight_modes_table[mode]
            else:
                # unknown mode: no name, white color
                mode_name = ''
                color = '#ffffff'
            flight_modes_str += '["{:}", "{:}"], ' \
                .format(utctimestamp.isoformat(), mode_name)
        flight_modes_str += ' ]'

        # manual control setpoints (stick input)
        manual_control_setpoints_str = '[ '
        if manual_control_setpoint:
            for i in range(len(manual_control_setpoint['timestamp'])):
                if 'throttle' in manual_control_setpoint:
                    manual_x = manual_control_setpoint['pitch'][i]
                    manual_y = manual_control_setpoint['roll'][i]
                    manual_z = manual_control_setpoint['throttle'][i]
                    manual_r = manual_control_setpoint['yaw'][i]
                else: # COMPATIBILITY support for old logs (PX4/PX4-Autopilot/pull/15949)
                    manual_x = manual_control_setpoint['x'][i]
                    manual_y = manual_control_setpoint['y'][i]
                    # rescale throttle from [0, 1] to [-1, 1]
                    manual_z = manual_control_setpoint['z'][i] * 2 - 1
                    manual_r = manual_control_setpoint['r'][i]
                t = manual_control_setpoint['timestamp'][i] + utc_offset
                utctimestamp = datetime.datetime.utcfromtimestamp(t/1.e6).replace(
                    tzinfo=datetime.timezone.utc)
                manual_control_setpoints_str += '["{:}", {:.3f}, {:.3f}, {:.3f}, {:.3f}], ' \
                    .format(utctimestamp.isoformat(), manual_x, manual_y, manual_z, manual_r)
        manual_control_setpoints_str += ' ]'

        # position
        # Note: altitude_ellipsoid_m from gps_pos would be the better match for
        # altitude, but it's not always available. And since we add an offset
        # (to match the takeoff location with the ground altitude) it does not
        # matter as much.
        position_data = '[ '
        # TODO: use vehicle_global_position? If so, then:
        # - altitude requires an offset (to match the GPS data)
        # - it's worse for some logs where the estimation is bad -> acro flights
        #   (-> add both: user-selectable between GPS & estimated trajectory?)
        for i in range(len(gps_pos.data['timestamp'])):
            t = gps_pos.data['timestamp'][i] + utc_offset
            utctimestamp = datetime.datetime.utcfromtimestamp(t/1.e6).replace(
                tzinfo=datetime.timezone.utc)
            if i == 0:
                start_timestamp = utctimestamp
            # keeps updating so it ends up holding the last sample's time
            end_timestamp = utctimestamp
            position_data += '["{:}", {:.10f}, {:.10f}, {:.3f}], ' \
                .format(utctimestamp.isoformat(), lon[i], lat[i], alt[i])
        position_data += ' ]'

        start_timestamp_str = '"{:}"'.format(start_timestamp.isoformat())
        boot_timestamp = datetime.datetime.utcfromtimestamp(utc_offset/1.e6).replace(
            tzinfo=datetime.timezone.utc)
        boot_timestamp_str = '"{:}"'.format(boot_timestamp.isoformat())
        end_timestamp_str = '"{:}"'.format(end_timestamp.isoformat())

        # orientation as quaternion
        attitude_data = '[ '
        for i in range(len(attitude['timestamp'])):
            att_qw = attitude['q[0]'][i]
            att_qx = attitude['q[1]'][i]
            att_qy = attitude['q[2]'][i]
            att_qz = attitude['q[3]'][i]
            t = attitude['timestamp'][i] + utc_offset
            utctimestamp = datetime.datetime.utcfromtimestamp(t/1.e6).replace(
                tzinfo=datetime.timezone.utc)
            # Cesium uses (x, y, z, w)
            attitude_data += '["{:}", {:.6f}, {:.6f}, {:.6f}, {:.6f}], ' \
                .format(utctimestamp.isoformat(), att_qx, att_qy, att_qz, att_qw)
        attitude_data += ' ]'

        # handle different vehicle types
        # the model_scale_factor should scale the different models to make them
        # equal in size (in proportion)
        mav_type = ulog.initial_parameters.get('MAV_TYPE', None)
        if mav_type == 1: # fixed wing
            model_scale_factor = 0.06
            model_uri = 'plot_app/static/cesium/SampleData/models/CesiumAir/Cesium_Air.glb'
        elif mav_type == 7: # Airship, controlled
            model_scale_factor = 0.1
            model_uri = 'plot_app/static/cesium/SampleData/models/CesiumBalloon/CesiumBalloon.glb'
        elif mav_type == 8: # Free balloon, uncontrolled
            model_scale_factor = 0.1
            model_uri = 'plot_app/static/cesium/SampleData/models/CesiumBalloon/CesiumBalloon.glb'
        elif mav_type == 2: # quad
            model_scale_factor = 1
            model_uri = 'plot_app/static/cesium/models/iris/iris.glb'
        elif mav_type == 22: # delta-quad
            # TODO: use the delta-quad model
            model_scale_factor = 0.06
            model_uri = 'plot_app/static/cesium/SampleData/models/CesiumAir/Cesium_Air.glb'
        else: # TODO: handle more types
            model_scale_factor = 1
            model_uri = 'plot_app/static/cesium/models/iris/iris.glb'

        template = get_jinja_env().get_template(THREED_TEMPLATE)
        self.write(template.render(
            flight_modes=flight_modes_str,
            manual_control_setpoints=manual_control_setpoints_str,
            takeoff_altitude=takeoff_altitude,
            takeoff_longitude=takeoff_longitude,
            takeoff_latitude=takeoff_latitude,
            position_data=position_data,
            start_timestamp=start_timestamp_str,
            boot_timestamp=boot_timestamp_str,
            end_timestamp=end_timestamp_str,
            attitude_data=attitude_data,
            model_scale_factor=model_scale_factor,
            model_uri=model_uri,
            log_id=log_id,
            bing_api_key=get_bing_maps_api_key(),
            cesium_api_key=get_cesium_api_key()))
57,374 | PX4/flight_review | refs/heads/main | /app/plot_app/plotting.py | """ methods an classes used for plotting (wrappers around bokeh plots) """
import copy
from bokeh.plotting import figure
#pylint: disable=line-too-long, arguments-differ, unused-import
from bokeh.models import (
ColumnDataSource, Range1d, DataRange1d, DatetimeAxis,
TickFormatter, DatetimeTickFormatter, FuncTickFormatter,
Grid, Legend, Plot, BoxAnnotation, Span, CustomJS, Rect, Circle, Line,
HoverTool, BoxZoomTool, PanTool, WheelZoomTool, ResetTool, SaveTool,
WMTSTileSource, GMapPlot, GMapOptions,
LabelSet, Label, ColorBar, LinearColorMapper, BasicTicker, PrintfTickFormatter
)
from bokeh.palettes import viridis
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
from bokeh import events
import numpy as np
import scipy
import scipy.signal
import pyfftw
from downsampling import DynamicDownsample
from helper import (
map_projection, WGS84_to_mercator, flight_modes_table, vtol_modes_table, get_lat_lon_alt_deg
)
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
ACTIVE_SCROLL_TOOLS = "wheel_zoom"
def plot_dropouts(p, dropouts, min_value, show_hover_tooltips=False):
    """ plot small rectangles with given min_value offset """
    if not dropouts:
        return
    # one rectangle per dropout; the height encodes the dropout duration
    lefts, rights, tops, durations = [], [], [], []
    for dropout in dropouts:
        start = dropout.timestamp
        lefts.append(start)
        rights.append(start + dropout.duration * 1000)
        tops.append(min_value + dropout.duration * 1000)
        durations.append(dropout.duration)
    source = ColumnDataSource({'left': lefts, 'right': rights, 'top': tops,
                               'bottom': [min_value] * len(dropouts),
                               'duration': durations})
    quad = p.quad(left='left', right='right', top='top', bottom='bottom', source=source,
                  line_color='black', line_alpha=0.3, fill_color='black',
                  fill_alpha=0.15, legend_label='logging dropout')
    if show_hover_tooltips:
        p.add_tools(HoverTool(tooltips=[('dropout', '@duration ms')],
                              renderers=[quad]))
def add_virtual_fifo_topic_data(ulog, topic_name, instance=0):
    """ adds a virtual topic by expanding the FIFO samples array into individual
        samples, so it can be used for normal plotting.

        The FIFO topic stores up to N samples per message in 'x[0..N-1]',
        'y[...]', 'z[...]' fields, together with 'samples' (count per
        message), 'dt' (sample interval) and 'scale'; the expanded topic gets
        flat 'x', 'y', 'z' arrays and per-sample timestamps.

        new topic name: topic_name+'_virtual'
        :param ulog: pyulog.ULog object (the new dataset is appended to
            ulog.data_list)
        :param topic_name: name of the FIFO topic to expand
        :param instance: topic multi-instance index
        :return: True if topic data was added
    """
    try:
        cur_dataset = copy.deepcopy(ulog.get_dataset(topic_name, instance))
        cur_dataset.name = topic_name+'_virtual'
        t = cur_dataset.data['timestamp_sample']
        dt = cur_dataset.data['dt']
        samples = cur_dataset.data['samples']
        scale = cur_dataset.data['scale']
        # vectorized instead of a python accumulation loop
        total_samples = int(np.sum(samples))
        t_new = np.zeros(total_samples, t.dtype)
        xyz_new = [np.zeros(total_samples, np.float64) for _ in range(3)]
        sample = 0
        # TODO: this could be faster...
        for i, num_samples in enumerate(samples):
            for s in range(num_samples):
                # the message timestamp belongs to the newest sample; earlier
                # samples are spaced backwards by dt
                t_new[sample+s] = t[i]-(num_samples-s-1)*dt[i]
                for j, axis in enumerate(['x', 'y', 'z']):
                    data_point = cur_dataset.data[axis+'['+str(s)+']'][i] * scale[i]
                    xyz_new[j][sample+s] = data_point
            sample += num_samples
        cur_dataset.data['timestamp'] = t_new
        cur_dataset.data['timestamp_sample'] = t_new
        cur_dataset.data['x'] = xyz_new[0]
        cur_dataset.data['y'] = xyz_new[1]
        cur_dataset.data['z'] = xyz_new[2]
        ulog.data_list.append(cur_dataset)
        return True
    except (KeyError, IndexError, ValueError) as error:
        # log does not contain the value we are looking for
        print(type(error), "(fifo data):", error)
        return False
def plot_parameter_changes(p, plots_height, changed_parameters):
    """ plot changed parameters as text with value into bokeh plot p """
    timestamps = []
    names = []
    y_values = []
    for idx, (timestamp, name, value) in enumerate(changed_parameters):
        timestamps.append(timestamp)
        if isinstance(value, int):
            value_str = '{:}'.format(value)
        else:
            value_str = '{:.2f}'.format(value)
        names.append('⦁ ' + name + ': ' + value_str)
        # try to avoid overlapping text (TODO: do something more clever, dynamic?)
        # (when changing this, make sure there's no overlap with flight mode labels)
        y_values.append(plots_height - 70 - (idx % 4) * 10)
    if not names:
        return None
    source = ColumnDataSource(data={'x': timestamps, 'names': names, 'y': y_values})
    # plot as text with a fixed screen-space y offset
    labels = LabelSet(x='x', y='y', text='names',
                      y_units='screen', level='glyph', #text_alpha=0.9, text_color='black',
                      source=source, render_mode='canvas', text_font_size='8pt')
    p.add_layout(labels)
    return labels
def plot_flight_modes_background(data_plot, flight_mode_changes, vtol_states=None):
    """ plot flight modes as filling background (with different colors) to a
        DataPlot object

    :param data_plot: plot wrapper providing .bokeh_plot, .plot_height and
        .has_param_change_labels
    :param flight_mode_changes: list of (timestamp, mode) tuples; the last
        entry only marks the end of the final segment
    :param vtol_states: optional list of (timestamp, state) tuples, drawn as
        a separate strip at the bottom of the plot
    """
    vtol_state_height = 40  # height of the VTOL strip in screen pixels
    added_box_annotation_args = {}
    p = data_plot.bokeh_plot
    if vtol_states is not None:
        # leave the bottom strip free for the VTOL state annotations
        added_box_annotation_args['bottom'] = vtol_state_height
        added_box_annotation_args['bottom_units'] = 'screen'
    labels_y_pos = []
    labels_x_pos = []
    labels_text = []
    labels_color = []
    labels_y_offset = data_plot.plot_height - 60
    if data_plot.has_param_change_labels:
        # make sure there's no overlap with changed parameter labels
        labels_y_offset -= 10 + 4 * 10
    for i in range(len(flight_mode_changes)-1):
        t_start, mode = flight_mode_changes[i]
        t_end, mode_next = flight_mode_changes[i + 1]
        if mode in flight_modes_table:
            mode_name, color = flight_modes_table[mode]
            annotation = BoxAnnotation(left=int(t_start), right=int(t_end),
                                       fill_alpha=0.09, line_color=None,
                                       fill_color=color,
                                       **added_box_annotation_args)
            p.add_layout(annotation)

            if flight_mode_changes[i+1][0] - t_start > 1e6: # filter fast
                # switches to avoid overlap
                labels_text.append(mode_name)
                labels_x_pos.append(t_start)
                labels_y_pos.append(labels_y_offset)
                labels_color.append(color)

    # plot flight mode names as labels
    # they're only visible when the mouse is over the plot
    if len(labels_text) > 0:
        source = ColumnDataSource(data={'x': labels_x_pos, 'text': labels_text,
                                        'y': labels_y_pos, 'textcolor': labels_color})
        labels = LabelSet(x='x', y='y', text='text',
                          y_units='screen', level='underlay',
                          source=source, render_mode='canvas',
                          text_font_size='10pt',
                          text_color='textcolor', text_alpha=0.85,
                          background_fill_color='white',
                          background_fill_alpha=0.8, angle=90/180*np.pi,
                          text_align='right', text_baseline='top')
        labels.visible = False # initially hidden
        p.add_layout(labels)

        # show/hide the labels when the mouse enters/leaves the plot
        # callback doc: https://bokeh.pydata.org/en/latest/docs/user_guide/interaction/callbacks.html
        code = """
        labels.visible = cb_obj.event_name == "mouseenter";
        """
        callback = CustomJS(args={'labels': labels}, code=code)
        p.js_on_event(events.MouseEnter, callback)
        p.js_on_event(events.MouseLeave, callback)

    if vtol_states is not None:
        for i in range(len(vtol_states)-1):
            t_start, mode = vtol_states[i]
            t_end, mode_next = vtol_states[i + 1]
            if mode in vtol_modes_table:
                mode_name, color = vtol_modes_table[mode]
                p.add_layout(BoxAnnotation(left=int(t_start), right=int(t_end),
                                           fill_alpha=0.09, line_color=None,
                                           fill_color=color,
                                           top=vtol_state_height, top_units='screen'))
        # use screen coords so that the label always stays. It's a bit
        # unfortunate that the x position includes the x-offset of the y-axis,
        # which depends on the axis labels (e.g. 4.000e+5 creates a large offset)
        label = Label(x=60, y=12, x_units='screen', y_units='screen',
                      text='VTOL mode', text_font_size='10pt', level='glyph',
                      background_fill_color='white', background_fill_alpha=0.8)
        p.add_layout(label)

        # horizontal separator between the VTOL strip and the main plot area
        split_line = Span(location=vtol_state_height, location_units='screen',
                          dimension='width', line_color='black',
                          line_width=1, line_alpha=0.5)
        p.add_layout(split_line)
def plot_set_equal_aspect_ratio(p, x, y, zoom_out_factor=1.3, min_range=5):
    """
    Configure the plot x/y ranges so that both axes use the same scale.
    The plot dimensions must already have been set before calling this.

    :param p: bokeh figure (plot_width/plot_height already configured)
    :param x, y: data arrays used to determine the visible extents
    :param zoom_out_factor: extra margin applied around the data
    :param min_range: minimum span for each axis
    """
    x_min, x_max = np.amin(x), np.amax(x)
    y_min, y_max = np.amin(y), np.amax(y)
    x_span = max(x_max - x_min, min_range)
    y_span = max(y_max - y_min, min_range)
    x_mid = (x_min + x_max) / 2
    y_mid = (y_min + y_max) / 2
    # widen one of the spans so the data aspect matches the drawing area
    plot_aspect = p.plot_width / p.plot_height
    if x_span / y_span < plot_aspect:
        x_span = y_span * plot_aspect
    else:
        y_span = x_span / plot_aspect
    half_x = x_span / 2 * zoom_out_factor
    half_y = y_span / 2 * zoom_out_factor
    p.x_range = Range1d(start=x_mid - half_x, end=x_mid + half_x, bounds=None)
    p.y_range = Range1d(start=y_mid - half_y, end=y_mid + half_y, bounds=None)
    p.select_one(BoxZoomTool).match_aspect = True
# GPS map
def plot_map(ulog, config, map_type='plain', api_key=None, setpoints=False,
             bokeh_plot=None):
    """
    Do a 2D position plot

    :param ulog: ULog object to read the GPS data from
    :param config: plot configuration dict (plot_width, plot_height, colors)
    :param map_type: one of 'osm', 'google', 'plain'
    :param api_key: Google Maps API key (only used for map_type='google')
    :param setpoints: if True, also draw the (mission) position setpoints
    :param bokeh_plot: if None, create a new bokeh plot, otherwise use the
                       supplied one (only for 'plain' map_type)
    :return: bokeh plot object, or None on error (e.g. no GPS data in the log)
    """
    try:
        cur_dataset = ulog.get_dataset('vehicle_gps_position')
        t = cur_dataset.data['timestamp']
        indices = cur_dataset.data['fix_type'] > 2  # use only data with a fix
        t = t[indices]
        # NOTE(review): only t is masked by fix_type here; lat/lon appear
        # unfiltered — presumably get_lat_lon_alt_deg handles this; confirm
        lat, lon, _ = get_lat_lon_alt_deg(ulog, cur_dataset)

        plots_width = config['plot_width']
        plots_height = config['plot_height']['large']
        anchor_lat = 0
        anchor_lon = 0

        if len(t) == 0:
            raise ValueError('No valid GPS position data')

        if map_type == 'google':
            data_source = ColumnDataSource(data={'lat': lat, 'lon': lon})
            lon_center = (np.amin(lon) + np.amax(lon)) / 2
            lat_center = (np.amin(lat) + np.amax(lat)) / 2

            map_options = GMapOptions(lat=lat_center, lng=lon_center,
                                      map_type="hybrid", zoom=19)
            # possible map types: satellite, roadmap, terrain, hybrid
            p = GMapPlot(
                x_range=Range1d(), y_range=Range1d(), map_options=map_options,
                api_key=api_key, plot_width=plots_width, plot_height=plots_height
            )
            pan = PanTool()
            wheel_zoom = WheelZoomTool()
            p.add_tools(pan, wheel_zoom)
            p.toolbar.active_scroll = wheel_zoom

            line = Line(x="lon", y="lat", line_width=2, line_color=config['maps_line_color'])
            p.add_glyph(data_source, line)

        elif map_type == 'osm':
            # OpenStreetMaps

            # transform coordinates
            lon, lat = WGS84_to_mercator(lon, lat)
            data_source = ColumnDataSource(data={'lat': lat, 'lon': lon})

            p = figure(tools=TOOLS, active_scroll=ACTIVE_SCROLL_TOOLS)
            p.plot_width = plots_width
            p.plot_height = plots_height
            plot_set_equal_aspect_ratio(p, lon, lat)

            p.background_fill_color = "lightgray"
            p.axis.visible = False

            tile_options = {}
            # thunderforest
            tile_options['url'] = 'http://b.tile.thunderforest.com/landscape/{z}/{x}/{y}.png'
            tile_options['attribution'] = 'Maps © <a href="http://www.thunderforest.com">Thunderforest</a>, Data © <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors '

            # default OpenStreetMaps
            # tile_options['url'] = 'http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png'
            # tile_options['attribution'] = '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors '

            # FIXME: tiles disabled for now due to a rendering bug
            # tile_source = WMTSTileSource(**tile_options)
            # tile_renderer_options = {}
            # p.add_tile(tile_source, **tile_renderer_options)

            # stamen (black & white)
            # STAMEN_TONER = WMTSTileSource(
            #     url='http://tile.stamen.com/toner/{Z}/{X}/{Y}.png',
            #     attribution=(
            #         'Map tiles by <a href="http://stamen.com">Stamen Design</a>, '
            #         'under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>.'
            #         'Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, '
            #         'under <a href="http://www.openstreetmap.org/copyright">ODbL</a>'
            #     )
            # )
            # p.add_tile(STAMEN_TONER)

            p.line(x='lon', y='lat', source=data_source, line_width=2,
                   line_color=config['maps_line_color'])

        else: # plain

            # transform coordinates
            lat = np.deg2rad(lat)
            lon = np.deg2rad(lon)
            anchor_lat = lat[0]
            anchor_lon = lon[0]

            # try to get the anchor position from the dataset
            try:
                local_pos_data = ulog.get_dataset('vehicle_local_position')
                indices = np.nonzero(local_pos_data.data['ref_timestamp'])
                if len(indices[0]) > 0:
                    anchor_lat = np.deg2rad(local_pos_data.data['ref_lat'][indices[0][0]])
                    anchor_lon = np.deg2rad(local_pos_data.data['ref_lon'][indices[0][0]])
            except Exception:
                # best-effort: keep the first GPS sample as anchor
                # (was a bare except, which also swallowed SystemExit etc.)
                pass

            lat, lon = map_projection(lat, lon, anchor_lat, anchor_lon)
            data_source = ColumnDataSource(data={'lat': lat, 'lon': lon})

            if bokeh_plot is None:
                p = figure(tools=TOOLS, active_scroll=ACTIVE_SCROLL_TOOLS,
                           x_axis_label='[m]', y_axis_label='[m]')
                p.plot_width = plots_width
                p.plot_height = plots_height
                plot_set_equal_aspect_ratio(p, lon, lat)
            else:
                p = bokeh_plot

            # TODO: altitude line coloring
            p.line(x='lon', y='lat', source=data_source, line_width=2,
                   line_color=config['maps_line_color'], legend_label='GPS (projected)')

        if setpoints:
            # draw (mission) setpoint as circles
            try:
                cur_dataset = ulog.get_dataset('position_setpoint_triplet')
                lon = cur_dataset.data['current.lon'] # degrees
                lat = cur_dataset.data['current.lat']

                if map_type == 'osm':
                    lon, lat = WGS84_to_mercator(lon, lat)
                elif map_type == 'plain':
                    lat = np.deg2rad(lat)
                    lon = np.deg2rad(lon)
                    lat, lon = map_projection(lat, lon, anchor_lat, anchor_lon)

                data_source = ColumnDataSource(data={'lat': lat, 'lon': lon})

                p.circle(x='lon', y='lat', source=data_source,
                         line_width=2, size=6, line_color=config['mission_setpoint_color'],
                         fill_color=None, legend_label='Position Setpoints')
            except Exception:
                # setpoints are optional; ignore logs without them
                # (was a bare except, which also swallowed SystemExit etc.)
                pass

    except (KeyError, IndexError, ValueError) as error:
        # log does not contain the value we are looking for
        print(type(error), "(vehicle_gps_position):", error)
        return None
    p.toolbar.logo = None
    # make it possible to hide graphs by clicking on the label
    p.legend.click_policy = "hide"
    return p
class DataPlot:
    """
    Handle the bokeh plot generation from an ULog dataset
    """

    def __init__(self, data, config, data_name, x_axis_label=None,
                 y_axis_label=None, title=None, plot_height='normal',
                 y_range=None, y_start=None, changed_params=None,
                 topic_instance=0, x_range=None):
        """
        :param data: list of ULog dataset objects (each with .name, .multi_id
               and a .data dict)
        :param config: plot configuration dict (plot_width, plot_height, ...)
        :param data_name: name of the topic/dataset to select
        :param plot_height: key into config['plot_height']
        :param y_start: if set, make sure the y axis includes this value
        :param changed_params: list of parameter changes to annotate, or None
        :param topic_instance: multi-instance id of the topic to select
        """
        self._had_error = False
        self._previous_success = False
        self._param_change_label = None
        self._data = data
        self._config = config
        self._plot_height_name = plot_height
        self._data_name = data_name
        self._cur_dataset = None
        self._use_time_formatter = True
        # make sure the attribute exists even if figure() raises below
        # (the title property reads it)
        self._p = None
        try:
            self._p = figure(title=title, x_axis_label=x_axis_label,
                             y_axis_label=y_axis_label, tools=TOOLS,
                             active_scroll=ACTIVE_SCROLL_TOOLS)
            if y_range is not None:
                self._p.y_range = Range1d(y_range.start, y_range.end)
            if x_range is not None:
                # we need a copy, otherwise x-axis zooming will be synchronized
                # between all plots
                self._p.x_range = Range1d(x_range.start, x_range.end)

            if changed_params is not None:
                self._param_change_label = \
                    plot_parameter_changes(self._p, self.plot_height,
                                           changed_params)

            self._cur_dataset = [elem for elem in data
                                 if elem.name == data_name and elem.multi_id == topic_instance][0]

            if y_start is not None:
                # make sure y axis starts at y_start. We do it by adding an invisible circle
                self._p.circle(x=int(self._cur_dataset.data['timestamp'][0]),
                               y=y_start, size=0, alpha=0)
        except (KeyError, IndexError, ValueError) as error:
            print(type(error), "("+self._data_name+"):", error)
            self._had_error = True

    @property
    def title(self):
        """ return the bokeh title """
        if self._p is not None:
            title_text = self._p.title.text # pylint: disable=no-member
        else:
            title_text = ""
        return title_text

    @property
    def bokeh_plot(self):
        """ return the bokeh plot """
        return self._p

    @property
    def param_change_label(self):
        """ returns bokeh LabelSet or None """
        return self._param_change_label

    @property
    def has_param_change_labels(self):
        """ Does the plot have changed parameter labels? """
        return self._param_change_label is not None

    @property
    def had_error(self):
        """ Returns true if the previous plotting calls had an error (e.g. due
        to missing data in the log) """
        return self._had_error

    @property
    def dataset(self):
        """ get current dataset """
        return self._cur_dataset

    def change_dataset(self, data_name, topic_instance=0):
        """ select a new dataset. Afterwards, call add_graph etc """
        self._data_name = data_name
        if not self._had_error:
            self._previous_success = True
        self._had_error = False
        try:
            self._cur_dataset = [elem for elem in self._data
                                 if elem.name == data_name and elem.multi_id == topic_instance][0]
        except (KeyError, IndexError, ValueError) as error:
            print(type(error), "("+self._data_name+"):", error)
            self._had_error = True
            self._cur_dataset = None

    def add_graph(self, field_names, colors, legends, use_downsample=True,
                  mark_nan=False, use_step_lines=False):
        """ add 1 or more lines to a graph

        field_names can be a list of fields from the data set, or a list of
        functions with the data set as argument and returning a tuple of
        (field_name, data)
        :param mark_nan: if True, add an indicator to the plot when one of the graphs is NaN
        :param use_step_lines: if True, render step lines (after each point)
                               instead of rendering a straight line to the next point
        """
        if self._had_error: return
        try:
            p = self._p
            data_set = {}
            data_set['timestamp'] = self._cur_dataset.data['timestamp']
            field_names_expanded = self._expand_field_names(field_names, data_set)

            if mark_nan:
                # look through the data to find NaN's and store their timestamps
                nan_timestamps = set()
                for key in field_names_expanded:
                    nan_indexes = np.argwhere(np.isnan(data_set[key]))
                    last_index = -2
                    for ind in nan_indexes:
                        if last_index + 1 != ind: # store only timestamps at the start of NaN
                            nan_timestamps.add(data_set['timestamp'][ind][0])
                        last_index = ind

                nan_color = 'black'
                for nan_timestamp in nan_timestamps:
                    nan_line = Span(location=nan_timestamp,
                                    dimension='height', line_color=nan_color,
                                    line_dash='dashed', line_width=2)
                    p.add_layout(nan_line)
                if len(nan_timestamps) > 0:
                    y_values = [30] * len(nan_timestamps)
                    # NaN label: add a space to separate it from the line
                    names = [' NaN'] * len(nan_timestamps)
                    source = ColumnDataSource(data={'x': np.array(list(nan_timestamps)),
                                                    'names': names, 'y': y_values})

                    # plot as text with a fixed screen-space y offset
                    labels = LabelSet(x='x', y='y', text='names',
                                      y_units='screen', level='glyph', text_color=nan_color,
                                      source=source, render_mode='canvas')
                    p.add_layout(labels)

            if use_downsample:
                # we directly pass the data_set, downsample and then create the
                # ColumnDataSource object, which is much faster than
                # first creating ColumnDataSource, and then downsample
                downsample = DynamicDownsample(p, data_set, 'timestamp')
                data_source = downsample.data_source
            else:
                data_source = ColumnDataSource(data=data_set)

            for field_name, color, legend in zip(field_names_expanded, colors, legends):
                if use_step_lines:
                    p.step(x='timestamp', y=field_name, source=data_source,
                           legend_label=legend, line_width=2, line_color=color,
                           mode="after")
                else:
                    p.line(x='timestamp', y=field_name, source=data_source,
                           legend_label=legend, line_width=2, line_color=color)
        except (KeyError, IndexError, ValueError) as error:
            print(type(error), "("+self._data_name+"):", error)
            self._had_error = True

    def add_circle(self, field_names, colors, legends):
        """ add circles

        see add_graph for arguments description
        """
        if self._had_error: return
        try:
            p = self._p
            data_set = {}
            data_set['timestamp'] = self._cur_dataset.data['timestamp']
            field_names_expanded = self._expand_field_names(field_names, data_set)
            data_source = ColumnDataSource(data=data_set)

            for field_name, color, legend in zip(field_names_expanded, colors, legends):
                p.circle(x='timestamp', y=field_name, source=data_source,
                         legend_label=legend, line_width=2, size=4, line_color=color,
                         fill_color=None)
        except (KeyError, IndexError, ValueError) as error:
            print(type(error), "("+self._data_name+"):", error)
            self._had_error = True

    def _expand_field_names(self, field_names, data_set):
        """
        expand field names if they're a function; fills data_set with the
        (possibly computed) data and returns the list of plain field names
        """
        field_names_expanded = []
        for field_name in field_names:
            if hasattr(field_name, '__call__'):
                new_field_name, new_data = field_name(self._cur_dataset.data)
                data_set[new_field_name] = new_data
                field_names_expanded.append(new_field_name)
            else:
                data_set[field_name] = self._cur_dataset.data[field_name]
                field_names_expanded.append(field_name)
        return field_names_expanded

    def add_span(self, field_name, accumulator_func=np.mean,
                 line_color='black', line_alpha=0.5):
        """ Add a horizontal line. Location is determined by accumulating a
        dataset with accumulator_func (e.g. np.mean) """
        if self._had_error: return
        try:
            accumulated_data = accumulator_func(self._cur_dataset.data[field_name])
            # .item() assumes the accumulator returns a numpy scalar
            value = accumulated_data.item()
            if not np.isnan(value):
                data_span = Span(location=value,
                                 dimension='width', line_color=line_color,
                                 line_alpha=line_alpha, line_width=1)
                self._p.add_layout(data_span)
        except (KeyError, IndexError, ValueError) as error:
            print(type(error), "("+self._data_name+"):", error)
            self._had_error = True

    def add_horizontal_background_boxes(self, colors, limits):
        """ Add horizontal background boxes filled with a color

        :param colors: list of N colors (in increasing y direction)
        :param limits: list of N-1 y-axis values (not modified)
        """
        bottom = None
        # extend with None for the open-ended top box, without mutating the
        # caller's limits list
        for color, limit in zip(colors, limits + [None]):
            self._p.add_layout(BoxAnnotation(
                bottom=bottom, top=limit, fill_alpha=0.09, line_alpha=1,
                fill_color=color, line_width=0))
            bottom = limit

    def set_use_time_formatter(self, use_formatter):
        """ configure whether the time formatter should be used """
        self._use_time_formatter = use_formatter

    def finalize(self):
        """ Call this after all plots are done. Returns the bokeh plot, or None
        on error """
        if self._had_error and not self._previous_success:
            return None
        self._setup_plot()
        return self._p

    @property
    def plot_height(self):
        """ get the height of the plot in screen pixels """
        return self._config['plot_height'][self._plot_height_name]

    def _setup_plot(self):
        """ apply size, grid, toolbar & axis-formatter settings to the plot """
        plots_width = self._config['plot_width']
        plots_height = self.plot_height
        p = self._p
        p.plot_width = plots_width
        p.plot_height = plots_height

        # -> other attributes are set via theme.yaml

        # disable x grid lines
        p.xgrid.grid_line_color = None

        p.ygrid.grid_line_color = 'navy'
        p.ygrid.grid_line_alpha = 0.13
        p.ygrid.minor_grid_line_color = 'navy'
        p.ygrid.minor_grid_line_alpha = 0.05

        p.toolbar.logo = None # hide the bokeh logo (we give credit at the
                              # bottom of the page)

        #p.lod_threshold=None # turn off level-of-detail

        # axis labels: format time (the log timestamps are in microseconds)
        if self._use_time_formatter:
            p.xaxis[0].formatter = FuncTickFormatter(code='''
                    //func arguments: ticks, x_range
                    // assume us ticks
                    var ms = Math.round(tick / 1000);
                    var sec = Math.floor(ms / 1000);
                    var minutes = Math.floor(sec / 60);
                    var hours = Math.floor(minutes / 60);
                    ms = ms % 1000;
                    sec = sec % 60;
                    minutes = minutes % 60;

                    function pad(num, size) {
                        var s = num+"";
                        while (s.length < size) s = "0" + s;
                        return s;
                    }

                    if (hours > 0) {
                        var ret_val = hours + ":" + pad(minutes, 2) + ":" + pad(sec,2);
                    } else {
                        var ret_val = minutes + ":" + pad(sec,2);
                    }
                    if (x_range.end - x_range.start < 4e6) {
                        ret_val = ret_val + "." + pad(ms, 3);
                    }
                    return ret_val;
                ''', args={'x_range': p.x_range})

        # make it possible to hide graphs by clicking on the label
        p.legend.click_policy = "hide"
class DataPlot2D(DataPlot):
    """
    A 2D plot (without map)
    This does not do downsampling.
    """

    def __init__(self, data, config, data_name, x_axis_label=None,
                 y_axis_label=None, title=None, plot_height='normal',
                 equal_aspect=True):
        """
        :param equal_aspect: if True, scale both axes equally when the first
               graph is added (see plot_set_equal_aspect_ratio)
        """
        super().__init__(data, config, data_name,
                         x_axis_label=x_axis_label,
                         y_axis_label=y_axis_label,
                         title=title, plot_height=plot_height)

        self._equal_aspect = equal_aspect
        self._is_first_graph = True

        self._p.plot_width = self._config['plot_width']
        self._p.plot_height = self._config['plot_height'][self._plot_height_name]

    def add_graph(self, dataset_x, dataset_y, color, legend, check_if_all_zero=False):
        """ add a line to the graph

        :param dataset_x: field name of the x values
        :param dataset_y: field name of the y values
        :param check_if_all_zero: if True, treat an all-zero dataset as an
               error (the graph is not added)
        """
        if self._had_error: return
        try:
            p = self._p
            x = self._cur_dataset.data[dataset_x]
            y = self._cur_dataset.data[dataset_y]
            # FIXME: bokeh should be able to handle np.nan values properly, but
            # we still get a ValueError('Out of range float values are not JSON
            # compliant'), if x or y contains nan
            non_nan_indexes = np.logical_not(np.logical_or(np.isnan(x), np.isnan(y)))
            x = x[non_nan_indexes]
            y = y[non_nan_indexes]

            if check_if_all_zero:
                if np.count_nonzero(x) == 0 and np.count_nonzero(y) == 0:
                    # include a message so the printed error below is
                    # informative (was an empty ValueError)
                    raise ValueError('all data points are zero')

            data_source = ColumnDataSource(data={'x': x, 'y': y})

            p.line(x="x", y="y", source=data_source, line_width=2,
                   line_color=color, legend_label=legend)

            if self._is_first_graph:
                self._is_first_graph = False
                if self._equal_aspect:
                    plot_set_equal_aspect_ratio(p, x, y)
        except (KeyError, IndexError, ValueError) as error:
            print(type(error), "("+self._data_name+"):", error)
            self._had_error = True

    def _setup_plot(self):
        """ final plot setup: 2D plots only hide the bokeh logo """
        p = self._p
        p.toolbar.logo = None
class DataPlotSpec(DataPlot):
    """
    A spectrogram plot.
    This does not downsample dynamically.
    A spectrogram plot is only added to the plotting page if the sampling frequency of the dataset is higher than 100Hz.
    """
    def __init__(self, data, config, data_name, x_axis_label=None,
                 y_axis_label=None, title=None, plot_height='small',
                 x_range=None, y_range=None, topic_instance=0):
        """ Initialize the base DataPlot with spectrogram defaults """
        super().__init__(data, config, data_name, x_axis_label=x_axis_label,
                         y_axis_label=y_axis_label, title=title, plot_height=plot_height,
                         x_range=x_range, y_range=y_range, topic_instance=topic_instance)
    def add_graph(self, field_names, legends, window='hann', window_length=256, noverlap=128):
        """ add a spectrogram plot to the graph

        field_names: can be a list of fields from the data set, or a list of
        functions with the data set as argument and returning a tuple of
        (field_name, data)
        legends: description for the field_names that will appear in the title of the plot
        window: the type of window to use for the frequency analysis. check scipy documentation for available window types.
        window_length: length of the analysis window in samples.
        noverlap: number of overlapping samples between windows.
        """
        if self._had_error: return
        try:
            data_set = {}
            timestamp_key = 'timestamp'
            # prefer the sample timestamp when the topic provides one
            if 'timestamp_sample' in self._cur_dataset.data.keys():
                timestamp_key = 'timestamp_sample'
            data_set[timestamp_key] = self._cur_dataset.data[timestamp_key]
            # calculate the sampling frequency
            # (Note: logging dropouts are not taken into account here)
            delta_t = ((data_set[timestamp_key][-1] - data_set[timestamp_key][0]) * 1.0e-6) / len(data_set[timestamp_key])
            if delta_t < 0.000001: # avoid division by zero
                self._had_error = True
                return
            sampling_frequency = int(1.0 / delta_t)
            if sampling_frequency < 100: # require min sampling freq
                self._had_error = True
                return
            field_names_expanded = self._expand_field_names(field_names, data_set)
            # calculate the spectrogram
            psd = {}
            for key in field_names_expanded:
                frequency, time, psd[key] = scipy.signal.spectrogram(
                    data_set[key], fs=sampling_frequency, window=window,
                    nperseg=window_length, noverlap=noverlap, scaling='density')
            # sum all psd's
            key_it = iter(psd)
            sum_psd = psd[next(key_it)]
            for key in key_it:
                sum_psd += psd[key]
            # offset = int(((1024/2.0)/250.0)*1e6)
            # scale time to microseconds and add start time as offset
            time = time * 1.0e6 + self._cur_dataset.data[timestamp_key][0]
            # convert the summed PSD to dB
            inner_image = 10 * np.log10(sum_psd)
            # Bokeh/JSON can't handle -inf.
            # Replace any -inf values with the smallest finite number in the
            # dataset. We aren't using something like INT_MIN because we
            # don't want to mess up scaling too much.
            if -np.inf in inner_image:
                finite_min = np.min(np.ma.masked_invalid(inner_image))
                inner_image[inner_image == -np.inf] = finite_min
            image = [inner_image]
            # title lists all plotted fields
            title = self.title
            for legend in legends:
                title += " " + legend
            title += " [dB]"
            # assume maximal data points per pixel at full resolution
            max_num_data_points = 2.0*self._config['plot_width']
            if len(time) > max_num_data_points:
                # subsample the time axis and the image columns accordingly
                step_size = int(len(time) / max_num_data_points)
                time = time[::step_size]
                image[0] = image[0][:, ::step_size]
            color_mapper = LinearColorMapper(palette="Viridis256", low=np.amin(image), high=np.amax(image))
            self._p.y_range = Range1d(frequency[0], frequency[-1])
            self._p.toolbar_location = 'above'
            self._p.image(image=image, x=time[0], y=frequency[0], dw=(time[-1]-time[0]),
                          dh=(frequency[-1]-frequency[0]), color_mapper=color_mapper)
            color_bar = ColorBar(color_mapper=color_mapper,
                                 major_label_text_font_size="5pt",
                                 ticker=BasicTicker(desired_num_ticks=5),
                                 formatter=PrintfTickFormatter(format="%f"),
                                 title='[dB]',
                                 label_standoff=6, border_line_color=None, location=(0, 0))
            self._p.add_layout(color_bar, 'right')
            # add plot zoom tool that only zooms in time axis
            wheel_zoom = WheelZoomTool()
            self._p.toolbar.tools = [PanTool(), wheel_zoom, BoxZoomTool(), ResetTool(), SaveTool()] # updated_tools
            self._p.toolbar.active_scroll = wheel_zoom
        except (KeyError, IndexError, ValueError, ZeroDivisionError) as error:
            print(type(error), "(" + self._data_name + "):", error)
            self._had_error = True
class DataPlotFFT(DataPlot):
    """
    An FFT plot.
    This does not downsample dynamically.
    An FFT plot is only added to the plotting page if the sampling frequency of
    the dataset is higher than 100Hz.
    """
    def __init__(self, data, config, data_name,
                 title=None, plot_height='small',
                 x_range=None, y_range=None, topic_instance=0):
        """ Initialize the base DataPlot with frequency-domain axis labels """
        super().__init__(data, config, data_name, x_axis_label='Hz',
                         y_axis_label='Amplitude', title=title, plot_height=plot_height,
                         x_range=x_range, y_range=y_range, topic_instance=topic_instance)
        # the x axis is a frequency, not a timestamp
        self._use_time_formatter = False
    def add_graph(self, field_names, colors, legends):
        """ add an FFT plot to the graph

        field_names: can be a list of fields from the data set, or a list of
        functions with the data set as argument and returning a tuple of
        (field_name, data)
        legends: description for the field_names that will appear in the title of the plot
        """
        if self._had_error: return
        try:
            data_set = {}
            timestamp_key = 'timestamp'
            # prefer the sample timestamp when the topic provides one
            if 'timestamp_sample' in self._cur_dataset.data.keys():
                timestamp_key = 'timestamp_sample'
            data_set[timestamp_key] = self._cur_dataset.data[timestamp_key]
            data_len = len(data_set[timestamp_key])
            # calculate the sampling frequency
            # (Note: logging dropouts are not taken into account here)
            delta_t = ((data_set[timestamp_key][-1] - data_set[timestamp_key][0]) * 1.0e-6) / data_len
            sampling_frequency = 1.0 / delta_t
            if sampling_frequency < 100 or sampling_frequency == float("inf"): # require min sampling freq
                self._had_error = True
                return
            field_names_expanded = self._expand_field_names(field_names, data_set)
            # we use fftw instead of scipy.fft, because it is much faster for
            # input lengths that factorize into large primes.
            pyfftw.interfaces.cache.enable()
            freqs = scipy.fftpack.fftfreq(data_len, delta_t)
            # mean amplitude is computed above this frequency and added to the legend
            mean_start_freq = 40
            plot_data = []
            for field_name, color, legend in zip(field_names_expanded, colors, legends):
                # call FFTW with reduced setup effort (which is faster for our
                # use-case with varying input lengths)
                fft_values = 2/data_len*abs(pyfftw.interfaces.numpy_fft.fft(
                    data_set[field_name], planner_effort='FFTW_ESTIMATE'))
                mean_fft_value = np.mean(fft_values[np.argwhere(freqs >= mean_start_freq).flatten()])
                legend = legend + " (mean above {:} Hz: {:.2f})".format(mean_start_freq, mean_fft_value)
                plot_data.append((fft_values, mean_fft_value, legend, color))
            for fft_values, mean_fft_value, legend, color in plot_data:
                # only plot the positive-frequency half of the spectrum
                fft_plot_values = fft_values[:len(freqs)//2]
                freqs_plot = freqs[:len(freqs)//2]
                # downsample if necessary
                max_num_data_points = 3.0*self._config['plot_width']
                if len(fft_plot_values) > max_num_data_points:
                    step_size = int(len(fft_plot_values) / max_num_data_points)
                    fft_plot_values = fft_plot_values[::step_size]
                    freqs_plot = freqs_plot[::step_size]
                self._p.line(freqs_plot, fft_plot_values, # pylint: disable=too-many-function-args
                             line_color=color, line_width=2, legend_label=legend, alpha=0.8)
            # plot the mean lines above the fft graphs
            for fft_values, mean_fft_value, legend, color in plot_data:
                self._p.line([mean_start_freq, np.max(freqs)], # pylint: disable=too-many-function-args
                             [mean_fft_value, mean_fft_value],
                             line_color=color, line_width=2, legend_label=legend)
        except (KeyError, IndexError, ValueError, ZeroDivisionError) as error:
            print(type(error), "(" + self._data_name + "):", error)
            self._had_error = True
    def mark_frequency(self, frequency, label, y_screen_offset=0):
        """
        Add a vertical line with a label to mark a certain frequency

        :param frequency: x position of the marker [Hz]
        :param label: text displayed next to the line
        :param y_screen_offset: extra downward label offset in screen pixels
        """
        p = self._p
        mark_color = 'black'
        mark_line = Span(location=frequency,
                         dimension='height', line_color=mark_color,
                         line_width=1)
        p.add_layout(mark_line)
        # label: add a space to separate it from the line
        label = ' ' + label
        # plot as text with a fixed screen-space y offset
        label = Label(x=frequency, y=self.plot_height/2-10-y_screen_offset,
                      text=label, y_units='screen', level='glyph',
                      text_font_size='8pt', text_color=mark_color,
                      render_mode='canvas')
        p.add_layout(label)
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,375 | PX4/flight_review | refs/heads/main | /app/tornado_handlers/db_info_json.py | """
Tornado handler for the JSON public log list retrieval
"""
from __future__ import print_function
import json
import sqlite3
import os
import sys
import tornado.web
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../plot_app'))
from config import get_db_filename
from db_entry import DBData
from helper import get_airframe_data
#pylint: disable=relative-beyond-top-level
from .common import get_generated_db_data_from_log
#pylint: disable=abstract-method
class DBInfoHandler(tornado.web.RequestHandler):
    """ Get database info (JSON list of public logs) Tornado request handler """

    def get(self, *args, **kwargs):
        """ GET request: write a JSON list with one dict per public log """
        jsonlist = []

        # get the logs (but only the public ones)
        con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
        try:
            cur = con.cursor()

            # get vehicle name information from vehicle table
            cur.execute('select UUID, Name from Vehicle')
            db_tuples = cur.fetchall()
            vehicle_table = {db_tuple[0]: db_tuple[1] for db_tuple in db_tuples}

            cur.execute('SELECT Id, Date, Description, WindSpeed, Rating, VideoUrl, ErrorLabels, '
                        'Source, Feedback, Type FROM Logs WHERE Public = 1 AND NOT Source = "CI"')
            # need to fetch all here, because we will do more SQL calls while
            # iterating (having multiple cursor's does not seem to work)
            db_tuples = cur.fetchall()
            for db_tuple in db_tuples:
                jsondict = {}
                db_data = DBData()
                log_id = db_tuple[0]
                jsondict['log_id'] = log_id
                jsondict['log_date'] = db_tuple[1].strftime('%Y-%m-%d')
                db_data.description = db_tuple[2]
                db_data.wind_speed = db_tuple[3]
                db_data.rating = db_tuple[4]
                db_data.video_url = db_tuple[5]
                db_data.error_labels = sorted([int(x) for x in db_tuple[6].split(',') if len(x) > 0]) \
                    if db_tuple[6] else []
                db_data.source = db_tuple[7]
                db_data.feedback = db_tuple[8]
                db_data.type = db_tuple[9]
                jsondict.update(db_data.to_json_dict())

                db_data_gen = get_generated_db_data_from_log(log_id, con, cur)
                if db_data_gen is None:
                    continue
                jsondict.update(db_data_gen.to_json_dict())

                # add vehicle name
                jsondict['vehicle_name'] = vehicle_table.get(jsondict['vehicle_uuid'], '')

                airframe_data = get_airframe_data(jsondict['sys_autostart_id'])
                jsondict['airframe_name'] = airframe_data.get('name', '') \
                    if airframe_data is not None else ''
                jsondict['airframe_type'] = airframe_data.get('type', jsondict['sys_autostart_id']) \
                    if airframe_data is not None else jsondict['sys_autostart_id']

                jsonlist.append(jsondict)
            cur.close()
        finally:
            # always release the DB connection, even if a query above raised
            # (previously the connection leaked on errors)
            con.close()

        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps(jsonlist))
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,376 | PX4/flight_review | refs/heads/main | /app/serve.py | #! /usr/bin/env python3
""" Script to run the bokeh server """
from __future__ import absolute_import
from __future__ import print_function
import argparse
import os
import sys
import errno
from bokeh.application import Application
from bokeh.server.server import Server
from bokeh.application.handlers import DirectoryHandler
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plot_app'))
from tornado.web import StaticFileHandler
from tornado.web import RedirectHandler
from tornado_handlers.download import DownloadHandler
from tornado_handlers.upload import UploadHandler
from tornado_handlers.browse import BrowseHandler, BrowseDataRetrievalHandler
from tornado_handlers.edit_entry import EditEntryHandler
from tornado_handlers.db_info_json import DBInfoHandler
from tornado_handlers.three_d import ThreeDHandler
from tornado_handlers.radio_controller import RadioControllerHandler
from tornado_handlers.error_labels import UpdateErrorLabelHandler
from helper import set_log_id_is_filename, print_cache_info #pylint: disable=C0411
from config import debug_print_timing, get_overview_img_filepath #pylint: disable=C0411
#pylint: disable=invalid-name
def _fixup_deprecated_host_args(arguments):
# --host is deprecated since bokeh 0.12.5. You might want to use
# --allow-websocket-origin instead
if arguments.host is not None and len(arguments.host) > 0:
if arguments.allow_websocket_origin is None:
arguments.allow_websocket_origin = []
arguments.allow_websocket_origin += arguments.host
arguments.allow_websocket_origin = list(set(arguments.allow_websocket_origin))
# ---- command line argument definition & parsing ----
parser = argparse.ArgumentParser(description='Start bokeh Server')
parser.add_argument('-s', '--show', dest='show', action='store_true',
                    help='Open browser on startup')
parser.add_argument('--use-xheaders', action='store_true',
                    help="Prefer X-headers for IP/protocol information")
parser.add_argument('-f', '--file', metavar='file.ulg', action='store',
                    help='Directly show an ULog file, only for local use (implies -s)',
                    default=None)
parser.add_argument('--3d', dest='threed', action='store_true',
                    help='Open 3D page (only if --file is provided)')
parser.add_argument('--pid-analysis', dest='pid_analysis', action='store_true',
                    help='Open PID analysis page (only if --file is provided)')
parser.add_argument('--num-procs', dest='numprocs', type=int, action='store',
                    help="""Number of worker processes. Default to 1.
                    0 will autodetect number of cores""",
                    default=1)
parser.add_argument('--port', type=int, action='store',
                    help='Port to listen on', default=None)
parser.add_argument('--address', action='store',
                    help='Network address to listen to', default=None)
parser.add_argument('--host', action='append', type=str, metavar='HOST[:PORT]',
                    help="""Hosts whitelist, that must match the Host header in new
                    requests. It has the form <host>[:<port>]. If no port is specified, 80
                    is used. You should use the DNS name of the public endpoint here. \'*\'
                    matches all hosts (for testing only) (default=localhost)""",
                    default=None)
parser.add_argument('--allow-websocket-origin', action='append', type=str, metavar='HOST[:PORT]',
                    help="""Public hostnames which may connect to the Bokeh websocket""",
                    default=None)

args = parser.parse_args()

# This should remain here until --host is removed entirely
_fixup_deprecated_host_args(args)

# bokeh application: the main plotting page, served under /plot_app
applications = {}
main_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plot_app')
handler = DirectoryHandler(filename=main_path)
applications['/plot_app'] = Application(handler)

# translate the parsed arguments into bokeh Server keyword arguments
server_kwargs = {}
if args.port is not None: server_kwargs['port'] = args.port
if args.use_xheaders: server_kwargs['use_xheaders'] = args.use_xheaders
server_kwargs['num_procs'] = args.numprocs
if args.address is not None: server_kwargs['address'] = args.address
if args.host is not None: server_kwargs['host'] = args.host
if args.allow_websocket_origin is not None:
    server_kwargs['allow_websocket_origin'] = args.allow_websocket_origin
# allow large plot payloads over the websocket (100 MB)
server_kwargs['websocket_max_message_size'] = 100 * 1024 * 1024
# increase the maximum upload size (default is 100MB)
server_kwargs['http_server_kwargs'] = {'max_buffer_size': 300 * 1024 * 1024}

# local single-file mode: -f shows the given log directly and implies --show
show_ulog_file = False
show_3d_page = False
show_pid_analysis_page = False
if args.file is not None:
    ulog_file = os.path.abspath(args.file)
    show_ulog_file = True
    args.show = True
    show_3d_page = args.threed
    show_pid_analysis_page = args.pid_analysis
set_log_id_is_filename(show_ulog_file)

# additional request handlers
extra_patterns = [
    (r'/upload', UploadHandler),
    (r'/browse', BrowseHandler),
    (r'/browse_data_retrieval', BrowseDataRetrievalHandler),
    (r'/3d', ThreeDHandler),
    (r'/radio_controller', RadioControllerHandler),
    (r'/edit_entry', EditEntryHandler),
    (r'/?', UploadHandler), #root should point to upload
    (r'/download', DownloadHandler),
    (r'/dbinfo', DBInfoHandler),
    (r'/error_label', UpdateErrorLabelHandler),
    (r"/stats", RedirectHandler, {"url": "/plot_app?stats=1"}),
    (r'/overview_img/(.*)', StaticFileHandler, {'path': get_overview_img_filepath()}),
]

# instantiate the server; in local -f mode, retry on increasing port numbers
# when the default port is already taken
server = None
custom_port = 5006
while server is None:
    try:
        server = Server(applications, extra_patterns=extra_patterns, **server_kwargs)
    except OSError as e:
        # if we get a port bind error and running locally with '-f',
        # automatically select another port (useful for opening multiple logs)
        if e.errno == errno.EADDRINUSE and show_ulog_file:
            custom_port += 1
            server_kwargs['port'] = custom_port
        else:
            raise

if args.show:
    # we have to defer opening in browser until we start up the server
    def show_callback():
        """ callback to open a browser window after server is fully initialized"""
        if show_ulog_file:
            if show_3d_page:
                server.show('/3d?log='+ulog_file)
            elif show_pid_analysis_page:
                server.show('/plot_app?plots=pid_analysis&log='+ulog_file)
            else:
                server.show('/plot_app?log='+ulog_file)
        else:
            server.show('/upload')
    server.io_loop.add_callback(show_callback)

if debug_print_timing():
    def print_statistics():
        """ print ulog cache info once per hour """
        print_cache_info()
        # re-schedule itself to run again in one hour
        server.io_loop.call_later(60*60, print_statistics)
    server.io_loop.call_later(60, print_statistics)

# run_until_shutdown has been added 0.12.4 and is the preferred start method
run_op = getattr(server, "run_until_shutdown", None)
if callable(run_op):
    server.run_until_shutdown()
else:
    server.start()
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,377 | PX4/flight_review | refs/heads/main | /app/plot_app/db_entry.py | """ Database entry classes """
from html import escape
from pyulog import *
from pyulog.px4 import *
from helper import get_log_filename, load_ulog_file
#pylint: disable=missing-docstring, too-few-public-methods
class DBData:
    """ Information from the DB entry of a single log file. """

    def __init__(self):
        super().__init__()
        self.description = ''    # user-provided description text
        self.feedback = ''       # user feedback text
        self.type = 'personal'   # log visibility/type
        self.wind_speed = -1     # wind speed bucket (-1: not set)
        self.rating = ''         # flight rating key ('' if not rated)
        self.video_url = ''      # associated video URL
        self.error_labels = []   # list of error label ids
        self.source = ''         # upload source

    def wind_speed_str(self):
        """ Human-readable wind speed of this entry. """
        return self.wind_speed_str_static(self.wind_speed)

    @staticmethod
    def wind_speed_str_static(wind_speed):
        """ Map a wind speed bucket value to its display name ('' if unknown). """
        for bucket, label in ((0, 'Calm'), (5, 'Breeze'), (8, 'Gale'), (10, 'Storm')):
            if wind_speed == bucket:
                return label
        return ''

    def rating_str(self):
        """ Human-readable rating of this entry. """
        return self.rating_str_static(self.rating)

    @staticmethod
    def rating_str_static(rating):
        """ Map a rating key to its display name ('' if unknown). """
        labels = {
            'crash_pilot': 'Crashed (Pilot error)',
            'crash_sw_hw': 'Crashed (Software or Hardware issue)',
            'unsatisfactory': 'Unsatisfactory',
            'good': 'Good',
            'great': 'Great!',
        }
        return labels.get(rating, '')

    def to_json_dict(self):
        """ Serialize this entry into a JSON-compatible dict. """
        return {
            'description': self.description,
            'feedback': self.feedback,
            'type': self.type,
            'wind_speed': self.wind_speed,
            'rating': self.rating,
            'video_url': self.video_url,
            'error_labels': self.error_labels,
            'source': self.source,
        }
class DBDataGenerated:
    """ Information for the generated DB entry, derived from a log file. """

    def __init__(self):
        self.start_time_utc = 0        # logging start [s] since epoch (from GPS time)
        self.duration_s = 0            # log duration in seconds
        self.mav_type = ''             # MAV/vehicle type string
        self.estimator = ''            # estimator name
        self.sys_autostart_id = 0      # SYS_AUTOSTART parameter (airframe id)
        self.sys_hw = ''               # hardware version (HTML-escaped)
        self.ver_sw = ''               # software version (HTML-escaped)
        self.ver_sw_release = ''       # release version string, e.g. 'v1.2.3 ...'
        self.num_logged_errors = 0     # number of logged messages with error level
        self.num_logged_warnings = 0   # number of logged messages with warning level
        self.flight_modes = set()      # set of flight mode ints seen in the log
        self.vehicle_uuid = ''         # vehicle uuid (HTML-escaped)
        self.flight_mode_durations = [] # list of tuples of (mode, duration sec)
        super().__init__()

    def flight_mode_durations_str(self):
        """ Serialize flight_mode_durations as 'mode:duration,mode:duration,...'. """
        ret = []
        for duration in self.flight_mode_durations:
            ret.append(str(duration[0])+':'+str(duration[1]))
        return ','.join(ret)

    @classmethod
    def from_log_file(cls, log_id):
        """ Initialize from a log file identified by log_id.

        :return: a new DBDataGenerated instance
        """
        obj = cls()

        ulog_file_name = get_log_filename(log_id)
        ulog = load_ulog_file(ulog_file_name)
        px4_ulog = PX4ULog(ulog)

        # extract information
        obj.duration_s = int((ulog.last_timestamp - ulog.start_timestamp)/1e6)
        obj.mav_type = px4_ulog.get_mav_type()
        obj.estimator = px4_ulog.get_estimator()
        obj.sys_autostart_id = ulog.initial_parameters.get('SYS_AUTOSTART', 0)
        obj.sys_hw = escape(ulog.msg_info_dict.get('ver_hw', ''))
        obj.ver_sw = escape(ulog.msg_info_dict.get('ver_sw', ''))
        version_info = ulog.get_version_info()
        if version_info is not None:
            obj.ver_sw_release = 'v{}.{}.{} {}'.format(*version_info)
        obj.num_logged_errors = 0
        obj.num_logged_warnings = 0
        if 'sys_uuid' in ulog.msg_info_dict:
            obj.vehicle_uuid = escape(ulog.msg_info_dict['sys_uuid'])

        # count logged errors & warnings (log levels are stored as ASCII digits)
        for m in ulog.logged_messages:
            if m.log_level <= ord('3'):
                obj.num_logged_errors += 1
            if m.log_level == ord('4'):
                obj.num_logged_warnings += 1

        try:
            cur_dataset = ulog.get_dataset('vehicle_status')
            flight_mode_changes = cur_dataset.list_value_changes('nav_state')
            obj.flight_modes = {int(x[1]) for x in flight_mode_changes}

            # get the durations
            # make sure the first entry matches the start of the logging
            if len(flight_mode_changes) > 0:
                flight_mode_changes[0] = (ulog.start_timestamp, flight_mode_changes[0][1])
            flight_mode_changes.append((ulog.last_timestamp, -1))
            for i in range(len(flight_mode_changes)-1):
                flight_mode = int(flight_mode_changes[i][1])
                flight_mode_duration = int((flight_mode_changes[i+1][0] -
                                            flight_mode_changes[i][0]) / 1e6)
                obj.flight_mode_durations.append((flight_mode, flight_mode_duration))

        except (KeyError, IndexError):
            # topic not found or unexpected data: leave flight modes empty
            obj.flight_modes = set()

        # logging start time & date
        try:
            # get the first non-zero timestamp
            # (np presumably comes from the module's pyulog wildcard import —
            # TODO confirm, otherwise add an explicit numpy import)
            gps_data = ulog.get_dataset('vehicle_gps_position')
            indices = np.nonzero(gps_data.data['time_utc_usec'])
            if len(indices[0]) > 0:
                obj.start_time_utc = int(gps_data.data['time_utc_usec'][indices[0][0]] / 1000000)
        except Exception:
            # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # propagate; ignore e.g. a missing topic
            pass

        return obj

    def to_json_dict(self):
        """ Serialize this entry into a JSON-compatible dict. """
        jsondict = {}
        jsondict['duration_s'] = int(self.duration_s)
        jsondict['mav_type'] = self.mav_type
        jsondict['estimator'] = self.estimator
        jsondict['sys_autostart_id'] = int(self.sys_autostart_id)
        jsondict['sys_hw'] = self.sys_hw
        jsondict['ver_sw'] = self.ver_sw
        jsondict['ver_sw_release'] = self.ver_sw_release
        jsondict['num_logged_errors'] = self.num_logged_errors
        jsondict['num_logged_warnings'] = self.num_logged_warnings
        jsondict['flight_modes'] = list(self.flight_modes)
        jsondict['vehicle_uuid'] = self.vehicle_uuid
        jsondict['flight_mode_durations'] = self.flight_mode_durations
        return jsondict
class DBVehicleData:
    """ Information from the DB entry of a vehicle. """

    def __init__(self):
        self.uuid = None       # vehicle UUID (None if unknown)
        self.log_id = ''       # id of an associated log
        self.name = ''         # user-assigned vehicle name
        self.flight_time = 0   # accumulated flight time (units as stored in the DB)
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,378 | PX4/flight_review | refs/heads/main | /app/tornado_handlers/download.py | """
Tornado handler for the download page
"""
from __future__ import print_function
import os
from html import escape
import sys
import uuid
import shutil
import sqlite3
import tornado.web
from pyulog.ulog2kml import convert_ulog2kml
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../plot_app'))
from helper import get_log_filename, validate_log_id, \
flight_modes_table, load_ulog_file, get_default_parameters
from config import get_db_filename, get_kml_filepath
#pylint: disable=relative-beyond-top-level
from .common import CustomHTTPError, TornadoRequestHandlerBase
#pylint: disable=abstract-method, unused-argument
class DownloadHandler(TornadoRequestHandlerBase):
    """ Download log file Tornado request handler """

    def get(self, *args, **kwargs):
        """ GET request callback

        Query parameters:
        - log: log id (required)
        - type: '1' = all parameters, '2' = KML track, '3' = non-default
          parameters, anything else or absent = the raw .ulg log file
        """
        log_id = self.get_argument('log')
        if not validate_log_id(log_id):
            raise tornado.web.HTTPError(400, 'Invalid Parameter')
        log_file_name = get_log_filename(log_id)
        download_type = self.get_argument('type', default='0')

        if not os.path.exists(log_file_name):
            raise tornado.web.HTTPError(404, 'Log not found')

        def get_original_filename(default_value, new_file_suffix):
            """
            get the uploaded file name & exchange the file extension
            """
            try:
                con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
                try:
                    cur = con.cursor()
                    cur.execute('select OriginalFilename '
                                'from Logs where Id = ?', [log_id])
                    db_tuple = cur.fetchone()
                    if db_tuple is not None:
                        original_file_name = escape(db_tuple[0])
                        if original_file_name[-4:].lower() == '.ulg':
                            original_file_name = original_file_name[:-4]
                        return original_file_name + new_file_suffix
                finally:
                    # always close the connection: the previous code placed
                    # close() after the return, leaking the connection whenever
                    # a row was found
                    con.close()
            except Exception:
                print("DB access failed:", sys.exc_info()[0], sys.exc_info()[1])
            return default_value

        if download_type == '1': # download the parameters
            ulog = load_ulog_file(log_file_name)
            param_keys = sorted(ulog.initial_parameters.keys())

            self.set_header("Content-Type", "text/plain")
            self.set_header('Content-Disposition', 'inline; filename=params.txt')

            delimiter = ', '
            for param_key in param_keys:
                self.write(param_key)
                self.write(delimiter)
                self.write(str(ulog.initial_parameters[param_key]))
                self.write('\n')

        elif download_type == '2': # download the kml file
            kml_path = get_kml_filepath()
            kml_file_name = os.path.join(kml_path, log_id.replace('/', '.')+'.kml')

            # check if cached file exists
            if not os.path.exists(kml_file_name):
                print('need to create kml file', kml_file_name)

                def kml_colors(flight_mode):
                    """ flight mode colors for KML file """
                    if flight_mode not in flight_modes_table: flight_mode = 0

                    color_str = flight_modes_table[flight_mode][1][1:] # color in form 'ff00aa'

                    # increase brightness to match colors with template
                    rgb = [int(color_str[2*x:2*x+2], 16) for x in range(3)]
                    for i in range(3):
                        rgb[i] += 40
                        if rgb[i] > 255: rgb[i] = 255
                    color_str = "".join(map(lambda x: format(x, '02x'), rgb))

                    return 'ff'+color_str[4:6]+color_str[2:4]+color_str[0:2] # KML uses aabbggrr

                style = {'line_width': 2}
                # create in random temporary file, then move it (to avoid races)
                try:
                    temp_file_name = kml_file_name+'.'+str(uuid.uuid4())
                    convert_ulog2kml(log_file_name, temp_file_name,
                                     'vehicle_global_position', kml_colors,
                                     style=style,
                                     camera_trigger_topic_name='camera_capture')
                    shutil.move(temp_file_name, kml_file_name)
                except Exception as e:
                    print('Error creating KML file', sys.exc_info()[0], sys.exc_info()[1])
                    raise CustomHTTPError(400, 'No Position Data in log') from e

            kml_dl_file_name = get_original_filename('track.kml', '.kml')

            # send the whole KML file
            self.set_header("Content-Type", "application/vnd.google-earth.kml+xml")
            self.set_header('Content-Disposition', 'attachment; filename='+kml_dl_file_name)
            with open(kml_file_name, 'rb') as kml_file:
                while True:
                    data = kml_file.read(4096)
                    if not data:
                        break
                    self.write(data)
                self.finish()

        elif download_type == '3': # download the non-default parameters
            ulog = load_ulog_file(log_file_name)
            param_keys = sorted(ulog.initial_parameters.keys())

            self.set_header("Content-Type", "text/plain")
            self.set_header('Content-Disposition', 'inline; filename=params.txt')

            delimiter = ', '
            # Use defaults from log if available
            if ulog.has_default_parameters:
                system_defaults = ulog.get_default_parameters(0)
                airframe_defaults = ulog.get_default_parameters(1)

                for param_key in param_keys:
                    try:
                        param_value = ulog.initial_parameters[param_key]
                        is_default = True
                        # airframe defaults take precedence over system defaults
                        if param_key in airframe_defaults:
                            is_default = param_value == airframe_defaults[param_key]
                        elif param_key in system_defaults:
                            is_default = param_value == system_defaults[param_key]

                        if not is_default:
                            self.write(param_key)
                            self.write(delimiter)
                            self.write(str(param_value))
                            self.write('\n')
                    except Exception:
                        # best-effort: skip parameters that cannot be compared/printed
                        pass
            else:
                default_params = get_default_parameters()
                for param_key in param_keys:
                    try:
                        param_value = str(ulog.initial_parameters[param_key])
                        is_default = False

                        if param_key in default_params:
                            default_param = default_params[param_key]
                            if default_param['type'] == 'FLOAT':
                                # floats: compare with a small tolerance
                                is_default = abs(float(default_param['default']) -
                                                 float(param_value)) < 0.00001
                            else:
                                is_default = int(default_param['default']) == int(param_value)

                        if not is_default:
                            self.write(param_key)
                            self.write(delimiter)
                            self.write(param_value)
                            self.write('\n')
                    except Exception:
                        # best-effort: skip parameters that cannot be parsed
                        pass

        else: # download the log file
            self.set_header('Content-Type', 'application/octet-stream')
            self.set_header("Content-Description", "File Transfer")
            self.set_header('Content-Disposition', 'attachment; filename={}'.format(
                os.path.basename(log_file_name)))
            with open(log_file_name, 'rb') as log_file:
                while True:
                    data = log_file.read(4096)
                    if not data:
                        break
                    self.write(data)
                self.finish()
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,379 | PX4/flight_review | refs/heads/main | /app/plot_app/leaflet.py | """ Data extraction/conversion methods to get the flight path that is passed to
a Leaflet map via jinja arguments """
from colors import HTML_color_to_RGB
from config_tables import flight_modes_table
from helper import get_lat_lon_alt_deg
#pylint: disable=consider-using-enumerate
def ulog_to_polyline(ulog, flight_mode_changes):
    """ extract flight mode colors and position data from the log

    :param ulog: pyulog.ULog instance containing a 'vehicle_gps_position' dataset
    :param flight_mode_changes: list of (timestamp, flight_mode) tuples,
        assumed ordered by timestamp — TODO confirm against caller
    :return: tuple(position data list, flight modes)
    """
    def rgb_colors(flight_mode):
        """ flight mode color from a flight mode """
        if flight_mode not in flight_modes_table: flight_mode = 0
        color_str = flight_modes_table[flight_mode][1] # color in form '#ff00aa'
        # increase brightness to match colors with template
        rgb = HTML_color_to_RGB(color_str)
        for i in range(3):
            rgb[i] += 40
            if rgb[i] > 255: rgb[i] = 255
        return "#" + "".join(map(lambda x: format(x, '02x'), rgb))

    cur_data = ulog.get_dataset('vehicle_gps_position')
    pos_lat, pos_lon, _ = get_lat_lon_alt_deg(ulog, cur_data)
    pos_t = cur_data.data['timestamp']
    if 'fix_type' in cur_data.data:
        indices = cur_data.data['fix_type'] > 2 # use only data with a fix
        pos_lon = pos_lon[indices]
        pos_lat = pos_lat[indices]
        pos_t = pos_t[indices]

    pos_datas = []
    flight_modes = []
    last_t = 0
    # downsample: keep at most one point per 0.1 s
    minimum_interval_s = 0.1
    current_flight_mode_idx = 0
    for i in range(len(pos_lon)):
        curr_t = pos_t[i]
        if (curr_t - last_t) / 1e6 > minimum_interval_s:
            pos_datas.append([pos_lat[i], pos_lon[i]])
            last_t = curr_t
        # emit a (color, sample index) entry for every flight mode change
        # reached by the current timestamp
        while current_flight_mode_idx < len(flight_mode_changes) - 1 and \
                flight_mode_changes[current_flight_mode_idx][0] <= curr_t:
            current_flight_mode = flight_mode_changes[current_flight_mode_idx][1]
            current_flight_mode_idx += 1
            flight_modes.append([rgb_colors(current_flight_mode), i])

    # sentinel entry marking the end of the data
    flight_modes.append(['', len(pos_lon)])
    return (pos_datas, flight_modes)
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,380 | PX4/flight_review | refs/heads/main | /app/plot_app/overview_generator.py | """
Module for generating overview map
"""
import os
#pylint: disable=ungrouped-imports
import matplotlib
matplotlib.use('Agg')
import smopy
import matplotlib.pyplot as plt
from config import get_log_filepath, get_overview_img_filepath
from helper import load_ulog_file, get_lat_lon_alt_deg
MAXTILES = 16
def get_zoom(input_box, z=18):
    """ Return an acceptable zoom level for the given lat/lon box.

    Starting from *z*, steps down until the number of tiles required to cover
    the box stays below MAXTILES (we take this function from smopy.Map to get
    a lower zoom).
    """
    zoom = z
    while True:
        tile_box = smopy.get_tile_box(input_box, zoom)
        sx, sy = smopy.get_box_size(smopy.correct_box(tile_box, zoom))
        if sx * sy < MAXTILES:
            return zoom
        zoom -= 1
def generate_overview_img_from_id(log_id):
    ''' Load the log with the given id from the configured log directory and
        render/store its overview image.
    '''
    log_path = os.path.join(get_log_filepath(), log_id + '.ulg')
    generate_overview_img(load_ulog_file(log_path), log_id)
def generate_overview_img(ulog, log_id):
    ''' Generate an overview map image for already-loaded ULog data.

    The image is written to <overview_img_filepath>/<log_id>.png; nothing is
    done if it already exists. Errors (e.g. no GPS topic in the log) are
    printed and otherwise ignored.
    '''
    output_filename = os.path.join(get_overview_img_filepath(), log_id+'.png')

    if os.path.exists(output_filename):
        return
    try:
        cur_dataset = ulog.get_dataset('vehicle_gps_position')
        indices = cur_dataset.data['fix_type'] > 2 # use only data with a fix
        lat, lon, _ = get_lat_lon_alt_deg(ulog, cur_dataset)
        lat = lat[indices]
        lon = lon[indices]
        min_lat = min(lat)
        max_lat = max(lat)
        min_lon = min(lon)
        max_lon = max(lon)
        # zoom out by 2 levels from what smopy deems acceptable (but not below 0)
        z = max(get_zoom((min_lat, min_lon, max_lat, max_lon)) - 2, 0)
        render_map = smopy.Map((min_lat, min_lon, max_lat, max_lon), z=z)
        fig, axes = plt.subplots(nrows=1, ncols=1)
        render_map.show_mpl(figsize=(8, 6), ax=axes)
        x, y = render_map.to_pixels(lat, lon)
        axes.plot(x, y, 'r')
        axes.set_axis_off()
        plt.savefig(output_filename, bbox_inches='tight')
        plt.close(fig)  # free the figure so repeated calls don't leak memory
        print('Saving overview file '+ output_filename)
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # propagate; e.g. topic not found
        print('Error generating overview file: '+ output_filename+' - No GPS?')
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,381 | PX4/flight_review | refs/heads/main | /app/tornado_handlers/multipart_streamer.py |
"""Multipart/form-data streamer for tornado 4.3"""
import os
import re
import tempfile
import shutil
__copyright__ = """
Copyright 2015 Laszlo Zsolt Nagy (nagylzs@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
"""
class ParseError(Exception):
    """Raised when the streamed data cannot be parsed as multipart/form-data."""
class SizeLimitError(Exception):
    """Raised when the size of a single field exceeds the allowed limit."""
class StreamedPart:
    """Represents one part of a multipart/form-data stream.

    Concrete subclasses implement feed()/get_payload() to store the part's
    body somewhere (e.g. a temporary file).
    """

    def __init__(self, streamer, headers):
        """
        :param streamer: the MultiPartStreamer that feeds this part
        :param headers: list of parsed header dicts (as produced by
            MultiPartStreamer._parse_header)
        """
        self.streamer = streamer
        self.headers = headers
        self._size = 0  # number of body bytes fed so far

    def get_size(self):
        """ return the size of this part """
        return self._size

    size = property(get_size, doc="Size of the streamed part. " +
                    "It will be a growing value while the part is streamed.")

    def feed(self, data):
        """Feed data into the stream.

        :param data: Binary string that has arrived from the client."""
        raise NotImplementedError

    def finalize(self):
        """Called after all data has arrived for the part."""

    def release(self):
        """Called when used resources should be freed up.

        This is called from MultiPartStreamer.release_parts."""

    def get_payload(self):
        """Load part data and return it as a binary string.

        Warning! This method will load the whole data into memory.
        First you should check the get_size() method to see if the data fits
        into memory.

        .. note:: In the base class, this is not implemented.
        """
        raise NotImplementedError

    def get_ct_params(self):
        """Get Content-Disposition parameters.

        :return: dict mapping parameter names to values from the part's
            content-disposition header; an empty dict when the part has no
            such header. (Fixed: previously documented/defaulted as a list,
            but the parsed params are a dict.)
        :rtype: dict
        """
        for header in self.headers:
            if header.get("name", "").lower().strip() == "content-disposition":
                return header.get("params", {})
        return {}

    def get_ct_param(self, name, def_val=None):
        """Get content-disposition parameter.

        :param name: Name of the parameter; matched case-insensitively
            (pass it in lowercase).
        :param def_val: Value to return when the parameter was not found.
        """
        ct_params = self.get_ct_params()
        for param_name in ct_params:
            if param_name.lower().strip() == name:
                # BUGFIX: index with the matched key; the old code used
                # ct_params[name], raising KeyError whenever the stored key
                # differed from `name` in case or surrounding whitespace
                return ct_params[param_name]
        return def_val

    def get_name(self):
        """Get name of the part.

        If the multipart form data was sent by a web browser, then the name of
        the part is the name of the input field in the form.

        :return: Name of the parameter (as given in the ``name`` parameter of
            the content-disposition header)
            When there is no ``name`` parameter, returns None. Although all
            parts in multipart/form-data should have a name.
        """
        return self.get_ct_param("name", None)

    def get_filename(self):
        """Get filename of the part.

        If the multipart form data was sent by a web browser, then the name of
        the part is the filename of the input field in the form.

        :return: filename of the parameter (as given in the ``filename``
            parameter of the content-disposition header)
            When there is no ``filename`` parameter, returns None. All
            browsers will send this parameter to all file input fields.
        """
        return self.get_ct_param("filename", None)

    def is_file(self):
        """Return if the part is a posted file.

        Please note that a program can post huge amounts of data without giving
        a filename."""
        return bool(self.get_filename())
class TemporaryFileStreamedPart(StreamedPart):
    """A multi part streamer/part that feeds data into a named temporary file.

    This class has an ``f_out`` attribute that is bound to a NamedTemporaryFile.
    """

    def __init__(self, streamer, headers, tmp_dir=None):
        """Create a new streamed part that writes part data into a NamedTemporaryFile.

        :param streamer: The MultiPartStreamer that feeds this streamed part.
        :param headers: A dict of part headers
        :param tmp_dir: Directory for the NamedTemporaryFile. Will be passed to
            the NamedTemporaryFile constructor.

        The temporary file is created with delete=False, so it is not removed
        automatically when closed. Use move() to relocate it; if move() is
        never called, release() deletes the file.
        """
        super().__init__(streamer, headers)
        self.is_moved = False
        self.is_finalized = False
        self.f_out = tempfile.NamedTemporaryFile(dir=tmp_dir, delete=False)

    def _check_usable(self, verb):
        """Raise RuntimeError unless the part is finalized and still owns its file."""
        if not self.is_finalized:
            raise RuntimeError("Cannot %s temporary file: stream is not finalized yet." % verb)
        if self.is_moved:
            raise RuntimeError("Cannot %s temporary file: it has already been moved." % verb)

    def feed(self, data):
        """Append a chunk of part data to the temporary file.

        :param data: Binary string that has arrived from the client."""
        self.f_out.write(data)

    def finalize(self):
        """Flush the temporary file and mark the stream as complete."""
        try:
            self.f_out.flush()
            self.is_finalized = True
        finally:
            super().finalize()

    def move(self, file_path):
        """Close the temporary file and move it to *file_path*.

        May only be called once, and only after the stream was finalized."""
        self._check_usable("move")
        self.f_out.close()
        shutil.move(self.f_out.name, file_path)
        self.is_moved = True

    def release(self):
        """Free resources assigned to the part.

        If the temporary file was moved with move(), this does nothing;
        otherwise the file is closed and deleted from disk."""
        try:
            if not self.is_moved:
                self.f_out.close()
                os.unlink(self.f_out.name)
        finally:
            super().release()

    def get_payload(self):
        """Load the entire part data from disk and return it.

        Warning! This will load the entire payload into memory!"""
        self._check_usable("read")
        self.f_out.seek(0)
        return self.f_out.read()

    def get_payload_partial(self, num_bytes):
        """Load only the first *num_bytes* bytes of the part data from disk."""
        self._check_usable("read")
        self.f_out.seek(0)
        return self.f_out.read(num_bytes)
class MultiPartStreamer:
    """Parse a stream of multipart/form-data.

    Useful for request handlers decorated with ``tornado.web.stream_request_body``.
    """
    SEP = b"\r\n" # line separator in multipart/form-data
    L_SEP = len(SEP)
    # matches 'Header-Name: value<tail>' -> groups (name, value, tail)
    PAT_HEADER_VALUE = re.compile(r"""([^:]+):\s+([^\s;]+)(.*)""")
    # matches '; name="value"<tail>' -> groups (name, value, tail)
    PAT_HEADER_PARAMS = re.compile(r""";\s*([^=]+)=\"(.*?)\"(.*)""")

    # Encoding for the header values. Only header name and parameters
    # will be decoded. Streamed data will remain binary.
    # This is required because multipart/form-data headers cannot
    # be parsed without a valid encoding.
    header_encoding = "UTF-8"
    def __init__(self, total):
        """Create a new PostDataStreamer

        :param total: Total number of bytes in the stream. This is what the http
            client sends as the Content-Length header of the whole form.
        """
        self.buf = b""          # raw bytes received but not yet parsed
        self.dlen = None        # length of the delimiter (set once it is known)
        self.delimiter = None   # multipart boundary incl. trailing CRLF
        self.in_data = False    # True while streaming a part's body (not headers)
        self.headers = []       # headers parsed so far for the current part
        self.parts = []         # StreamedPart instances created so far
        self.total = total      # expected total byte count of the whole form
        self.received = 0       # bytes received so far
        self.part = None        # the part currently being fed
    def _get_raw_header(self, data):
        """Return raw header data.

        Internal method. Do not call directly.

        :param data: A string containing raw data from the form part
        :return: A tuple of (header_value, tail) where header_value is the first
            line of the form part. If there is no first line yet (e.g. the
            whole data is a single line) then header_value will be None.
        """
        idx = data.find(self.SEP)
        if idx >= 0:
            # split off the first CRLF-terminated line (separator excluded)
            return data[:idx], data[idx + self.L_SEP:]
        return None, data
    def _parse_header(self, header):
        """Parse raw header data.

        Internal method. Do not call directly.

        :param header: Raw data of the part.
        :return: A dict that contains the ``name``, ``value`` and ``params`` for the header.
            If the header is a simple value, then it may only return a dict with a ``value``.
        """
        # headers are decoded; the body stays binary (see header_encoding)
        header = header.decode(self.header_encoding)
        res = self.PAT_HEADER_VALUE.match(header)
        if res:
            name, value, tail = res.groups()
            params = {}
            hdr = {"name": name, "value": value, "params": params}
            # consume the remaining ';name="value"' parameters one by one
            while True:
                res = self.PAT_HEADER_PARAMS.match(tail)
                if not res:
                    break
                hdr_name, hdr_value, tail = res.groups()
                params[hdr_name] = hdr_value
            return hdr
        return {"value": header}
    def _begin_part(self, headers):
        """Internal method called when a new part is started in the stream.

        :param headers: A dict of headers as returned by parse_header."""
        self.part = self.create_part(headers)
        assert isinstance(self.part, StreamedPart)
        self.parts.append(self.part)
    def _feed_part(self, data):
        """Internal method called when content is added to the current part.

        :param data: Raw data for the current part."""
        # keep the part's size counter in sync with the bytes fed
        # noinspection PyProtectedMember
        self.part._size += len(data)
        self.part.feed(data)
    def _end_part(self):
        """Internal method called when receiving the current part has finished.

        The implementation of this does nothing, but it can be overriden to do
        something with ``self.fout``."""
        self.part.finalize()
def data_received(self, chunk):
    """Feed a chunk of multipart form data into the parser.

    :param chunk: binary string received from the http(s) client.

    Incrementally parses the stream: discovers the multipart delimiter,
    splits out part headers, and streams part bodies into the parts
    created by _begin_part(). Call this for every chunk as it arrives.

    :raises ParseError: if no multipart delimiter is found within the
        first ~1000 buffered bytes.
    """
    self.received += len(chunk)
    self.on_progress(self.received, self.total)
    self.buf += chunk
    if not self.delimiter:
        # the very first line of the stream is the boundary delimiter
        self.delimiter, self.buf = self._get_raw_header(self.buf)
        if self.delimiter:
            self.delimiter += self.SEP
            self.dlen = len(self.delimiter)
        elif len(self.buf) > 1000:
            raise ParseError("Cannot find multipart delimiter")
        else:
            # not enough data yet to contain the whole delimiter line
            return
    while True:
        if self.in_data:
            # keep a 2*dlen safety margin unconsumed so a boundary that
            # is split across chunks is never fed into the part body
            if len(self.buf) > 3 * self.dlen:
                idx = self.buf.find(self.SEP + self.delimiter)
                if idx >= 0:
                    # boundary found: complete the current part
                    self._feed_part(self.buf[:idx])
                    self._end_part()
                    self.buf = self.buf[idx + len(self.SEP + self.delimiter):]
                    self.in_data = False
                else:
                    # no boundary in sight: stream all but the margin
                    limit = len(self.buf) - 2 * self.dlen
                    self._feed_part(self.buf[:limit])
                    self.buf = self.buf[limit:]
                    return
            else:
                return
        if not self.in_data:
            # collect header lines until the blank line that starts the body
            while True:
                header, self.buf = self._get_raw_header(self.buf)
                if header == b"":
                    assert self.delimiter
                    self.in_data = True
                    self._begin_part(self.headers)
                    self.headers = []
                    break
                if header:
                    self.headers.append(self._parse_header(header))
                else:
                    # Header is None, not enough data yet
                    return
def data_complete(self):
    """Call this after the last data_received() call, i.e. when all form
    data has arrived. You MUST call this before using the parts.

    Flushes the remaining buffered body of the final part, which
    data_received() held back as a boundary safety margin.
    """
    if self.in_data:
        # search backwards for the closing boundary; delimiter ends with
        # SEP, so [:-2] strips it before matching
        # NOTE(review): the -2 assumes len(SEP) == 2 (CRLF) — confirm.
        idx = self.buf.rfind(self.SEP + self.delimiter[:-2])
        if idx > 0:
            self._feed_part(self.buf[:idx])
            self._end_part()
def create_part(self, headers):
    """Factory hook called whenever a new part needs to be created.

    :param headers: list of header dicts for the new part.
    :return: a StreamedPart instance.

    Override this to create a custom StreamedPart. The default creates a
    TemporaryFileStreamedPart that streams data into a named temporary
    file.
    """
    return TemporaryFileStreamedPart(self, headers)
def release_parts(self):
    """Release resources held by every part created for this stream.

    Calls release() on each part in self.parts.
    """
    for streamed_part in self.parts:
        streamed_part.release()
def get_parts_by_name(self, part_name):
    """Return every part whose name equals *part_name* (case sensitive!).

    A form may post multiple values under the same name, so the result
    is always a list.
    """
    matches = []
    for candidate in self.parts:
        if candidate.get_name() == part_name:
            matches.append(candidate)
    return matches
def get_values(self, names, size_limit=10 * 1024):
    """Return {name: payload} for the first part found under each name.

    :param names: field names to look up (case sensitive); names with no
        matching part are simply omitted from the result.
    :param size_limit: maximum payload size per field; a larger part
        raises SizeLimitError.

    Caveats:
    * payloads are loaded into memory — do not use this for big files
    * only the first value per name is returned; use get_parts_by_name
      to get all values posted under one name
    Tip: get_nonfile_parts() lists the parts that were not originally
    files.
    """
    values = {}
    for field_name in names:
        matching = self.get_parts_by_name(field_name)
        if not matching:
            continue
        first = matching[0]
        if first.size > size_limit:
            raise SizeLimitError("Part size=%s > limit=%s" % (first.size, size_limit))
        values[field_name] = first.get_payload()
    return values
def get_nonfile_parts(self):
    """Return the parts that were not originally uploaded as files.

    Decided by the filename attribute of the Content-Disposition header.
    Be aware these fields may still be huge: a custom http client can
    post large amounts of data without sending a filename.
    """
    return list(filter(lambda part: not part.is_file(), self.parts))
def on_progress(self, received, total):
    """Progress hook; no-op by default. Override to report upload progress.

    :param received: number of bytes received so far.
    :param total: total number of bytes expected.
    """
    pass
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,382 | PX4/flight_review | refs/heads/main | /app/backup_db.py | #! /usr/bin/env python3
# Script to backup the SQLite DB
from __future__ import print_function
import sys
import os
import datetime
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plot_app'))
from plot_app.config import get_db_filename
# subprocess with argument lists avoids the shell quoting/injection
# problems of the original os.system() string concatenation (a DB path
# containing spaces or shell metacharacters broke the commands)
import subprocess

db_filename = get_db_filename()

# timestamped backup name, e.g. backups/backup_db_2024_01_31-12_00
backup_file = "backups/backup_db_" + \
    datetime.datetime.now().strftime('%Y_%m_%d-%H_%M')

if not os.path.exists('backups'):
    os.mkdir('backups')

# binary snapshot of the whole DB (check=False keeps the original
# best-effort behavior: os.system() also ignored failures)
subprocess.run(['sqlite3', db_filename, '.backup ' + backup_file + '.sqlite'],
               check=False)
# plain-text dumps of the two tables
with open(backup_file + '.sql', 'w') as out:
    subprocess.run(['sqlite3', db_filename, 'SELECT * from Logs'],
                   stdout=out, check=False)
with open(backup_file + '_vehicle.sql', 'w') as out:
    subprocess.run(['sqlite3', db_filename, 'SELECT * from Vehicle'],
                   stdout=out, check=False)

# count exported rows; 'with' closes the handle (the original open()
# inside the generator expression leaked it)
with open(backup_file + '.sql') as sql_file:
    num_lines = sum(1 for _ in sql_file)
print('Backed up {} records to {}'.format(num_lines, backup_file + '.sqlite'))
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,383 | PX4/flight_review | refs/heads/main | /app/plot_app/main.py | """ module that gets executed on a plotting page request """
from timeit import default_timer as timer
import sys
import sqlite3
import traceback
import os
from html import escape
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models.widgets import Div
from helper import *
from config import *
from colors import HTML_color_to_RGB
from db_entry import *
from configured_plots import generate_plots
from pid_analysis_plots import get_pid_analysis_plots
from statistics_plots import StatisticsPlots
#pylint: disable=invalid-name, redefined-outer-name
GET_arguments = curdoc().session_context.request.arguments
if GET_arguments is not None and 'stats' in GET_arguments:
# show the statistics page
plots = []
start_time = timer()
statistics = StatisticsPlots(plot_config, debug_verbose_output())
print_timing("Data Loading Stats", start_time)
start_time = timer()
# title
div = Div(text="<h3>Statistics</h3>")
plots.append(column(div))
div = Div(text="<h4>All Logs</h4>")
plots.append(column(div))
p = statistics.plot_log_upload_statistics([colors8[0], colors8[1], colors8[3],
colors8[4], colors8[5]])
plots.append(p)
div_info = Div(text="Number of Continous Integration (Simulation Tests) Logs: %i<br />" \
"Total Number of Logs on the Server: %i" %
(statistics.num_logs_ci(), statistics.num_logs_total()))
plots.append(column(div_info))
# div = Div(text="<br/><h4>Flight Report Logs "
# "<small class='text-muted'>(Public Logs only)</small></h4>")
# div_info = Div(text="Total Flight Hours over all versions: %.1f"%
# statistics.total_public_flight_duration())
# div_info_release = Div(text="Total Flight Hours for the latest major" \
# " release %s (starting from the first RC candidate): %.1f"%
# (statistics.latest_major_release()+'.x',
# statistics.total_public_flight_duration_latest_release()))
# plots.append(column([div, div_info, div_info_release]))
#
# p = statistics.plot_public_airframe_statistics()
# plots.append(p)
#
# p = statistics.plot_public_boards_statistics()
# plots.append(p)
#
# p = statistics.plot_public_boards_num_flights_statistics()
# plots.append(p)
#
# p = statistics.plot_public_flight_mode_statistics()
# plots.append(p)
# TODO: add a rating pie chart (something like
# http://bokeh.pydata.org/en/latest/docs/gallery/donut_chart.html ?)
print_timing("Plotting Stats", start_time)
curdoc().template_variables['is_stats_page'] = True
layout = column(plots, sizing_mode='scale_width')
curdoc().add_root(layout)
curdoc().title = "Flight Review - Statistics"
else:
# show the plots of a single log
start_time = timer()
ulog_file_name = 'test.ulg'
ulog_file_name = os.path.join(get_log_filepath(), ulog_file_name)
error_message = ''
log_id = ''
try:
if GET_arguments is not None and 'log' in GET_arguments:
log_args = GET_arguments['log']
if len(log_args) == 1:
log_id = str(log_args[0], 'utf-8')
if not validate_log_id(log_id):
raise ValueError('Invalid log id: {}'.format(log_id))
print('GET[log]={}'.format(log_id))
ulog_file_name = get_log_filename(log_id)
ulog = load_ulog_file(ulog_file_name)
px4_ulog = PX4ULog(ulog)
px4_ulog.add_roll_pitch_yaw()
except ULogException:
error_message = ('A parsing error occured when trying to read the file - '
'the log is most likely corrupt.')
except:
print("Error loading file:", sys.exc_info()[0], sys.exc_info()[1])
error_message = 'An error occured when trying to read the file.'
print_timing("Data Loading", start_time)
start_time = timer()
if error_message == '':
# read the data from DB
db_data = DBData()
vehicle_data = None
try:
con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
cur = con.cursor()
cur.execute('select Description, Feedback, Type, WindSpeed, Rating, VideoUrl, '
'ErrorLabels from Logs where Id = ?', [log_id])
db_tuple = cur.fetchone()
if db_tuple is not None:
db_data.description = db_tuple[0]
db_data.feedback = db_tuple[1]
db_data.type = db_tuple[2]
db_data.wind_speed = db_tuple[3]
db_data.rating = db_tuple[4]
db_data.video_url = db_tuple[5]
db_data.error_labels = sorted(
[int(x) for x in db_tuple[6].split(',') if len(x) > 0]) \
if db_tuple[6] else []
# vehicle data
if 'sys_uuid' in ulog.msg_info_dict:
sys_uuid = escape(ulog.msg_info_dict['sys_uuid'])
cur.execute('select LatestLogId, Name, FlightTime '
'from Vehicle where UUID = ?', [sys_uuid])
db_tuple = cur.fetchone()
if db_tuple is not None:
vehicle_data = DBVehicleData()
vehicle_data.log_id = db_tuple[0]
if len(db_tuple[1]) > 0:
vehicle_data.name = db_tuple[1]
try:
vehicle_data.flight_time = int(db_tuple[2])
except:
pass
cur.close()
con.close()
except:
print("DB access failed:", sys.exc_info()[0], sys.exc_info()[1])
def show_exception_page():
    """ Show an error page in case of an unknown/unhandled exception.

    :return: (title, error_message, plots) to render instead of the
        normal plot layout; also sets the 'internal_error' template flag.
    """
    title = 'Internal Error'
    error_message = ('<h3>Internal Server Error</h3>'
                     '<p>Please open an issue on <a '
                     'href="https://github.com/PX4/flight_review/issues" target="_blank">'
                     'https://github.com/PX4/flight_review/issues</a> with a link '
                     'to this log.')
    # plot_width comes from the star-imported config module
    div = Div(text=error_message, width=int(plot_width*0.9))
    plots = [column(div, width=int(plot_width*0.9))]
    curdoc().template_variables['internal_error'] = True
    return (title, error_message, plots)
# check which plots to show
plots_page = 'default'
if GET_arguments is not None and 'plots' in GET_arguments:
plots_args = GET_arguments['plots']
if len(plots_args) == 1:
plots_page = str(plots_args[0], 'utf-8')
if plots_page == 'pid_analysis':
try:
link_to_main_plots = '?log='+log_id
plots = get_pid_analysis_plots(ulog, px4_ulog, db_data,
link_to_main_plots)
title = 'Flight Review - '+px4_ulog.get_mav_type()
except Exception as error:
# catch all errors to avoid showing a blank page. Note that if we
# get here, there's a bug somewhere that needs to be fixed!
traceback.print_exc()
title, error_message, plots = show_exception_page()
else:
# template variables
curdoc().template_variables['cur_err_ids'] = db_data.error_labels
curdoc().template_variables['mapbox_api_access_token'] = get_mapbox_api_access_token()
curdoc().template_variables['is_plot_page'] = True
curdoc().template_variables['log_id'] = log_id
flight_modes = [
{'name': 'Manual', 'color': HTML_color_to_RGB(flight_modes_table[0][1])},
{'name': 'Altitude Control', 'color': HTML_color_to_RGB(flight_modes_table[1][1])},
{'name': 'Position Control', 'color': HTML_color_to_RGB(flight_modes_table[2][1])},
{'name': 'Acro', 'color': HTML_color_to_RGB(flight_modes_table[10][1])},
{'name': 'Stabilized', 'color': HTML_color_to_RGB(flight_modes_table[15][1])},
{'name': 'Offboard', 'color': HTML_color_to_RGB(flight_modes_table[14][1])},
{'name': 'Rattitude', 'color': HTML_color_to_RGB(flight_modes_table[16][1])},
{'name': 'Auto (Mission, RTL, Follow, ...)',
'color': HTML_color_to_RGB(flight_modes_table[3][1])}
]
curdoc().template_variables['flight_modes'] = flight_modes
vtol_modes = [
{'name': 'Transition', 'color': HTML_color_to_RGB(vtol_modes_table[1][1])},
{'name': 'Fixed-Wing', 'color': HTML_color_to_RGB(vtol_modes_table[2][1])},
{'name': 'Multicopter', 'color': HTML_color_to_RGB(vtol_modes_table[3][1])},
]
curdoc().template_variables['vtol_modes'] = vtol_modes
link_to_3d_page = '3d?log='+log_id
link_to_pid_analysis_page = '?plots=pid_analysis&log='+log_id
try:
plots = generate_plots(ulog, px4_ulog, db_data, vehicle_data,
link_to_3d_page, link_to_pid_analysis_page)
title = 'Flight Review - '+px4_ulog.get_mav_type()
except Exception as error:
# catch all errors to avoid showing a blank page. Note that if we
# get here, there's a bug somewhere that needs to be fixed!
traceback.print_exc()
title, error_message, plots = show_exception_page()
else:
title = 'Error'
div = Div(text="<h3>Error</h3><p>"+error_message+"</p>", width=int(plot_width*0.9))
plots = [column(div, width=int(plot_width*0.9))]
# layout
layout = column(plots)
curdoc().add_root(layout)
curdoc().title = title
print_timing("Plotting", start_time)
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,384 | PX4/flight_review | refs/heads/main | /app/tornado_handlers/error_labels.py | """
Tornado handler for updating the error label information in the database
"""
from __future__ import print_function
import sys
import os
import sqlite3
import tornado.web
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plot_app'))
from config import *
from db_entry import *
from helper import validate_log_id, validate_error_ids
class UpdateErrorLabelHandler(tornado.web.RequestHandler):
    """ Update the error labels of a flight log. """

    def post(self, *args, **kwargs):
        """ POST request.

        Body is JSON: {'log': <log id>, 'labels': [<int>, ...]}.
        Stores the labels as a comma-separated string in the Logs table.

        :raises tornado.web.HTTPError: 400 on invalid log id or labels.
        """
        data = tornado.escape.json_decode(self.request.body)
        log_id = data['log']
        if not validate_log_id(log_id):
            raise tornado.web.HTTPError(400, 'Invalid Parameter')
        error_ids = data['labels']
        if not validate_error_ids(error_ids):
            raise tornado.web.HTTPError(400, 'Invalid Parameter')

        # str.join replaces the original manual index-tracking loop
        error_id_str = ",".join(str(error_id) for error_id in error_ids)

        con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
        try:
            # 'with con' commits on success and rolls back on error
            with con:
                con.execute(
                    'UPDATE Logs SET ErrorLabels = ? WHERE Id = ?',
                    (error_id_str, log_id))
        finally:
            # the original leaked the connection if execute() raised
            con.close()
        self.write('OK')

    def data_received(self, chunk):
        """ called whenever new data is received """
        pass
| {"/app/tornado_handlers/browse.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/radio_controller.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/edit_entry.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/upload.py": ["/app/tornado_handlers/common.py", "/app/tornado_handlers/send_email.py", "/app/tornado_handlers/multipart_streamer.py"], "/app/tornado_handlers/three_d.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/db_info_json.py": ["/app/tornado_handlers/common.py"], "/app/tornado_handlers/download.py": ["/app/tornado_handlers/common.py"]} |
57,400 | ChaiHuanhuan/yuvtools | refs/heads/master | /yuvtools/psnr.py | import cv2
from . import yuv
import argparse
def psnr(file1, file2, width, height, frames, start=0, channel='Y'):
    """
    Calculate the PSNR between two raw YUV 4:2:0 files, frame by frame.

    :param file1: YUV file1 path
    :param file2: YUV file2 path
    :param width: width of the YUV
    :param height: height of the YUV
    :param frames: end frame index (exclusive) of the comparison.
        NOTE(review): the loop runs range(start, frames), so with
        start > 0 this is NOT "total number of frames" as previously
        documented — confirm intended semantics with callers.
    :param start: start frame index (default=0)
    :param channel: 'Y', 'U', 'V'; any other value compares the whole
        frame buffer (e.g. 'YUV')
    :return: list of the PSNR for each frame
    """
    psnr_list = []
    for frame in range(start, frames):
        yuv1, y1, u1, v1 = yuv.read_yuv420_frame(file1, width, height, frame)
        yuv2, y2, u2, v2 = yuv.read_yuv420_frame(file2, width, height, frame)
        # select the plane to compare (local 'psnr' shadows the function
        # name inside the loop — harmless but worth knowing)
        if channel == 'Y':
            psnr = cv2.PSNR(y1, y2)
        elif channel == 'U':
            psnr = cv2.PSNR(u1, u2)
        elif channel == 'V':
            psnr = cv2.PSNR(v1, v2)
        else:
            psnr = cv2.PSNR(yuv1, yuv2)
        psnr_list.append(psnr)
    return psnr_list
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # positional arguments
    parser.add_argument("file1")
    parser.add_argument("file2")
    # optional arguments
    parser.add_argument("-s", "--size", dest="size", action="store", required=True)
    parser.add_argument("-f", "--frames", dest="frames", action="store",
                        type=int, default=1)
    parser.add_argument("-c", "--channel", dest="channel", action="store", default='Y')
    args = parser.parse_args()

    # size is given as WIDTHxHEIGHT, e.g. "1920x1080"
    width, height = (int(value) for value in args.size.split('x'))

    # Bug fix: the original ignored the parsed arguments and always
    # compared 2 frames of channel 'Y'; now -f/--frames and
    # -c/--channel are honored (and frames is parsed as int).
    psnr_list = psnr(args.file1, args.file2, width, height, args.frames,
                     channel=args.channel)
    print(psnr_list)
    print(sum(psnr_list) / len(psnr_list))
| {"/examples/ex_bdrate.py": ["/yuvtools/bdrate.py"], "/examples/ex_psnr.py": ["/yuvtools/psnr.py"]} |
57,401 | ChaiHuanhuan/yuvtools | refs/heads/master | /examples/ex_bdrate.py | from yuvtools.bdrate import *
# use 4 QPs
# set1 = [(rate1, psnr1), (rate2, psnr2), (rate3, psnr3), (rate4, psnr4)]
set1 = [(252, 31.087504953496655), (329, 33.164891041496794), (424, 35.45001800511794), (561, 37.741308089094964)]
set2 = [(251, 31.092380475841495), (326, 32.859282956658376), (459, 35.51992100116304), (599, 37.57734516548308)]
bdrate = bdrate2(set1, set2)
print(bdrate)
| {"/examples/ex_bdrate.py": ["/yuvtools/bdrate.py"], "/examples/ex_psnr.py": ["/yuvtools/psnr.py"]} |
57,402 | ChaiHuanhuan/yuvtools | refs/heads/master | /examples/ex_psnr.py | from yuvtools.psnr import *
file1 = "foreman_64x64.yuv"
file2 = "foreman_64x64_rec.yuv"
width = 64
height = 64
psnr_list = psnr(file1, file2, width, height, 2, channel='Y')
print(psnr_list)
print(sum(psnr_list) / len(psnr_list)) | {"/examples/ex_bdrate.py": ["/yuvtools/bdrate.py"], "/examples/ex_psnr.py": ["/yuvtools/psnr.py"]} |
57,403 | ChaiHuanhuan/yuvtools | refs/heads/master | /yuvtools/bdrate.py | import numpy as np
import math
import scipy.interpolate
def bdrate2(metric_set1, metric_set2):
    """
    BJONTEGAARD Bjontegaard metric calculation, adapted.

    Computes the average % bitrate saving between two rate-distortion
    curves. Instead of a polynomial curve fit, a Piecewise Cubic Hermite
    Interpolating Polynomial is sampled at 100 points and integrated
    with the trapezoid rule — this avoids the inconsistencies of a
    misbehaving curve fit.

    :param metric_set1: list of (bitrate, metric) tuples for the first curve
    :param metric_set2: list of (bitrate, metric) tuples for the second curve
    :return: average bitrate difference in percent (0.0 when either set
        is empty, the curves do not overlap, or the computation fails)

    Note: both input lists are sorted in place (by metric).
    """
    if not metric_set1 or not metric_set2:
        return 0.0

    try:
        # pchip_interpolate requires keys sorted by x axis; our x axis is
        # the metric (not the bitrate), so sort by metric.
        metric_set1.sort(key=lambda tup: tup[1])
        metric_set2.sort(key=lambda tup: tup[1])

        # log of the rate, and psnr clamped at 100 for lossless (inf) points
        log_rate1 = [math.log(x[0]) for x in metric_set1]
        metric1 = [100.0 if x[1] == float('inf') else x[1] for x in metric_set1]
        log_rate2 = [math.log(x[0]) for x in metric_set2]
        metric2 = [100.0 if x[1] == float('inf') else x[1] for x in metric_set2]

        # integrate only over the overlapping metric range — extrapolation
        # is sketchy, so we avoid it
        min_int = max(min(metric1), min(metric2))
        max_int = min(max(metric1), max(metric2))
        if max_int <= min_int:
            # no overlap means no sensible metric is possible
            return 0.0

        # 100 PCHIP samples, evenly spaced over the overlap
        samples, interval = np.linspace(min_int, max_int, num=100, retstep=True)
        v1 = scipy.interpolate.pchip_interpolate(metric1, log_rate1, samples)
        v2 = scipy.interpolate.pchip_interpolate(metric2, log_rate2, samples)

        # np.trapz was removed in NumPy 2.0 (renamed np.trapezoid); support both
        _trapezoid = getattr(np, 'trapezoid', None) or np.trapz
        int_v1 = _trapezoid(v1, dx=interval)
        int_v2 = _trapezoid(v2, dx=interval)

        # average log-rate difference over the overlap
        avg_exp_diff = (int_v2 - int_v1) / (max_int - min_int)
    except (TypeError, ZeroDivisionError, ValueError):
        # np.RankWarning removed from the original except tuple: it is a
        # Warning class (never raised here), and merely referencing it
        # raises AttributeError on NumPy >= 2.0.
        return 0.0

    # convert the log-rate difference to a percentage
    return (math.exp(avg_exp_diff) - 1) * 100
57,404 | ChaiHuanhuan/yuvtools | refs/heads/master | /yuvtools/yuv.py | import numpy as np
def read_yuv420_frame(file, width, height, frame_index=0, bit_depth=8):
    """Read one planar YUV 4:2:0 frame from a raw .yuv file.

    :param file: path to the raw YUV file
    :param width: luma width in pixels (must be even)
    :param height: luma height in pixels (must be even)
    :param frame_index: 0-based index of the frame to read
    :param bit_depth: bits per sample; <= 8 reads uint8 samples, larger
        values read uint16 samples (the original accepted this parameter
        but ignored it). NOTE(review): uint16 uses native byte order —
        confirm against the producer of the files.
    :return: (frame, y, u, v) — the flat one-frame sample array, the Y
        plane (height x width) and the U/V planes (height/2 x width/2)
    """
    # samples per frame: full-res luma plus two quarter-res chroma planes
    samples = width * height * 3 // 2
    bytes_per_sample = 1 if bit_depth <= 8 else 2
    dtype = np.uint8 if bit_depth <= 8 else np.uint16
    # read exactly one frame; the original omitted count= so `frame`
    # contained everything from the offset to EOF
    frame = np.fromfile(file, dtype=dtype, count=samples,
                        offset=samples * bytes_per_sample * frame_index)
    luma = width * height
    chroma = luma // 4
    y = frame[:luma].reshape(height, width)
    u = frame[luma:luma + chroma].reshape(height // 2, width // 2)
    v = frame[luma + chroma:luma + 2 * chroma].reshape(height // 2, width // 2)
    return frame, y, u, v
| {"/examples/ex_bdrate.py": ["/yuvtools/bdrate.py"], "/examples/ex_psnr.py": ["/yuvtools/psnr.py"]} |
57,405 | ChaiHuanhuan/yuvtools | refs/heads/master | /setup.py | from setuptools import setup
setup(
name = 'yuvtools',
version = '0.0.1',
description = 'PSNR/BD-Rate with YUV Files',
url = 'https://github.com/brayden-jo/yuvtools',
author = 'Brayden Jo',
author_email = 'brayden.jo@outlook.com',
install_requires= ['scipy', 'opencv-python'],
license = 'MIT',
packages = ['yuvtools'],
zip_safe = False
) | {"/examples/ex_bdrate.py": ["/yuvtools/bdrate.py"], "/examples/ex_psnr.py": ["/yuvtools/psnr.py"]} |
57,410 | Ajax12345/Protest-Game | refs/heads/master | /game_main.py | import flask, random, string
import game_user, json, pusher
import game_manager, os, re
from werkzeug import secure_filename
import user_manager, functools
app = flask.Flask(__name__)
app.secret_key = ''.join(random.choice(string.ascii_letters) for _ in range(30))
UPLOAD_FOLDER = 'class_rosters'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
pusher_client = pusher.Pusher(
app_id='814342',
key='f7e3f6c14176cdde1625',
secret='5f4648c5a702b25bdb23',
cluster='us2',
ssl=True
)
def is_loggedin(f):
    """Route decorator: only run the view when a user is logged in.

    A user counts as logged in when at least one session value is not
    None; otherwise the request is redirected to the landing page.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        logged_in = any(value is not None for _, value in flask.session.items())
        if logged_in:
            return f(*args, **kwargs)
        return flask.redirect('/')
    return wrapper
@app.route('/', methods=['GET'])
def home():
return flask.render_template('game_test.html')
@app.route('/signout')
def signout():
for a, _ in flask.session.items():
flask.session[a] = None
return flask.redirect('/login')
@app.route('/login', methods=['GET'])
def login():
return flask.render_template('login.html')
@app.route('/login_user')
def login_user():
_result = user_manager.User.get_user(int(flask.request.args.get('role')), **json.loads(flask.request.args.get('payload')))
if _result:
flask.session['id'] = int(_result.id)
flask.session['role'] = int(_result.role)
return flask.jsonify({'success':'True' if _result else 'False'})
@app.route('/dashboard', methods=['GET'])
@is_loggedin
def dashboard():
return flask.render_template('student_dashboard.html', user = user_manager.User.get_user(flask.session['role'], id=flask.session['id']))
@app.route('/game/<val>/<tuser>', methods=['GET'])
@is_loggedin
def game(val, tuser):
_result = game_manager.Game.load_game(int(val), tuser)
return '<h1>404</h1>' if isinstance(_result, dict) else flask.render_template('game_pannel1.html', game = _result)
@app.route('/log_message')
def log_message():
return flask.jsonify({'success':game_manager.Game.log_message(json.loads(flask.request.args.get('payload')))})
@app.route('/get_message_history')
def get_message_history():
return flask.jsonify({'html':flask.render_template('render_messages.html', messages = game_manager.Game.get_chat_history(json.loads(flask.request.args.get('payload'))))})
@app.route('/can_add_reaction')
def can_add_reaction():
return flask.jsonify(game_manager.Game.can_add_reaction(json.loads(flask.request.args.get('payload'))))
@app.route('/get_gametime')
def get_gametime():
return flask.jsonify({'time':game_manager.Game.get_gametime(json.loads(flask.request.args.get('payload')))})
@app.route('/update_gametime')
def update_gametime():
game_manager.Game.update_gametime(json.loads(flask.request.args.get('payload')))
return flask.jsonify({'success':'True'})
@app.route('/get_scores')
def get_scores():
return flask.jsonify({'scores':game_manager.Game.get_scores(json.loads(flask.request.args.get('payload')))})
@app.route('/add_player_marker')
def add_player_marker():
return flask.jsonify(game_manager.Game.add_player_position(json.loads(flask.request.args.get('payload'))))
@app.route('/get_all_markers')
def get_all_markers():
return flask.jsonify({'markers':json.dumps(game_manager.Game.get_all_markers(json.loads(flask.request.args.get('payload'))))})
@app.route('/log_reaction')
def log_reaction():
return flask.jsonify(game_manager.Game.log_reaction(json.loads(flask.request.args.get('payload'))))
@app.route("/pusher/auth", methods=['POST'])
def pusher_authentication():
# pusher_client is obtained through pusher.Pusher( ... )
auth = pusher_client.authenticate(
channel=flask.request.form['channel_name'],
socket_id=flask.request.form['socket_id']
)
return json.dumps(auth)
@app.route('/create', methods=['GET'])
@is_loggedin
def create():
return flask.render_template('new_game.html', user = user_manager.User.get_user(flask.session['role'], id=flask.session['id']))
@app.route('/add-class', methods=['GET'])
@is_loggedin
def create_class():
return flask.render_template('create_class.html', user = user_manager.User.get_user(flask.session['role'], id=flask.session['id']))
@app.route('/test_post', methods=['POST'])
def test_post():
    """Handle a class-roster upload: store the file under a sequential
    name and create the class record, then redirect to the class page."""
    _file = flask.request.files['myfile']
    filename = secure_filename(_file.filename)
    _id = len(os.listdir("class_rosters")) + 1
    # os.path.splitext instead of the original regex: the regex raised
    # IndexError for filenames without an extension. Fallback 'csv' is a
    # guess based on the roster folder — confirm expected formats.
    _ext = os.path.splitext(filename)[1].lstrip('.') or 'csv'
    _file.save(os.path.join(app.config['UPLOAD_FOLDER'], f'class_roster{_id}.{_ext}'))
    return flask.redirect(f'/class/{game_manager.Classes.create_class(flask.request.form["classname"], _id, 1)}')
@app.route('/class/<id>', methods=['GET'])
def get_class(id):
return f'<h1>class({id})</h1>'
@app.after_request
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
if __name__ == '__main__':
app.debug = True
app.run()
| {"/game_main.py": ["/game_user.py", "/game_manager.py", "/user_manager.py"], "/user_manager.py": ["/game_utilites.py"], "/game_manager.py": ["/game_utilites.py", "/user_manager.py"]} |
57,411 | Ajax12345/Protest-Game | refs/heads/master | /game_user.py | import json, typing
class TestUser:
    """Lightweight user record backed by creds.json."""

    def __init__(self, _payload: dict) -> None:
        # adopt the payload dict directly as the instance namespace
        self.__dict__ = _payload

    @classmethod
    def get_user_login(cls, **kwargs: dict) -> typing.Callable:
        """Return the first stored user whose fields match every kwarg.

        Raises IndexError when no user matches.
        """
        creds = json.load(open('creds.json'))
        matches = [{'id': uid, **info} for uid, info in creds.items()
                   if all(info.get(field) == wanted for field, wanted in kwargs.items())]
        return cls(matches[0])

    @classmethod
    def get_user(cls, _id: int) -> typing.Callable:
        """Look up a user by its id key in creds.json."""
        return cls({'id': _id, **json.load(open('creds.json'))[_id]})

    @property
    def to_dict(self) -> dict:
        """The raw attribute dict of this user."""
        return self.__dict__
| {"/game_main.py": ["/game_user.py", "/game_manager.py", "/user_manager.py"], "/user_manager.py": ["/game_utilites.py"], "/game_manager.py": ["/game_utilites.py", "/user_manager.py"]} |
57,412 | Ajax12345/Protest-Game | refs/heads/master | /user_manager.py | import typing, tigerSqlite, game_utilites
class User:
    """
    DB-backed user record.

    Storage layout (consumed by tigerSqlite):
    filename:protest_users.db
    tablename:users
    columns:id real, data text
    """
    def __init__(self, _data:dict) -> None:
        # adopt the row dict directly as the instance namespace
        self.__dict__ = _data
    def __bool__(self) -> bool:
        # always truthy so `if user:` distinguishes a User instance from
        # the False returned by get_user() on lookup failure
        return True
    @classmethod
    @game_utilites.validate_add_user
    def add_user(cls, _all_data:typing.List[dict], _payload:dict) -> typing.Callable:
        """Insert a new user row and return it as a User.

        The new id is max(existing ids) + 1, or 1 for an empty table.
        NOTE(review): _all_data appears to be injected by the
        validate_add_user decorator — confirm its shape there.
        """
        new_id = (lambda x:1 if not x else max(x)+1)([a for a, _ in _all_data])
        tigerSqlite.Sqlite('protest_users.db').insert('users', ('id', new_id), ('data', _payload))
        return cls({'id':new_id, **_payload})
    @classmethod
    def get_user(cls, role:int, **kwargs:dict) -> typing.Callable:
        """Return the first user with the given role matching all kwargs,
        or False when no user matches."""
        _formatted = [{'id':a, **b} for a, b in tigerSqlite.Sqlite('protest_users.db').get_id_data('users') if int(b['role']) == int(role)]
        return (lambda x:False if not x else cls(x[0]))([i for i in _formatted if all(i[j] == k for j, k in kwargs.items())])
if __name__ == '__main__':
_r = User.add_user({'name':'Joe Petullo', 'role':False, 'email':'josephpetullo65@@gmail.com'})
print(_r) | {"/game_main.py": ["/game_user.py", "/game_manager.py", "/user_manager.py"], "/user_manager.py": ["/game_utilites.py"], "/game_manager.py": ["/game_utilites.py", "/user_manager.py"]} |
57,413 | Ajax12345/Protest-Game | refs/heads/master | /game_manager.py | import typing, json, game_utilites
import pusher, collections, re, os, csv
import pandas as pd
import user_manager, tigerSqlite, content_manager
pusher_client = pusher.Pusher(
app_id='814342',
key='f7e3f6c14176cdde1625',
secret='5f4648c5a702b25bdb23',
cluster='us2',
ssl=True
)
class _message:
    """Attribute-style wrapper around a raw message dict (aliases it)."""
    def __init__(self, _data:dict) -> None:
        self.__dict__ = _data
class ReactionObj(typing.NamedTuple):
    """A single player reaction within a game move."""
    name:str      # player display name
    role:str      # player role, lowercase (e.g. 'police', 'protester')
    reaction:str  # reaction text
    @property
    def role_text(self):
        """Role with the first letter capitalised, for display."""
        return self.role.capitalize()
class Move:
    """One game round: raw move data, round number and a player-id to
    player-name mapping."""

    def __init__(self, _data: dict, _round: int, _users: dict) -> None:
        self.data = _data
        self.round = _round
        self.users = _users

    def __iter__(self):
        """Yield a ReactionObj for every recorded move of the round."""
        for entry in self.data['moves']:
            yield ReactionObj(self.users[entry['player']],
                              entry['role'],
                              entry['reaction'].capitalize())

    @property
    def running_score_police(self):
        """Police running score as an int."""
        return int(self.data['running_score']['police'])

    @property
    def running_score_protester(self):
        """Protester running score as an int."""
        return int(self.data['running_score']['protesters'])

    @property
    def running_score_winner(self):
        """'police', 'protester' or 'draw', by comparing running scores."""
        police = self.running_score_police
        protesters = self.running_score_protester
        if police > protesters:
            return 'police'
        if police == protesters:
            return 'draw'
        return 'protester'

    @property
    def running_winner_text(self):
        """Display text for the winner ('Protesters' is pluralised)."""
        winner = self.running_score_winner
        suffix = 's' if winner == 'protester' else ''
        return f'{winner.capitalize()}{suffix}'
class Game:
    """
    A persisted game viewed from one logged-in player's perspective.

    Backing stores:
    filename:game_reviews.db
    tablename:reviews
    columns:data text
    ------------------------
    filename:games.db
    tablename:games
    columns:id real, data text
    """
    def __init__(self, _payload:dict, _user:str) -> None:
        # Adopt the stored game payload as the instance namespace; remember
        # the viewer ('loggedin' is a player name or a numeric player id).
        self.__dict__ = {**_payload, 'loggedin':_user}
        # Resolve the viewer's player record (by name for str, else by id);
        # raises IndexError if the user is not in this game.
        self.game_role, self.id = [i for i in self.players if (i['name'] == self.loggedin if isinstance(self.loggedin, str) else int(i['playerid']) == int(self.loggedin))][0], int(self.id)
        # Attached lesson content, only when the game carries an integer
        # 'content' id; looked up under the instructor's player id.
        self.game_content = None if not hasattr(self, 'content') or not isinstance(self.content, int) else content_manager.ContentManager.get_content([i['playerid'] for i in self.players if i['role'] == 'instructor'][0], self.content)
    @property
    def role(self):
        # e.g. 'protester' / 'police' / 'instructor'
        return self.game_role['role']
    @property
    def display_role(self):
        return self.role.capitalize()
    @property
    def player_id(self):
        return self.game_role['playerid']
    @property
    def game_name(self):
        return self.game_role['name']
    @property
    def has_content(self):
        return self.game_content is not None
    @property
    def has_links(self):
        return self.has_content and self.game_content.has_links
    @property
    def is_instructor(self):
        return self.role == 'instructor'
    @property
    def chat_role(self):
        # Display label of the chat channel this player belongs to.
        return 'Protesters' if self.role == 'protester' else 'Police'
    @property
    def chat_role_data(self):
        return self.chat_role.lower()
    @classmethod
    def load_game_dict(cls, _payload:dict) -> dict:
        """Fetch the raw dict for game _payload['gameid']; None when absent."""
        _c = [b for a, b in tigerSqlite.Sqlite('test_game_db.db').get_id_data('games') if int(a) == int(_payload['gameid'])]
        return None if not _c else _c[0]
    @classmethod
    def update_game_dict(cls, _payload:dict) -> None:
        """Persist the full game dict back, keyed by its own 'id'."""
        tigerSqlite.Sqlite('test_game_db.db').update('games', [('data', _payload)], [('id', int(_payload['id']))])
    @classmethod
    @game_utilites.validate_user
    @game_utilites.test_game_load(flag=True)
    def load_game(cls, _data:dict, _id:int, _user:str) -> typing.Callable:
        # Body intentionally empty: with flag=True, test_game_load returns
        # cls(_data, _user) directly once validate_user has vetted the user.
        pass
    @classmethod
    def load_game_from_db(cls, _id:int, _user:int) -> typing.Callable:
        """Load a game from the production 'games.db' (vs the test DB above)."""
        _r = [b for a, b in tigerSqlite.Sqlite('games.db').get_id_data('games') if int(a) == int(_id)]
        return {'success':'False'} if not _r else cls(_r[0], _user)
    @classmethod
    def get_chat_history(cls, _payload:dict) -> typing.List[typing.NamedTuple]:
        # Side-specific chat ('<role>_chat'), wrapped for attribute access.
        return list(map(_message, cls.load_game_dict(_payload)[f'{_payload["role"]}_chat']))
    @classmethod
    def can_add_reaction(cls, _payload:dict) -> dict:
        """Reactions are allowed while fewer than 3 rounds are played and
        every player has placed a marker on the board."""
        _data = cls.load_game_dict(_payload)
        return {'can_add_reaction':len(_data['rounds']) < 3 and all(any(c['player'] == i['playerid'] for c in _data['board']) for i in _data['players']), 'scores':json.dumps(_data['score'])}
    @classmethod
    def log_message(cls, _payload:dict) -> dict:
        """Append one chat message to the side-specific chat log."""
        _data = cls.load_game_dict(_payload)
        cls.update_game_dict({**_data, f'{_payload["role"]}_chat':_data[f'{_payload["role"]}_chat']+[_payload['payload']]})
        return {'success':'True'}
    @property
    def game_history(self):
        # One Move per completed round, player ids resolved to names.
        _user_convert = {int(i['playerid']):i['name'] for i in self.players}
        yield from [Move(a, i, _user_convert) for i, a in enumerate(self.rounds, 1)]
    @property
    def current_round(self):
        return len(self.rounds)+1
    @classmethod
    def update_gametime(cls, _payload:dict) -> None:
        try:
            _data = cls.load_game_dict(_payload)
            cls.update_game_dict({**_data, 'time':_payload['time']})
        except:
            # best-effort update: a missing/corrupt game payload is ignored
            pass
    @classmethod
    def get_gametime(cls, _payload:dict) -> str:
        return cls.load_game_dict(_payload)['time']
    @classmethod
    def get_scores(cls, _payload:dict) -> str:
        return json.dumps(cls.load_game_dict(_payload)['score'])
    @classmethod
    def add_player_position(cls, _payload:dict) -> dict:
        """Place a player's board marker, broadcast it via pusher.

        Returns {'success': 'NA'} when the player already placed one.
        """
        _data = cls.load_game_dict(_payload)
        if any(int(i['player']) == int(_payload['player']) for i in _data['board']):
            return {'success':'NA'}
        _new_payload = {'player':_payload['player'], 'role':_payload['role']}
        _updated_data = {**_data, 'board':_data['board']+[_new_payload]}
        cls.update_game_dict(_updated_data)
        # 'candisplay' becomes True once every player has a marker down.
        pusher_client.trigger('markers', f'update-markers{_payload["gameid"]}', {**_new_payload, 'candisplay':all(any(c['player'] == i['playerid'] for c in _updated_data['board']) for i in _updated_data['players'])})
        return {'success':'True', 'candisplay':all(any(c['player'] == i['playerid'] for c in _updated_data['board']) for i in _updated_data['players'])}
    @classmethod
    def get_all_markers(cls, _payload:dict) -> typing.List[dict]:
        _data = cls.load_game_dict(_payload)
        return _data['board']
    @classmethod
    def log_reaction(cls, _payload:dict) -> dict:
        """Record one player's reaction; score the round once all players react."""
        #{'role': 'protester', 'gameid': 1, 'player': 3, 'reaction': 'violent'}
        _data = cls.load_game_dict(_payload)
        _user_convert = {int(i['playerid']):i['name'] for i in _data['players']}
        # push a live history-feed entry to all clients of this game
        pusher_client.trigger('history', f'update-history{_payload["gameid"]}', {'html':f"<span class='reactor'>{_user_convert[int(_payload['player'])]}</span><span class='mini_badge badge_{_payload['role']}'>{_payload['role'].capitalize()}</span> <span class='reacted_text'>reacted</span> <span class='reactor'>{_payload['reaction'].capitalize()}</span>"})
        new_data = {**_data, 'round':_data['round']+[_payload]}
        # round complete only when every player has reacted
        if all(any(int(i['player']) == int(c['playerid']) for i in new_data['round']) for c in new_data['players']):
            convert = {'violent':1, 'nonviolent':0}
            # majority reaction on each side decides the payoff lookup
            [[prot_score, _]], [[pol_score, _]] = collections.Counter([i['reaction'] for i in new_data['round'] if i['role'] == 'protester']).most_common(1), collections.Counter([i['reaction'] for i in new_data['round'] if i['role'] == 'police']).most_common(1)
            # payoff_matrix[protester_choice][police_choice] -> (p1, p2);
            # p1 accrues to protesters, p2 to police (see new_score below)
            p1, p2 = {int(a):{int(c):d for c, d in b.items()} for a, b in json.load(open('payoff_matrix.json')).items()}[convert[prot_score]][convert[pol_score]]
            new_score = {'police':_data['score']['police']+p2, 'protesters':_data['score']['protesters']+p1}
            pusher_client.trigger('scores', f'update-scores{_payload["gameid"]}', {**new_score, 'keepgoing':len(new_data['rounds'])+1 < 3})
            # archive the finished round and reset the in-progress 'round'
            final_data = {**_data, 'rounds':_data['rounds']+[{'moves':new_data['round'], 'matrix_score':{'police':p2, 'protesters':p1}, 'reactions':{'police':pol_score, 'protesters':prot_score}, 'running_score':new_score}], 'round':[], 'score':new_score}
            cls.update_game_dict(final_data)
            return {'success':'True', **new_score, 'keepgoing':len(final_data['rounds']) < 3}
        cls.update_game_dict(new_data)
        return {'success':'False'}
    @classmethod
    def post_review(cls, _payload:dict) -> None:
        """Store a post-game review payload."""
        tigerSqlite.Sqlite('game_reviews.db').insert('reviews', ('data', _payload))
    @classmethod
    @game_utilites.format_game_payload
    @game_utilites.update_player_games
    def create_game(cls, _payload:dict, _creator:dict) -> None:
        # The decorators normalize the payload and register the game with its
        # players before it is persisted here.
        tigerSqlite.Sqlite('games.db').insert('games', ('id', _payload['id']), ('data', _payload))
class _class:
    """Wrapper over a class-roster dict: attribute access and a JSON repr."""
    def __init__(self, _data:dict) -> None:
        self.data = _data
    def __repr__(self) -> str:
        return json.dumps(self.data)
    def __getattr__(self, _attr:str) -> typing.Any:
        # NOTE(review): raises KeyError, not AttributeError, for unknown names
        return self.data[_attr]
class All_classes:
    """Iterable collection of _class records; truthy when non-empty."""

    def __init__(self, _classes:typing.List[_class]) -> None:
        self.classes = _classes

    def __bool__(self) -> bool:
        return len(self.classes) > 0

    def __iter__(self):
        return iter(self.classes)
class Classes:
    """
    Class-roster management (one stored record per taught class).

    filename:student_classes.db
    tablename:classes
    columns:id real, data text
    """
    @classmethod
    def csv_create_class(cls, _name:str, _filename:str, _maker:int) -> int:
        """Create a class from a CSV roster of (name, email) rows; return its id."""
        _data = csv.reader(open(f'class_rosters/{_filename}'))
        # trim surrounding whitespace; every student starts with role False
        _new_rows = [{'name':re.sub('^\s+|\s+$', '', a), 'email':re.sub('^\s+|\s+$', '', b), 'role':False} for a, b in _data]
        # register (or fetch) each student as a site user
        final_rows = [user_manager.User.add_user(i).__dict__ for i in _new_rows]
        # next sequential class id (1 when the table is empty)
        _new_id = (lambda x:1 if not x else max(x)+1)([a for a, _ in tigerSqlite.Sqlite('student_classes.db').get_id_data('classes')])
        tigerSqlite.Sqlite('student_classes.db').insert('classes', ('id', _new_id), ('data', {'name':_name, 'owner':_maker, 'students':[{'classid':i, **a} for i, a in enumerate(final_rows, 1)]}))
        return _new_id
    @classmethod
    def xlsx_create_class(cls, _name:str, _filename:str, _maker:int) -> int:
        """Same as csv_create_class, for an Excel roster with 'Last, First' names."""
        _data = pd.read_excel(f'class_rosters/{_filename}')
        new_data = list(zip(list(_data['Student']), list(_data['E-mail Address'])))
        # 'Last, First' -> 'First Last'
        _new_rows = [{'name':' '.join(a.split(', ')[::-1]), 'email':b, 'role':False} for a, b in new_data]
        final_rows = [user_manager.User.add_user(i).__dict__ for i in _new_rows]
        _new_id = (lambda x:1 if not x else max(x)+1)([a for a, _ in tigerSqlite.Sqlite('student_classes.db').get_id_data('classes')])
        tigerSqlite.Sqlite('student_classes.db').insert('classes', ('id', _new_id), ('data', {'name':_name, 'owner':_maker, 'students':[{'classid':i, **a} for i, a in enumerate(final_rows, 1)]}))
        return _new_id
    @classmethod
    def create_class(cls, _name:str, _id:int, _maker:int) -> int:
        """Dispatch on the uploaded roster's extension ('csv' or 'xlsx')."""
        _file = [i for i in os.listdir('class_rosters') if i.startswith(f'class_roster{_id}')][0]
        return getattr(cls, f'{_file.split(".")[-1]}_create_class')(_name, _file, _maker)
    @classmethod
    def get_maker_classes(cls, _maker:int) -> typing.List[typing.Callable]:
        """All classes owned by *_maker*, wrapped for attribute access."""
        return All_classes([_class({'id':int(a), **b}) for a, b in tigerSqlite.Sqlite('student_classes.db').get_id_data('classes') if int(b['owner']) == _maker])
if __name__ == '__main__':
tigerSqlite.Sqlite('test_game_db.db').update('games', [('data', game_utilites.refresh_game_data())], [('id', 1)])
| {"/game_main.py": ["/game_user.py", "/game_manager.py", "/user_manager.py"], "/user_manager.py": ["/game_utilites.py"], "/game_manager.py": ["/game_utilites.py", "/user_manager.py"]} |
57,414 | Ajax12345/Protest-Game | refs/heads/master | /game_utilites.py | import typing, json, functools
import tigerSqlite
def test_game_load(flag:bool = False) -> typing.Callable:
    """Decorator factory: with *flag* set, bypass the wrapped loader and
    build the game directly from the payload via cls(_data, _user)."""
    def _inner(_f:typing.Callable) -> typing.Callable:
        def wrapper(cls, _data, _id:int, _user:str) -> typing.Any:
            if flag:
                return cls(_data, _user)
            return _f(cls, _data, _id, _user)
        return wrapper
    return _inner
def validate_user(_f:typing.Callable) -> typing.Callable:
    """Decorator: only invoke *_f* for users registered in game_data.json.

    The wrapper loads game_data.json, checks *_user* against its 'players'
    names, and returns {'success': 'False'} for unknown users; otherwise it
    forwards (cls, game-data, _id, _user) to the wrapped function.
    """
    @functools.wraps(_f)  # consistent with validate_add_user below
    def wrapper(cls, _id:int, _user:str) -> typing.Any:
        # fix: close the file handle instead of leaking it via bare open()
        with open('game_data.json') as fh:
            _data = json.load(fh)
        if not any(i['name'] == _user for i in _data['players']):
            return {'success':'False'}
        return _f(cls, _data, _id, _user)
    return wrapper
def validate_add_user(_f:typing.Callable) -> typing.Callable:
    """Decorator: create the user via *_f* only when no stored user already
    matches every key/value in the payload; otherwise wrap the existing one."""
    @functools.wraps(_f)
    def wrapper(cls, _payload:dict) -> typing.Any:
        # all stored users as (id, attribute-dict) pairs
        _formatted = tigerSqlite.Sqlite('protest_users.db').get_id_data('users')
        # users whose stored attributes match the payload exactly
        _option = [{'id':a, **b} for a, b in _formatted if all(b[j] == k for j, k in _payload.items())]
        return _f(cls, _formatted, _payload) if not _option else cls(_option[0])
    return wrapper
if __name__ == '__main__':
d = json.load(open('game_data.json'))
d['time'] = '0:00'
d['board'] = []
d['rounds'] = []
d['round'] = []
d['score'] = {"police": 0, "protesters": 0}
with open('game_data.json', 'w') as f:
json.dump(d, f)
| {"/game_main.py": ["/game_user.py", "/game_manager.py", "/user_manager.py"], "/user_manager.py": ["/game_utilites.py"], "/game_manager.py": ["/game_utilites.py", "/user_manager.py"]} |
57,415 | YuTengChang/akam_raana | refs/heads/master | /ra_python/ra_hadoop_insert.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 31 15:58:55 2015
@author: ychang
This script do the insertion of RA-Info files
"""
import sys,os
sys.path.append('/home/testgrp/RAAnalysis/')
import subprocess as sp
import glob
import time
import configurations.config as config
import configurations.hdfsutil as hdfs
def main():
    """Upload concatenated RA files to HDFS, serialize them to Avro via a Pig
    job, move the result under the Hive ra_map layout and repair the table.

    Python 2 script; per-file failures are logged and skipped.
    """
    # parameters
    # RAinput='/home/testgrp/RAAnalysis/ra_data/ra_msg/assignments_agg'
    # current time
    timenow = int(time.time())
    # #### RA PART ####
    for ra_concat_file in glob.glob( os.path.join(config.RAconcat,'*.txt') ):
        # file name layout: <prefix>.<datestamp>.<uuid>.<starttime>.<endtime>.txt
        infoitem = ra_concat_file.split('.')
        datestamp = infoitem[1]
        UUID = infoitem[2]
        STARTTIME = infoitem[3]
        ENDTIME = infoitem[4]
        print 'uuid=%s, starttime=%s, endtime=%s, datestamp=%s' % (UUID, STARTTIME, ENDTIME, datestamp)
        # upload ra_concat_file to HDFS
        print '*** uploading file to HDFS ' + ra_concat_file
        try:
            sp.check_call(['hadoop', 'fs', '-put', ra_concat_file, config.hdfs_ra_intermediate])
            sp.check_call(['rm', ra_concat_file])
            intermediate_file_name = ra_concat_file.split('/')[-1]
        except:
            print 'HDFS file upload error'
            # still remove the local file (keeps from cumulating the concatenated files)
            sp.check_call(['rm', ra_concat_file])
            continue # check the next ra_concat_file
        # create corresponding HDFS directory
        # PIG will create the HDFS in the designated folder
        # run PIG script to utilize AVRO
        # example: HADOOP_USER_NAME=akamai; pig11 -p datestamp=20151201 -p uuid=0e0bda82-9823-11e5-b44e-300ed5c5f881 -p ts=1448980818 /home/testgrp/RAAnalysis/pig/csv_to_avro.pig
        print '*** pig serializes the data into HDFS for file ' + ra_concat_file
        # run pig as user 'akamai', then switch back to 'testgrp'
        cmd = '%s; %s -p datestamp=%s -p uuid=%s -p ts=%s %s; %s' % ( config.cmd_hadoop_user_akamai,
                                                                     config.cmd_pig11,
                                                                     datestamp,
                                                                     UUID,
                                                                     STARTTIME,
                                                                     config.csv_to_avro_pig_script,
                                                                     config.cmd_hadoop_user_testgrp )
        #print cmd
        try:
            print 'try the pig script...'
            sp.check_call( cmd, shell=True )
            # pig log cleanup _log directory and _SUCCESS file when successful
            this_ra_temp_hdfs_location = config.hdfs_ra_temp % (datestamp,
                                                                UUID,
                                                                STARTTIME)
            this_ra_map_hdfs_location = config.hdfs_ra_map % (datestamp,
                                                              UUID,
                                                              STARTTIME)
            # copy the file from ramap [PIG OUTPUT] to RA_map folder [HIVE]
            print 'copy the file to RA_map folder'
            # hdfs.cp returns 0 (falsy) on success, hence the inverted test
            print 'HDFS copy RA-avro fail' if hdfs.cp( this_ra_temp_hdfs_location+'/part-r-00000.avro',
                                                       this_ra_map_hdfs_location) else 'HDFS copy RA-avro success'
            # remove the remainder in ramap [PIG output] folder (not fully clear yet)
            print 'remove the remainder in the ramap folder'
            cmd = '%s; hadoop fs -rm -r %s; %s' % (config.cmd_hadoop_user_akamai,
                                                   this_ra_temp_hdfs_location,
                                                   config.cmd_hadoop_user_testgrp)
            sp.check_call( cmd, shell=True )
            #cmd = '%s; hadoop fs -rm %s/_SUCCESS' % (config.cmd_hadoop_user_change,
            #                                         this_ra_map_hdfs_location)
            #sp.check_call( cmd, shell=True )
            # remove the remainder in the RA_pre_Avro folder
            print 'intermediate_file_name = ' + intermediate_file_name
            hdfs.rm( config.hdfs_ra_intermediate+'/'+intermediate_file_name )
            # update the HIVE table (pick up the new partition directory)
            cmd = "hive -e 'use raana; MSCK REPAIR TABLE ra_map;'"
            sp.check_call( cmd, shell=True )
        except:
            print 'PIG script Error.'
        # Alter HIVE table correspondingly
    # #### NS PART ####
    # list_ns_files = glob.glob( os.path.join(config.NSdata,'*_mpg*.txt') ) # glob get the full path
    # for fileitem in list_ns_files:
    #     config.hdfs_ns_info
    # #### RG PART ####
if __name__=='__main__':
sys.exit(main())
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,416 | YuTengChang/akam_raana | refs/heads/master | /configurations/hdfsutil.py | import os,sys
import subprocess as sp
#sys.path.append('/home/testgrp/perfTMI/perftmi')
def ls_full(dir_name):
    """List *dir_name* in HDFS as (name, 'date time') tuples."""
    raw = sp.check_output('hadoop fs -ls %s'%dir_name,
                          shell=True).strip().split('\n')
    entries = []
    for row in raw[1:]:  # first row is the 'Found N items' banner
        name = row.rsplit(' ',1)[1]
        fields = row.rsplit(' ',3)
        entries.append((name, fields[1]+' '+fields[2]))
    return entries
def ls(dir_name):
    """Return the entry names under *dir_name* in HDFS."""
    raw = sp.check_output('hadoop fs -ls %s'%dir_name,
                          shell=True).strip().split('\n')
    # drop the 'Found N items' banner; keep the trailing path column
    return [row.rsplit(' ',1)[1] for row in raw[1:]]
def mkdir(dir_name):
    """Create *dir_name* (with parents) in HDFS; abort when it already exists."""
    # `hadoop fs -test -d` exits non-zero when the directory is missing
    if sp.call('hadoop fs -test -d %s'%dir_name, shell=True) != 0:
        return sp.check_call('hadoop fs -mkdir -p %s'%dir_name,shell=True)
    # NOTE(review): returns a string here vs an int above — callers ignore it
    return 'HDFS destination folder exists and abort'
def put(here,there):
    """Upload local file *here* to HDFS path *there*."""
    cmd = 'hadoop fs -put %s %s'%(here,there)
    return sp.check_call(cmd,shell=True)
def cp(source, target):
    """Copy HDFS file *source* into HDFS directory *target*, creating it if needed."""
    missing = sp.call('hadoop fs -test -d %s'%target, shell=True) != 0
    if missing:
        mkdir(target)
    return sp.check_call('hadoop fs -cp %s %s'%(source,target), shell=True)
def rm(pth_to_rm,r=False):
    """Remove an HDFS path; pass r=True for a recursive removal."""
    flags = '-rm -r' if r else '-rm'
    return sp.check_call('hadoop fs %s %s'%(flags,pth_to_rm),shell=True)
def getmerge(input_dir,out_path):
    """Merge every file under HDFS *input_dir* into local *out_path*."""
    cmd = 'hadoop fs -getmerge %s %s'%(input_dir,out_path)
    return sp.check_call(cmd, shell=True)
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,417 | YuTengChang/akam_raana | refs/heads/master | /u4_scott/tmi_lib.py | #!/a/bin/python2.7
import sys,os
import subprocess as sp
sys.path.append('/home/testgrp/perfTMI/perftmi')
import etc.config as config
import time,datetime
import threading
#### call to get the partitions for a table in the perftmi database
def get_existing_partitions(tablename):
    """Return the existing partition values of *tablename* as integers."""
    raw = hive_output('show partitions %s'%tablename)
    # each output line looks like 'partcol=VALUE'; empty output -> no partitions
    if raw == '':
        values = []
    else:
        values = [line.split('=')[1] for line in raw.strip().split('\n')]
    return map(int, values)
#### save a list of exising partition somewhere
def save_existing_partitions(tablename,filename):
    """Write hive's 'show partitions' output for *tablename* into *filename*."""
    with open(filename,'w') as f:
        sp.check_call([config.hive,'-e','use perftmi; show partitions %s;'%tablename],stdout=f)
####
#### remove a partition from a table
def remove_partition(tablename,partition_name,partition_value):
hql_str = 'alter table %s drop partition(%s=%s);'%(tablename,partition_name,partition_value)
hive_call(hql_str)
return
def add_partition(tablename,partition_name,partition_value):
    """Add partition (partition_name=partition_value) to *tablename*."""
    hive_call('alter table %s add partition(%s=%s);'%(tablename,partition_name,partition_value))
    return
#### call to mail program
def send_mail(dest_address,subject,message):
    """Mail *message* to *dest_address* via the system `mail` command."""
    echo_proc = sp.Popen(['echo',message],stdout=sp.PIPE)
    mail_proc = sp.Popen(['mail','-s',subject,dest_address],stdin=echo_proc.stdout)
    mail_proc.communicate()
    return
#### convert between partition startdate format for pipeline and
#### python datetime object
def startdate_to_datetime(startdate_string):
    """Parse a pipeline 'YYYYMMDDHH' startdate string into a datetime."""
    return datetime.datetime.strptime(startdate_string,'%Y%m%d%H')
def datetime_to_startdate(datetime_object):
    """Format a datetime back into the 'YYYYMMDDHH' pipeline startdate."""
    pieces = [str(datetime_object.year),
              str(datetime_object.month).zfill(2),
              str(datetime_object.day).zfill(2),
              str(datetime_object.hour).zfill(2)]
    return ''.join(pieces)
def convert_to_timestamp(startdate):
    """Convert an integer startdate (YYYYMMDDHH) to a Unix timestamp."""
    moment = datetime.datetime.strptime(repr(startdate),'%Y%m%d%H')
    epoch = datetime.datetime(1970,1,1)
    return int((moment - epoch).total_seconds())
#### make a call to hive #####
def hive_call(hql_str):
    """Run *hql_str* against the perftmi database; raise HiveError on failure."""
    # standard preamble: select the database and register the ddr jar
    hql_full_str = 'use perftmi; add jar %s; '%config.ddrjar + hql_str
    print hql_full_str
    try:
        sp.check_call([config.hive,'-e',hql_full_str])
    except sp.CalledProcessError:
        raise HiveError
    return
def hive_call2(hql_str):
    ''' same as above but w/o use perftmi;'''
    full_query = 'add jar %s;'%config.ddrjar + hql_str
    try:
        sp.check_call([config.hive,'-e',full_query])
    except sp.CalledProcessError:
        # normalize any hive failure to the package exception
        raise HiveError
    return
def hive_call2compress(hql_str,file2save):
    """Run *hql_str* through hive and gzip its stdout into *file2save*.

    Raises HiveError when the hive/gzip pipeline fails.
    """
    # fix: match hive_call2's preamble spacing ('add jar <jar>;')
    hql_full_str = 'add jar %s;'%config.ddrjar + hql_str
    try:
        # fix: shell=True is required — the command is a single string that
        # uses a pipe and output redirection; without it check_call would try
        # to exec the whole string as a program name and always fail.
        # NOTE(review): hql_full_str is not shell-quoted; acceptable only for
        # trusted, internally generated queries.
        sp.check_call('hive -e %s | gzip > %s'%(
            hql_full_str,file2save), shell=True)
    except sp.CalledProcessError:
        raise HiveError
    return
def hive_output(hql_str):
    """Run *hql_str* against perftmi and return hive's stdout as a string."""
    hql_full_str = 'use perftmi; add jar %s; '%config.ddrjar + hql_str
    print hql_full_str
    # sanity-check the configured hive binary before invoking it
    if not os.path.isfile(config.hive):
        print "%s does not exist"%config.hive
    try:
        str_out = sp.check_output([config.hive,'-e',hql_full_str])
        return str_out
    except sp.CalledProcessError:
        raise HiveError
    return
##### some helpers function
def fence_ppreply(startdate):
    """Return (min, max) of the ppreply partitions to include for *startdate*."""
    pp_parts = get_existing_partitions('ppreply')
    #executive decision -- include all ppl messages within the last hour
    cutoff = convert_to_timestamp(startdate) - 3601
    included = [part for part in pp_parts if part > cutoff]
    return (min(included),max(included))
##### subprocess timeout for Spark timeouts
class Command(object):
    """Run a shell command with a timeout, terminating it when exceeded."""
    def __init__(self,cmd):
        # cmd: shell command string; process is populated by run()
        self.cmd = cmd
        self.process = None
    def run(self,timeout):
        """Execute the command; kill it after *timeout* seconds.

        Returns the process return code (negative when terminated).
        """
        def target():
            self.process = sp.Popen(self.cmd,shell=True)
            self.process.communicate()
        # run the subprocess in a helper thread so join() can enforce the timeout
        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # still running past the deadline: terminate and reap
            self.process.terminate()
            thread.join()
        return self.process.returncode
##### some Exceptions #####
class HiveError(Exception):
    """Raised when a hive invocation fails."""
    def __init__(self):
        # fix: pass the message to Exception so str(e)/e.args carry it too;
        # the .message attribute is kept for existing callers.
        super(HiveError, self).__init__('Unable to complete query, check Syntax')
        self.message = 'Unable to complete query, check Syntax'
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,418 | YuTengChang/akam_raana | refs/heads/master | /ra_python/rg_hadoop_insert.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 31 15:58:55 2015
@author: ychang
This script do the insertion of RG-Info files
"""
import sys,os
sys.path.append('/home/testgrp/RAAnalysis/')
import subprocess as sp
import glob
import time
import configurations.config as config
import configurations.hdfsutil as hdfs
def main():
    """Upload daily region-info files to HDFS and register their Hive partitions.

    Python 2 script; per-file failures are logged and skipped.
    """
    # #### RG PART ####
    datestamp = time.strftime( "%Y%m%d", time.gmtime() )
    list_rg_files = glob.glob( os.path.join(config.RGdata,
                                            'region_info_*.txt') ) # glob get the full path
    for rg_file in list_rg_files:
        # datestamp comes from the file name: region_info_<YYYYMMDD>.txt
        infoitem = rg_file.split('_')
        datestamp = infoitem[-1].split('.')[0]
        print 'file = ' + rg_file
        print ' datestamp = %s' % ( datestamp )
        # put the file to HDFS folder and remove from Local
        try:
            print ' upload to HDFS'
            hdfs_rg_destination = config.hdfs_rg_info % datestamp
            hdfs.mkdir( hdfs_rg_destination )
            hdfs.put( rg_file, hdfs_rg_destination )
            print ' adding partition'
            hiveql_str = config.add_rg_partition % ( datestamp )
            sp.check_call(['hive','-e',hiveql_str])
            sp.check_call(['rm',rg_file])
        except:
            # best-effort: one bad day's file should not block the rest
            print 'region information update failed for date=%s.' % datestamp
if __name__=='__main__':
sys.exit(main())
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,419 | YuTengChang/akam_raana | refs/heads/master | /ra_python/ra_concat.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 31 15:58:55 2015
@author: ychang
This script do the concatenation of RA files
"""
import sys,os
sys.path.append('/home/testgrp/RAAnalysis/')
import subprocess as sp
import glob
import time
import configurations.config as config
import shutil
def main():
    """Concatenate per-hour RA message folders into one CSV per map UUID,
    create missing MPG tree files, run the NS lookups, then delete the
    processed input folder.

    Python 2 script; folders younger than config.mapmon_msg_latency are skipped.
    """
    # parameters
    # RAinput='/home/testgrp/RAAnalysis/ra_data/ra_msg/assignments_agg'
    # current time
    timenow = int(time.time())
    # #### RA PART ####
    # only process folder with "age" one hour or more
    #cmd = 'rm /home/testgrp/RAAnalysis/ra_data/ra_concated/Assign*'
    #sp.check_call(cmd, shell=True)
    #cmd = 'rm /home/testgrp/RAAnalysis/ra_data/mpg_tree/MPG*'
    #sp.check_call(cmd, shell=True)
    print "********************************"
    print "start processing at %s"% str(timenow)
    print "********************************"
    for diritem in os.listdir( config.RAinput ):
        # exclude if not a valid directory
        if not os.path.isdir( os.path.join( config.RAinput, diritem) ):
            print '%s not a valid folder' % diritem
            continue
        # processing the folder (folder names are epoch timestamps)
        if (timenow-int(diritem) > config.mapmon_msg_latency):
            print 'processing folder: ' + diritem
            # print header from one file
            header_file = os.path.join( config.RAconcat, config.ra_msg_header)
            one_RAinput_file = os.listdir( os.path.join( config.RAinput, diritem ) )[0] # pick arbitrary file
            print 'sample data for header info: ' + one_RAinput_file
            cmd = '/home/testgrp/bin/PrintMessageRegionAssignment %s | head -6 > %s' % (os.path.join(config.RAinput, diritem, one_RAinput_file), header_file)
            sp.check_call(cmd, shell=True)
            # extract index (UUID and timestamp of the RA file)
            print ' ::::::::: SAMPLE HEADER FILE ::::::::::'
            with open( header_file ) as f:
                for line in f:
                    print ' :: '+line,
                    # header lines look like '<key>: <value>'; strip trailing newline
                    if 'mpd_uuid' in line:
                        UUID = line.split(' ')[1][:-1]
                    if 'start_time' in line:
                        STARTTIME = line.split(' ')[1][:-1]
                    if 'end_time' in line:
                        ENDTIME = line.split(' ')[1][:-1]
            print ' :::::::::::::::::::::::::::::::::::::::'
            datestamp = time.strftime( "%Y%m%d", time.gmtime(int(STARTTIME)) ) # get the datestamp of the patch
            dftime = int(ENDTIME) - int(STARTTIME) # get the interval width
            print 'uuid=%s, starttime=%s, endtime=%s, date=%s, dtime=%s' % (UUID, STARTTIME, ENDTIME, datestamp, str(dftime))
            ra_concat_file = os.path.join( config.RAconcat, 'Assignment.%s.%s.%s.%s.txt' % (datestamp, UUID, STARTTIME, ENDTIME) )
            # shell variable carrying the per-batch static columns for awk
            cmd1 = 'var_param2="%s,%s,%s";' % (STARTTIME, dftime, UUID)
            for fileitem in os.listdir( os.path.join( config.RAinput, diritem) ):
                # write to the concatenate file
                ra_input_file = os.path.join( config.RAinput, diritem, fileitem )
                cmd2 = '''/home/testgrp/bin/PrintMessageRegionAssignment %s | tail -n+8 | awk -v var=`echo $var_param2` 'BEGIN{OFS=","}{if($1!~/mpg/){print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,var;}}' >> %s''' % (ra_input_file, ra_concat_file)
                cmd = cmd1 + cmd2
                print "process input " + ra_input_file
                sp.check_call(cmd, shell=True)
            # #### MPD TREE & NS_INFO with lookups ####
            list_treefiles = glob.glob( os.path.join(config.MPGtree,'*.tr') ) # glob get the full path
            mpg_output_file = os.path.join( config.MPGtree, 'MPG.%s.%s.tr' % (datestamp,UUID) )
            print 'checking for MPG tree ' + mpg_output_file
            if mpg_output_file not in list_treefiles:
                print "Create non-exist MPG tree files..."
                # ra_input_file is the last file from the loop above
                cmd = '/home/testgrp/bin/RALookup_new %s getATree %s' % (ra_input_file, mpg_output_file)
                sp.check_call(cmd, shell=True)
            print "NS lookups..."
            # lookup the ns info file
            ns_input_file = glob.glob( os.path.join( config.NSdata, '*_%s.txt' % datestamp) )[0] # glob get the full path
            ns_output_file = os.path.join( os.path.join( config.NSdata, 'ns_info_%s_mpg.%s.txt' % (datestamp, UUID) ) )
            cmd = '''cat %s | /home/testgrp/RAAnalysis/bin/TreeToolsUInt lookup -full %s | awk '{print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15}' > %s ''' % (ns_input_file, mpg_output_file, ns_output_file)
            print cmd
            sp.check_call(cmd, shell=True)
            # clean up the files when success concatenate
            shutil.rmtree( os.path.join( config.RAinput, diritem ) )
if __name__=='__main__':
sys.exit(main())
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,420 | YuTengChang/akam_raana | refs/heads/master | /pig/tsv_to_csv.py | #!/usr/bin/python
''' add header fields to raw tsv rows and make it a csv '''
from __future__ import print_function, division
import sys, os
from glob import glob
IN_DIR = '/home/testgrp/ksprong/tmp/mapmon/tsv/'
OUT_DIR = '/home/testgrp/ksprong/tmp/mapmon/csv/'
def transform_tsv(fn):
    """Convert one mapmon TSV under IN_DIR into a CSV under OUT_DIR.

    The first six input lines form a header block from which mpd_uuid and the
    start/end timestamps are extracted; those are appended as extra columns to
    the header row and every data row. Rows starting with 'mpg' are dropped.
    """
    with open(IN_DIR + fn) as filein, open(OUT_DIR + fn, 'w') as fileout:
        # get header fields
        filein.readline()
        mpd_uuid = filein.readline().strip().split(':')[1].strip()
        filein.readline()
        t_st = filein.readline().strip().split(':')[1].strip()
        t_end = filein.readline().strip().split(':')[1].strip()
        filein.readline()
        static_header = ','.join(['', 't_st', 't_end', 'mpd_uuid'])
        static_vals = ','.join(['', t_st, t_end, mpd_uuid])
        # write header (bug fix: this line was previously print()-ed to
        # stdout, leaving the output CSV without a header row)
        fileout.write(','.join(filein.readline().strip().split('\t')) + static_header + '\n')
        # loop over the rest
        for line in filein:
            if line[0:3] == 'mpg':
                continue
            fileout.write(','.join(line.strip().split('\t')) + static_vals + '\n')
def main():
    """Transform every TSV whose name starts with the timestamp in argv[1]."""
    stamp = sys.argv[1]
    for path in glob(IN_DIR + stamp + '*'):
        transform_tsv(path.split('/')[-1])
if __name__ == '__main__':
main()
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,421 | YuTengChang/akam_raana | refs/heads/master | /configurations/config.py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 10:57:38 2015
@author: ychang
"""
### configuration files
#==============================================================================
# # HDFS Locations
#==============================================================================
#hdfs_score = '/ghostcache/hadoop/data/MRQOS/score/ts=%s'
#hdfs_distance = '/ghostcache/hadoop/data/MRQOS/distance/ts=%s'
#hdfs_in_country = '/ghostcache/hadoop/data/MRQOS/in_country/ts=%s'
#hdfs_in_continent = '/ghostcache/hadoop/data/MRQOS/in_continent/ts=%s'
#hdfs_ra_load = '/ghostcache/hadoop/data/MRQOS/ra_load/ts=%s'
hdfs_table = '/ghostcache/hadoop/data/MRQOS'
# RAANA HDFS layout; %s slots are filled per file with datestamp / uuid / ts
hdfs_ra_intermediate = '/ghostcache/hadoop/data/RAANA/RA_pre_Avro'
hdfs_ra_map = '/ghostcache/hadoop/data/RAANA/RA_map/datestamp=%s/uuid=%s/ts=%s'
hdfs_ns_info = '/ghostcache/hadoop/data/RAANA/ns_info/datestamp=%s/uuid=%s'
hdfs_ra_temp = '/ghostcache/hadoop/data/RAANA/ramap/datestamp=%s/uuid=%s/ts=%s'
hdfs_rg_info = '/ghostcache/hadoop/data/RAANA/rg_info/datestamp=%s'
#==============================================================================
# # Local File Locations
#==============================================================================
# DIRECTORY
RAinput='/home/testgrp/RAAnalysis/ra_data/ra_msg/assignments_agg'
RAconcat='/home/testgrp/RAAnalysis/ra_data/ra_concated'
MPGtree='/home/testgrp/RAAnalysis/ra_data/mpg_tree'
NSdata='/home/testgrp/RAAnalysis/ra_data/ns_info'
RGdata='/home/testgrp/RAAnalysis/ra_data/rg_info'
ra_data = '/home/testgrp/RAAnalysis/ra_data/ra_msg'
rg_data = '/home/testgrp/RAAnalysis/ra_data/rg_info'
ra_query = '/home/testgrp/RAAnalysis/ra_query'
# FILE
ra_msg_header = 'RA_Header_File_single_one_eighth.tmp'
csv_to_avro_pig_script = '/home/testgrp/RAAnalysis/pig/csv_to_avro.pig'
#==============================================================================
# # Constant Configurations
#==============================================================================
query_retrial = 20 # 20 times
query_timeout = 20 # 20 sec
mrqos_table_delete = 60 * 30 # 1800 sec = 30 minutes
mrqos_join_delete = 60 * 60 * 24 * 32 # 32 days
# minimum age of an assignments_agg folder before ra_concat processes it
mapmon_msg_latency = 60 * 60 * 1.5 # 5400 sec = 1.5 hours
#==============================================================================
# # HIVE Scripts, table managements
#==============================================================================
add_rg_partition = 'use RAANA; alter table rg_info add partition(datestamp=%s);'
add_ns_partition = 'use RAANA; alter table ns_info add partition(datestamp=%s,uuid="%s");'
#==============================================================================
# # PIG Settings
#==============================================================================
# shell prefixes to switch the effective hadoop user around pig/hadoop calls
cmd_hadoop_user_akamai = 'HADOOP_USER_NAME=akamai'
cmd_hadoop_user_testgrp = 'HADOOP_USER_NAME=testgrp'
cmd_pig = '/home/testgrp/pig/pig-0.11.0-cdh4.6.0/bin/pig'
# pig invocation preloaded with the avro/piggybank jars needed by csv_to_avro.pig
cmd_pig11 = '%s -Dpig.additional.jars=/home/testgrp/pig/pig-0.11.0-cdh4.6.0/lib/piggybank.jar:/home/testgrp/pig/pig-0.11.0-cdh4.6.0/lib/avro-1.7.6.jar:/home/testgrp/pig/pig-0.11.0-cdh4.6.0/lib/avro-mapred-1.7.6-hadoop2.jar:/home/testgrp/pig/pig-0.11.0-cdh4.6.0/lib/json-simple-1.1.jar:/home/testgrp/pig/pig-0.11.0-cdh4.6.0/lib/snappy-java-1.0.4.1.jar:/home/testgrp/pig/pig-0.11.0-cdh4.6.0/lib/jackson-core-asl-1.9.13.jar:/home/testgrp/pig/pig-0.11.0-cdh4.6.0/lib/jackson-mapper-asl-1.9.13.jar -Dudf.import.list=org.apache.pig.piggybank.storage.avro' % cmd_pig
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,422 | YuTengChang/akam_raana | refs/heads/master | /ra_python/ns_hadoop_insert.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 31 15:58:55 2015
@author: ychang
This script do the insertion of NS-Info files
"""
import sys,os
sys.path.append('/home/testgrp/RAAnalysis/')
import subprocess as sp
import glob
import time
import configurations.config as config
import configurations.hdfsutil as hdfs
def main():
    """Upload resolver (NS) info files to HDFS and register their Hive partitions.

    Python 2 script; per-file failures are logged and the file is left in
    place for the next run.
    """
    # #### RG PART ####
    datestamp = time.strftime( "%Y%m%d", time.gmtime() )
    list_ns_files = glob.glob( os.path.join(config.NSdata,
                                            'ns_info_*mpg.*.txt') ) # glob get the full path
    for ns_file in list_ns_files:
        # file name layout: ns_info_<datestamp>_mpg.<uuid>.txt
        infoitem = ns_file.rsplit('_',2)
        datestamp = infoitem[1]
        UUID = infoitem[2].split('.')[1]
        print 'file = ' + ns_file
        print ' datestamp = %s; UUID = %s' % ( datestamp,
                                               UUID )
        # put the file to HDFS folder and remove from Local
        try:
            print ' upload to HDFS'
            hdfs_ns_destination = config.hdfs_ns_info % ( datestamp, UUID )
            hdfs.mkdir( hdfs_ns_destination )
            hdfs.put( ns_file, hdfs_ns_destination )
            print ' adding partition'
            hiveql_str = config.add_ns_partition % ( datestamp, UUID )
            print ' '+hiveql_str
            sp.check_call(['hive','-e',hiveql_str])
            print ' remove local file: ' + ns_file
            sp.check_call(['rm',ns_file])
        except:
            print 'resolver(NS) information update failed for date=%s, uuid=%s' % ( datestamp, UUID )
if __name__=='__main__':
sys.exit(main())
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,423 | YuTengChang/akam_raana | refs/heads/master | /ra_python/tr_distance.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 15:58:55 2015
@author: ychang
This script do the distance calculation
"""
import sys,os
sys.path.append('/home/testgrp/RAAnalysis/')
import math
#R = 3963.1676
R = 6371           # Earth radius in km (3963.1676 would be miles)
pi = 3.141592653   # unused below; math.radians handles the conversion
lat1 = 40.0        # fixed reference point (40N, 70W)
lon1 = -70.0
# Hadoop-streaming mapper: append the great-circle distance (km) from the
# reference point to each tab-separated input row.
for line in sys.stdin:
    # anything passed into the streaming is a tab-separate string
    line_str = line.strip().split('\t')
    (region, ecor, service, rg_name, nghost, prp, latitude, longitude, datestamp) = line_str
    lat2 = float(latitude)
    lon2 = float(longitude)
    lat1r = math.radians(lat1)
    lat2r = math.radians(lat2)
    lon1r = math.radians(lon1)
    lon2r = math.radians(lon2)
    #print 'lat2=%s, lat2r=%s' % (str(lat2), str(lat2r))
    # haversine formula: a = sin^2(dlat/2) + cos(lat1)cos(lat2)sin^2(dlon/2)
    dlat = abs(lat2r - lat1r)/2
    dlon = abs(lon2r - lon1r)/2
    a = math.pow(math.sin(dlat),2) + math.cos(lat2r) * math.cos(lat1r) * math.pow(math.sin(dlon),2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
    # re-emit the row with distance (km, 3 decimals) as the last column;
    # note: the datestamp column is consumed but not re-emitted
    print '\t'.join([str(region),
                     str(ecor),
                     service,
                     rg_name,
                     str(nghost),
                     str(prp),
                     str(latitude),
                     str(longitude),
                     str( round(R*c,3) )])
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,424 | YuTengChang/akam_raana | refs/heads/master | /u4_scott/hdfs_file_manager.py | import os,sys
import subprocess as sp
#sys.path.append('/home/testgrp/perfTMI/perftmi')
def ls(dir_name):
    """Return the raw stdout of `hadoop fs -ls dir_name`."""
    command = 'hadoop fs -ls {0}'.format(dir_name)
    return sp.check_output(command, shell=True)
def ls_list(dir_name):
    """Return the entry paths listed by `hadoop fs -ls dir_name`.

    The first output line ("Found N items") is skipped; the last
    space-separated token of every remaining line is the path.
    NOTE(review): assumes Python 2 where check_output returns str --
    under Python 3 the result is bytes and would need decoding.
    """
    # local renamed: the original shadowed this function's own name
    raw_lines = sp.check_output('hadoop fs -ls %s' % dir_name,
                                shell=True).strip().split('\n')
    return [line.rsplit(' ', 1)[1] for line in raw_lines[1:]]
def mkdir(dir_name):
    """Create dir_name in HDFS; returns 0 or raises CalledProcessError."""
    command = 'hadoop fs -mkdir {0}'.format(dir_name)
    return sp.check_call(command, shell=True)
def put(here, there):
    """Upload local path `here` to HDFS path `there`."""
    command = 'hadoop fs -put {0} {1}'.format(here, there)
    return sp.check_call(command, shell=True)
def rm(pth_to_rm, r=False):
    """Delete pth_to_rm from HDFS; r=True performs a recursive delete.

    The two nearly identical branches of the original are collapsed into
    one command build; the resulting shell string is unchanged.
    """
    flag = '-r ' if r else ''
    return sp.check_call('hadoop fs -rm %s%s' % (flag, pth_to_rm), shell=True)
def getmerge(input_dir, out_path):
    """Concatenate the files under HDFS input_dir into the local file out_path."""
    command = 'hadoop fs -getmerge {0} {1}'.format(input_dir, out_path)
    return sp.check_call(command, shell=True)
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,425 | YuTengChang/akam_raana | refs/heads/master | /ra_python/ra_cleansing.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 31 15:58:55 2015
@author: ychang
"""
import sys,os
sys.path.append('/home/testgrp/RAAnalysis/')
import subprocess as sp
import time
#import httplib
import YT_Timeout as ytt
import configurations.config as config
def main():
    """Perform the pre-processing steps on RA messages and upload the data
    to the Hadoop cluster.

    NOTE(review): the body below contains only section banners -- this
    function is a stub and returns None (so the script exits with status 0).
    """
    #==============================================================================
    # # combine RegionAssignment Message
    #==============================================================================
    #==============================================================================
    # # Print Out the distinct MPD Tree
    #==============================================================================
    #==============================================================================
    # # remove partitions from hive table
    #==============================================================================
def mrqos_table_cleanup():
    """Delete all partitions of the mrqos tables older than the threshold.

    For every partition timestamp of `score` older than
    `config.mrqos_table_delete` seconds, drops the matching partition of
    each mrqos table and removes its HDFS data.  Raises GenericHadoopError
    if hive or hadoop fails.
    """
    tmp_path = '/tmp/testgrp/mrqos_table_partitions.txt'
    # capture `show partitions` output via a temp file (hive writes to stdout);
    # `with` guarantees the handles are closed even on error
    with open(tmp_path, 'w') as partition_list:
        sp.call(['hive', '-e', 'use mrqos; show partitions score;'],
                stdout=partition_list)
    with open(tmp_path, 'r') as partition_list:
        str_parts = partition_list.read()
    os.remove(tmp_path)
    # lines look like "ts=<epoch>"; keep the integer timestamps
    # (list comprehension instead of map() so the result is a list on py3 too)
    str_parts_list_int = [int(i.split('=', 1)[1]) for i in str_parts.strip().split('\n')]
    # check if "partitions" is within the threshold
    timenow = int(time.time())
    mtype = ['score', 'distance', 'in_country', 'in_continent', 'ra_load']
    for partition in str_parts_list_int:
        if partition < timenow - config.mrqos_table_delete:
            try:
                for item in mtype:
                    # drop partitions
                    sp.check_call(['hive', '-e', 'use mrqos; alter table ' + item + ' drop if exists partition(ts=%s)' % partition])
                    # remove data from HDFS
                    hdfs_d = os.path.join(config.hdfs_table, item, 'ts=%s' % partition)
                    sp.check_call(['hadoop', 'fs', '-rm', '-r', hdfs_d])
            except sp.CalledProcessError:
                raise GenericHadoopError
#==============================================================================
# # remove partitions from hive table
#==============================================================================
def mrqos_join_cleanup():
    """Delete all partitions of mrqos_join older than the threshold.

    For every partition timestamp older than `config.mrqos_join_delete`
    seconds, drops the Hive partition and removes its HDFS data.  Raises
    GenericHadoopError if hive or hadoop fails.
    """
    tmp_path = '/tmp/testgrp/mrqos_table_partitions.txt'
    # capture `show partitions` output via a temp file (hive writes to stdout);
    # `with` guarantees the handles are closed even on error
    with open(tmp_path, 'w') as partition_list:
        sp.call(['hive', '-e', 'use mrqos; show partitions mrqos_join;'],
                stdout=partition_list)
    with open(tmp_path, 'r') as partition_list:
        str_parts = partition_list.read()
    os.remove(tmp_path)
    # lines look like "ts=<epoch>"; keep the integer timestamps
    str_parts_list_int = [int(i.split('=', 1)[1]) for i in str_parts.strip().split('\n')]
    # check if "partitions" is within the threshold
    timenow = int(time.time())
    for partition in str_parts_list_int:
        if partition < timenow - config.mrqos_join_delete:
            try:
                # drop partitions
                sp.check_call(['hive', '-e', 'use mrqos; alter table mrqos_join drop if exists partition(ts=%s)' % partition])
                # remove data from HDFS
                hdfs_d = os.path.join(config.hdfs_table, 'mrqos_join', 'ts=%s' % partition)
                sp.check_call(['hadoop', 'fs', '-rm', '-r', hdfs_d])
            except sp.CalledProcessError:
                raise GenericHadoopError
#==============================================================================
# # upload to hdfs and link to hive table
#==============================================================================
def upload_to_hive(listname, hdfs_d, ts, tablename):
    """Create an HDFS partition directory, upload the data file into it, and
    register the partition on the Hive table.

    Each stage maps its CalledProcessError onto a dedicated exception:
    HadoopDirectoryCreateError, HadoopDataUploadError, HiveCreatePartitionError.
    """
    # stage 1: create the partition directory
    try:
        sp.check_call(['hadoop', 'fs', '-mkdir', hdfs_d])
    except sp.CalledProcessError:
        raise HadoopDirectoryCreateError
    # stage 2: upload the data
    try:
        sp.check_call(['hadoop', 'fs', '-put', listname, hdfs_d])
    except sp.CalledProcessError:
        raise HadoopDataUploadError
    # stage 3: register the partition with Hive
    try:
        hiveql_str = 'use mrqos; alter table ' + tablename + ' add partition(ts=%s);' % (ts)
        sp.check_call(['hive', '-e', hiveql_str])
    except sp.CalledProcessError:
        raise HiveCreatePartitionError
#==============================================================================
# # hdfs error category
#==============================================================================
class HadoopDirectoryCreateError(Exception):
    """Raised when the HDFS partition directory cannot be created."""
    def __init__(self):
        self.message = "Unable to create directory."
        # pass the message to Exception so str(e) is no longer empty
        super(HadoopDirectoryCreateError, self).__init__(self.message)
class HadoopDataUploadError(Exception):
    """Raised when data cannot be uploaded to HDFS."""
    def __init__(self):
        self.message = "Unable to upload data to hdfs."
        # pass the message to Exception so str(e) is no longer empty
        super(HadoopDataUploadError, self).__init__(self.message)
class HiveCreatePartitionError(Exception):
    """Raised when the Hive partition cannot be added."""
    def __init__(self):
        self.message = "Unable to create partition"
        # pass the message to Exception so str(e) is no longer empty
        super(HiveCreatePartitionError, self).__init__(self.message)
class GenericHadoopError(Exception):
    """Raised when dropping a partition or deleting its data fails."""
    def __init__(self):
        self.message = "Something went wrong in deleting a partition or associated data"
        # pass the message to Exception so str(e) is no longer empty
        super(GenericHadoopError, self).__init__(self.message)
# Script entry point.  main() currently only documents the intended pipeline
# steps and returns None, so the process exits with status 0.
if __name__=='__main__':
    sys.exit(main())
| {"/ra_python/ra_cleansing.py": ["/configurations/config.py"]} |
57,426 | ChenFengling/mobike-cup | refs/heads/master | /1_feature_leak.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 14:55:30 2017
leak velocty feature
@author: leo
"""
from util import *
import pandas as pd
import numpy as np
from tqdm import tqdm
# Load the train/test order frames via the project helper (from util import *).
train_ds, test_ds = loadDataset()
def get_leak_user(ds, d_time):
    """Leak each trip's end location from the same user's next trip start.

    Orders are sorted by (userid, starttime); for consecutive rows belonging
    to the same user whose time gap is below `d_time` seconds, the next
    trip's start location is taken as the current trip's end location.

    Returns a frame with orderid, userid, endloc_leak and delta_t (seconds).

    Rewritten with positional shift(-1) instead of chained `.iloc`
    assignments (which raise/warn on modern pandas); the last row gets
    NaT/NaN sentinels and is filtered out exactly as before.
    """
    sorted_ds = ds.sort_values(['userid', 'starttime']).copy()
    sorted_ds['start'] = pd.to_datetime(sorted_ds['starttime'])
    # next row in the (user, time) ordering
    sorted_ds['endtime'] = sorted_ds['start'].shift(-1)
    sorted_ds['endloc_leak'] = sorted_ds['geohashed_start_loc'].shift(-1)
    sorted_ds['nextuser'] = sorted_ds['userid'].shift(-1)
    sorted_ds['delta_t'] = (sorted_ds['endtime'] - sorted_ds['start']).dt.total_seconds()
    if 'dist' in sorted_ds.columns:
        sorted_ds['v'] = sorted_ds['dist'] / sorted_ds['delta_t'] * 3600
    # NaN delta (last row) compares False and is dropped, as in the original
    sorted_ds['valid'] = sorted_ds['delta_t'] < d_time
    leak = sorted_ds[sorted_ds.valid & (sorted_ds.userid == sorted_ds.nextuser)]
    return leak[['orderid', 'userid', 'endloc_leak', 'delta_t']]
def get_leak_bike(ds, d_time):
    """Leak each trip's end location from the same bike's next trip start.

    Orders are sorted by (bikeid, starttime); for consecutive rows of the
    same bike whose time gap is below `d_time` seconds, the next trip's
    start location is taken as the current trip's end location.

    Returns a frame with orderid, bikeid, endloc_leak and delta_t (seconds).

    Rewritten with positional shift(-1) instead of chained `.iloc`
    assignments (which raise/warn on modern pandas); the last row gets
    NaT/NaN sentinels and is filtered out exactly as before.
    """
    sorted_ds = ds.sort_values(['bikeid', 'starttime']).copy()
    sorted_ds['start'] = pd.to_datetime(sorted_ds['starttime'])
    # next row in the (bike, time) ordering
    sorted_ds['endtime'] = sorted_ds['start'].shift(-1)
    sorted_ds['endloc_leak'] = sorted_ds['geohashed_start_loc'].shift(-1)
    sorted_ds['nextbike'] = sorted_ds['bikeid'].shift(-1)
    sorted_ds['delta_t'] = (sorted_ds['endtime'] - sorted_ds['start']).dt.total_seconds()
    if 'dist' in sorted_ds.columns:
        sorted_ds['v'] = sorted_ds['dist'] / sorted_ds['delta_t'] * 3600
    # NaN delta (last row) compares False and is dropped, as in the original
    sorted_ds['valid'] = sorted_ds['delta_t'] < d_time
    leak = sorted_ds[sorted_ds.valid & (sorted_ds.bikeid == sorted_ds.nextbike)]
    return leak[['orderid', 'bikeid', 'endloc_leak', 'delta_t']]
# Build leak features for the test set: user-based (30-minute window) and
# bike-based (7000 s window) next-trip leaks, outer-merged on orderid.
leak1 = get_leak_user(test_ds, 1800)
leak2 = get_leak_bike(test_ds, 7000)
tmp = pd.merge(leak2, leak1, how = 'outer', on ='orderid')
# Where only the user leak exists, fall back to its delta; then blank out
# user deltas that duplicate the bike delta.
# NOTE(review): chained assignment -- emits SettingWithCopyWarning and may be
# a no-op on modern pandas; verify before upgrading pandas.
tmp['delta_t_x'][tmp.delta_t_x.isnull() & tmp.delta_t_y.notnull()] = tmp['delta_t_y'][tmp.delta_t_x.isnull() & tmp.delta_t_y.notnull()]
tmp['delta_t_y'][tmp.delta_t_x == tmp.delta_t_y] = np.nan
tmp[['orderid', 'delta_t_x']].to_csv('tmp/time_test_leak.csv', index = False)
# Same leak construction for the training set.
leak1 = get_leak_user(train_ds, 1800)
leak2 = get_leak_bike(train_ds, 7000)
tmp = pd.merge(leak2, leak1, how = 'outer', on ='orderid')
tmp['delta_t_x'][tmp.delta_t_x.isnull() & tmp.delta_t_y.notnull()] = tmp['delta_t_y'][tmp.delta_t_x.isnull() & tmp.delta_t_y.notnull()]
tmp['delta_t_y'][tmp.delta_t_x == tmp.delta_t_y] = np.nan
tmp[['orderid', 'delta_t_x']].to_csv('tmp/time_train_leak.csv', index = False)
#%%
| {"/1_feature_leak.py": ["/util.py"]} |
57,427 | ChenFengling/mobike-cup | refs/heads/master | /1_sample_and_feature.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 16:15:50 2017
@author: leo
"""
#==============================================================================
# 1.构建样本集
# 2.生成xgb可用的数据特征
#==============================================================================
from util import *
import pandas as pd
import feather as ft
import sys
import argparse
import os
import gc
#%%
# Command-line interface: an output path plus the [day_start, day_end] window
# of training days to build samples for (defaults: days 23-24).
# NOTE(review): the description and both day help strings look copy-pasted
# ("Convert CSV file to Vowpal Wabbit format", "-e ... train start") -- they do
# not match what the script actually does; confirm before relying on --help.
parser = argparse.ArgumentParser( description = 'Convert CSV file to Vowpal Wabbit format.' )
parser.add_argument( "out_file", help = "path to hist file" )
parser.add_argument( "-s", "--day_start", type = int, default = 23,
                     help = "day for train start)")
parser.add_argument( "-e", "--day_end", type = int, default = 24,
                     help = "day for train start)")
args = parser.parse_args()
#%%
# Feature-hashing space size.
D = 2 ** 17

def hash_element(el):
    """Hash `el` into the fixed feature space [0, D).

    Python's % with a positive modulus already returns a non-negative
    result, so the original negative-value fix-up branch was dead code.
    """
    return hash(el) % D
#%% select sample
#==============================================================================
# S2: 获取起点对应的前3的终点
#
#==============================================================================
def get_start_top10end(train):
    """For each start location return its 20 most frequent end locations
    (with coordinates), renamed to candidate_end_loc columns.

    Rewritten with groupby().size() because the original SeriesGroupBy
    dict-agg renaming (`.agg({'sloc_eloc_count': 'count'})`) was removed in
    pandas 1.0; the result is identical since the counted column is a group
    key and therefore never null.
    """
    group_cols = ['geohashed_start_loc', 'geohashed_end_loc',
                  'longitude_end_loc', 'latitude_end_loc']
    sloc_eloc_count = train.groupby(group_cols).size().reset_index(name='sloc_eloc_count')
    sloc_eloc_count.sort_values('sloc_eloc_count', inplace=True)
    # keep the 20 most frequent end locations per start location
    # (NOTE: the function name says top10 but the original kept 20)
    sloc_eloc_count = sloc_eloc_count.groupby('geohashed_start_loc').tail(20)
    sloc_eloc_count = sloc_eloc_count.rename(columns={
        'geohashed_end_loc': 'candidate_end_loc',
        'longitude_end_loc': 'longitude_candidate_end_loc',
        'latitude_end_loc': 'latitude_candidate_end_loc'})
    del sloc_eloc_count['sloc_eloc_count']
    return sloc_eloc_count
#==============================================================================
# S1: 获取用户对应的起点与终点
#==============================================================================
def get_user_loc(train, test):
    """Collect every location a user has started from (train+test) or ended
    at (train only) as that user's candidate end locations.

    Returns one row per (userid, candidate_end_loc) with the geohash decoded
    to latitude/longitude and converted to radians.
    NOTE(review): `geohash`, `np` and `math` come from the project's
    `util` star-import; assumes geohash.decode returns (lat, lon) in
    degrees -- confirm against the util module.
    """
    col_start = ['orderid','userid', 'geohashed_start_loc']
    col_end = ['orderid','userid', 'geohashed_end_loc']
    user_start = pd.concat([train[col_start], test[col_start]])
    user_end = train[col_end]
    tmp1 = user_start[['userid', 'geohashed_start_loc']].drop_duplicates()
    tmp2 = user_end[['userid', 'geohashed_end_loc']].drop_duplicates()
    #tmp1 = user_start.groupby(['userid', 'geohashed_start_loc']).ply_select(start_num = X.orderid.count()).reset_index()
    #tmp2 = user_end.groupby(['userid', 'geohashed_end_loc']).ply_select(end_num = X.orderid.count()).reset_index()
    # unify both location kinds under a single candidate_end_loc column
    tmp3 = tmp1.rename(columns={'geohashed_start_loc':'candidate_end_loc'})
    #del tmp3['start_num']
    tmp4 = tmp2.rename(columns={'geohashed_end_loc':'candidate_end_loc'})
    #del tmp4['end_num']
    user_loc = pd.concat([tmp3, tmp4])
    user_loc.drop_duplicates(inplace=True)
    # decode geohash -> coordinates, then degrees -> radians
    decode = np.vectorize(geohash.decode)
    tmp = decode(user_loc["candidate_end_loc"])
    user_loc["latitude_candidate_end_loc"] = tmp[0]* math.pi / 180
    user_loc["longitude_candidate_end_loc"] = tmp[1]* math.pi / 180
    return user_loc
#==============================================================================
# bike 按时间排序进行leak
#==============================================================================
def get_leak_bike(ds, d_time):
    """Leak each trip's end location from the same bike's next trip start.

    Orders are sorted by (bikeid, starttime); for consecutive rows of the
    same bike whose time gap is below `d_time` seconds, the next trip's
    start location is taken as the current trip's end location.

    Returns a frame with orderid, bikeid, endloc_leak and delta_t (seconds).

    Rewritten with positional shift(-1) instead of chained `.iloc`
    assignments (which raise/warn on modern pandas); the last row gets
    NaT/NaN sentinels and is filtered out exactly as before.
    """
    sorted_ds = ds.sort_values(['bikeid', 'starttime']).copy()
    sorted_ds['start'] = pd.to_datetime(sorted_ds['starttime'])
    # next row in the (bike, time) ordering
    sorted_ds['endtime'] = sorted_ds['start'].shift(-1)
    sorted_ds['endloc_leak'] = sorted_ds['geohashed_start_loc'].shift(-1)
    sorted_ds['nextbike'] = sorted_ds['bikeid'].shift(-1)
    sorted_ds['delta_t'] = (sorted_ds['endtime'] - sorted_ds['start']).dt.total_seconds()
    if 'dist' in sorted_ds.columns:
        sorted_ds['v'] = sorted_ds['dist'] / sorted_ds['delta_t'] * 3600
    # NaN delta (last row) compares False and is dropped, as in the original
    sorted_ds['valid'] = sorted_ds['delta_t'] < d_time
    leak = sorted_ds[sorted_ds.valid & (sorted_ds.bikeid == sorted_ds.nextbike)]
    return leak[['orderid', 'bikeid', 'endloc_leak', 'delta_t']]
#==============================================================================
# user 按时间排序进行leak
#==============================================================================
def get_leak_user(ds, d_time):
    """Leak each trip's end location from the same user's next trip start.

    Orders are sorted by (userid, starttime); for consecutive rows of the
    same user whose time gap is below `d_time` seconds, the next trip's
    start location is taken as the current trip's end location.

    Returns a frame with orderid, userid, endloc_leak and delta_t (seconds).

    Rewritten with positional shift(-1) instead of chained `.iloc`
    assignments (which raise/warn on modern pandas); the last row gets
    NaT/NaN sentinels and is filtered out exactly as before.
    """
    sorted_ds = ds.sort_values(['userid', 'starttime']).copy()
    sorted_ds['start'] = pd.to_datetime(sorted_ds['starttime'])
    # next row in the (user, time) ordering
    sorted_ds['endtime'] = sorted_ds['start'].shift(-1)
    sorted_ds['endloc_leak'] = sorted_ds['geohashed_start_loc'].shift(-1)
    sorted_ds['nextuser'] = sorted_ds['userid'].shift(-1)
    sorted_ds['delta_t'] = (sorted_ds['endtime'] - sorted_ds['start']).dt.total_seconds()
    if 'dist' in sorted_ds.columns:
        sorted_ds['v'] = sorted_ds['dist'] / sorted_ds['delta_t'] * 3600
    # NaN delta (last row) compares False and is dropped, as in the original
    sorted_ds['valid'] = sorted_ds['delta_t'] < d_time
    leak = sorted_ds[sorted_ds.valid & (sorted_ds.userid == sorted_ds.nextuser)]
    return leak[['orderid', 'userid', 'endloc_leak', 'delta_t']]
def make_sample(train):
    """Build the candidate (order, candidate_end_loc) sample set for `train`.

    Candidates come from three sources: the bike-leak next start location,
    every location associated with the user (module-level `user_loc`), and
    the start location's most frequent end locations (module-level `stop3e`).
    Leak time gaps (delta_t1 bike, delta_t2 user), a user-history flag,
    hashed start/end ids and the binary `label` column are attached.
    NOTE(review): depends on module globals user_loc/stop3e and on project
    helpers geohash/calcDirection/hash_element -- not callable standalone.
    """
    # bike leak with a very wide window (10 days)
    leak_bike = get_leak_bike(train, 3600*24*10)
    leak_bike.rename(columns={'endloc_leak':'candidate_end_loc',
                              'delta_t' : 'delta_t1'},inplace=True)
    # decode geohash -> coordinates (degrees), then convert to radians
    decode = np.vectorize(geohash.decode)
    tmp = decode(leak_bike["candidate_end_loc"])
    leak_bike["latitude_candidate_end_loc"] = tmp[0]* math.pi / 180
    leak_bike["longitude_candidate_end_loc"] = tmp[1]* math.pi / 180
    #leak_bike['isLeak'] = 1
    # candidate source 1: bike leak locations
    train0 = pd.merge(leak_bike[['orderid', 'candidate_end_loc', 'latitude_candidate_end_loc', 'longitude_candidate_end_loc']] ,
                      train, on = 'orderid', how = 'left')
    # candidate source 2: every location this user has visited
    train1 = pd.merge(train, user_loc, on = 'userid', how ='left')
    train1 = train1[train1.candidate_end_loc != train1.geohashed_start_loc]
    #del train1['user_hist']
    # candidate source 3: frequent end locations for the start location
    train2 = pd.merge(train, stop3e, on ='geohashed_start_loc', how ='left')
    result = pd.concat([train1, train2, train0]).drop_duplicates()
    result = result[result.geohashed_start_loc != result.candidate_end_loc]
    #add bike leak diff time
    result = pd.merge(result, leak_bike[['orderid', 'candidate_end_loc', 'delta_t1']], on = ['orderid', 'candidate_end_loc'], how ='left')
    #add user leak diff time
    leak_user = get_leak_user(train, 3600*24*10)
    leak_user.rename(columns={'endloc_leak':'candidate_end_loc',
                              'delta_t' : 'delta_t2'},inplace=True)
    result = pd.merge(result, leak_user[['orderid', 'candidate_end_loc', 'delta_t2']], on = ['orderid', 'candidate_end_loc'], how ='left')
    #add candidate loc from user_loc
    # NOTE(review): mutates the module-level user_loc frame as a side effect
    user_loc['user_hist'] = 1
    result = pd.merge(result, user_loc, on = ['userid', 'candidate_end_loc', 'latitude_candidate_end_loc', 'longitude_candidate_end_loc'], how = 'left')
    calcDirection(result)
    result['date'] = (result['date'] == 'weekend').astype('uint8')
    result['start'] = result.geohashed_start_loc.apply(hash_element).astype('uint32')
    result['end'] = result.geohashed_end_loc.apply(hash_element).astype('uint32')
    # label = 1 where the candidate equals the true end location
    result['label'] = (result['geohashed_end_loc']==result['candidate_end_loc']).astype('uint8')
    num_orderid = float(result.orderid.unique().shape[0])
    print 'sample中的orderid数: %d'%(num_orderid)
    print 'num_orderid: %d, 理论极限:%.4f'%(num_orderid, result['label'].sum() / num_orderid)
    return result
def log_numeric(x):
    """Return round(log(1 + x)), mapping missing values (NaN/None) to 0.

    Bug fix: the original guard `x == np.nan` never matched, because NaN
    compares unequal to everything (including itself), so NaN inputs fell
    through and produced NaN.  pd.isnull handles NaN and None correctly.
    """
    if pd.isnull(x):
        return 0
    return np.round(np.log(1 + x))
#%%
def getDeltaT(t1, t2):
    """Circular distance between two hours on a 24-hour clock (max 12)."""
    return 12 - abs(abs(t1 - t2) - 12)

def getDeltaDeg(d1, d2):
    """Circular distance between two bearings in degrees (max 180)."""
    return 180 - abs(abs(d1 - d2) - 180)

# candidate hour grid: 1.0, 1.5, ..., 24.5 (48 points)
ti = np.arange(1, 25, .5).reshape(1, 48)

def getAvgT(x):
    """Index (scaled by 5) of the grid hour minimizing the total circular
    distance to the sample hours in Series `x`."""
    samples = x.values.reshape(-1, 1)
    totals = getDeltaT(ti, samples).sum(axis=0)
    return np.argmin(totals) * 5

# candidate bearing grid: 1..360 degrees
di = np.arange(1, 361, 1).reshape(1, 360)

def getAvgD(x):
    """Index of the grid bearing minimizing the total circular distance to
    the sample bearings in Series `x`."""
    samples = x.values.reshape(-1, 1)
    totals = getDeltaDeg(di, samples).sum(axis=0)
    return np.argmin(totals)
# col = user|path|time|mean
def getStdT(ds, by, col):
    """Mean circular-time deviation of each row's hour from n0[col]/10, per group.

    NOTE(review): relies on the module-level frame ``n0``; ``.map`` on a
    SeriesGroupBy is unusual (likely intended ``.agg``/``.apply``), and the
    merged result is bound to the *local* ``ds`` and then discarded, so the
    caller only sees the transient tmp-column mutation.  Looks dead or
    buggy -- confirm before use.
    """
    ds['tmp'] = getDeltaT(ds['hour'] , n0[col]/10.0)
    grp = ds.groupby(by, as_index=False)['tmp'].map(np.mean)
    grp.dropna(inplace = True)
    del ds['tmp']
    ds = ds.merge(grp, on = by, how ='left')
#%%
#%user feature
'''
用户使用次数
用户平均距离
用户最大距离
用户最小距离
'''
def getUserProfile(now, hist):
    """Join per-user aggregates from `hist` (trip count; distance mean/max/
    min/std) onto `now`, fill missing users with 0, and downcast the new
    columns with the project convert helpers."""
    stats = hist.groupby('userid', as_index=False).agg(
        {'orderid': 'count', 'dist': [np.mean, 'max', 'min', 'std']})
    # flatten the MultiIndex into "col|func" names
    stats.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in stats.columns]
    renames = {'orderid|count': 'user|count',
               'dist|mean': 'user|dist|mean',
               'dist|max': 'user|dist|max',
               'dist|min': 'user|dist|min',
               'dist|std': 'user|dist|std'}
    stats.rename(columns=renames, inplace=True)
    merged = pd.merge(now, stats, on='userid', how='left')
    merged.fillna(0, inplace=True)
    convertUint16(merged, ['user|dist|mean', 'user|dist|max', 'user|dist|min', 'user|dist|std'])
    convertUint8(merged, ['user|count'])
    return merged
#% user eloc feature
'''
用户到这个地点的平均时间
用户到这个地点的平均方向(deg)
用户到这个地点的平均距离
'''
def getUserElocProfile(now, hist):
    """Join per-(user, end-location) aggregates from `hist` onto `now`,
    keyed on (userid, candidate_end_loc): trip count, circular-average hour,
    circular-average bearing, and distance mean/max/min/std."""
    result = hist.groupby(['userid', 'geohashed_end_loc'], as_index = False).agg({'orderid':'count','hour':[getAvgT], 'deg':[getAvgD], 'dist':[np.mean,'max', 'min', 'std']})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'geohashed_end_loc':'candidate_end_loc',
                           'orderid|count' : 'user|eloc|count',
                           'dist|mean':'user|eloc|dist|mean',
                           'dist|max': 'user|eloc|dist|max',
                           'dist|min': 'user|eloc|dist|min',
                           'dist|std': 'user|eloc|dist|std',
                           'hour|getAvgT': 'user|eloc|time|mean',
                           'deg|getAvgD': 'user|eloc|deg|mean'},inplace=True)
    now = pd.merge(now, result, on=['userid', 'candidate_end_loc'], how='left')
    now.fillna(0,inplace=True)
    # downcast via project helpers (from util star-import)
    convertUint16(now, ['user|eloc|dist|mean', 'user|eloc|dist|max',
                        'user|eloc|dist|min', 'user|eloc|dist|std','user|eloc|deg|mean'])
    convertUint8(now, ['user|eloc|time|mean','user|eloc|count'])
    return now
#%user sloc feature
def getUserSlocProfile(now, hist):
    """Join per-(user, start-location) aggregates from `hist` onto `now`
    twice: once keyed on the order's own start location, and once treating
    the candidate end location as a start location ("eloc_as_sloc")."""
    result = hist.groupby(['userid', 'geohashed_start_loc'], as_index = False).agg({'orderid' : 'count', 'hour':[getAvgT],'deg':[getAvgD], 'dist':[np.mean,'max', 'min', 'std']})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'orderid|count': 'user|sloc|count',
                           'dist|mean':'user|sloc|dist|mean',
                           'dist|max': 'user|sloc|dist|max',
                           'dist|min': 'user|sloc|dist|min',
                           'dist|std': 'user|sloc|dist|std',
                           'hour|getAvgT': 'user|sloc|time|mean',
                           'deg|getAvgD': 'user|sloc|deg|mean'},inplace=True)
    now = pd.merge(now, result, on=['userid', 'geohashed_start_loc'], how='left')
    # second join: rename in place (order matters) so the same aggregates
    # describe the candidate end location acting as a start location
    result.rename(columns={'geohashed_start_loc' : 'candidate_end_loc',
                           'user|sloc|count': 'user|eloc_as_sloc|count',
                           'user|sloc|dist|mean':'user|eloc_as_sloc|dist|mean',
                           'user|sloc|dist|max': 'user|eloc_as_sloc|dist|max',
                           'user|sloc|dist|min': 'user|eloc_as_sloc|dist|min',
                           'user|sloc|dist|std': 'user|eloc_as_sloc|dist|std',
                           'user|sloc|time|mean': 'user|eloc_as_sloc|time|mean',
                           'user|sloc|deg|mean': 'user|eloc_as_sloc|deg|mean'},inplace=True)
    now = pd.merge(now, result, on=['userid', 'candidate_end_loc'], how='left')
    now.fillna(0,inplace=True)
    # downcast via project helpers; note only the first join's columns are
    # downcast here (the eloc_as_sloc copies are left as-is)
    convertUint16(now, ['user|sloc|dist|mean', 'user|sloc|dist|max',
                        'user|sloc|dist|min', 'user|sloc|dist|std','user|sloc|deg|mean'])
    convertUint8(now, ['user|sloc|time|mean','user|sloc|count'])
    return now
#%user path feature
def getUserPathProfile(now, hist):
    """Join per-(user, start, end) path aggregates from `hist` onto `now`,
    keyed on (userid, geohashed_start_loc, candidate_end_loc)."""
    result = hist.groupby(['userid','geohashed_start_loc','geohashed_end_loc'],as_index=False).agg({'orderid':'count','hour':[getAvgT],'deg':[getAvgD],'dist': [np.mean,'max', 'min']})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'geohashed_end_loc':'candidate_end_loc',
                           'orderid|count' : 'user|path|count',
                           'hour|getAvgT': 'user|path|time|mean',
                           'dist|mean':'user|path|dist|mean',
                           'dist|max': 'user|path|dist|max',
                           'dist|min': 'user|path|dist|min',
                           'deg|getAvgD': 'user|path|deg|mean'},inplace=True)
    now = pd.merge(now, result, on=['userid', 'geohashed_start_loc','candidate_end_loc'], how='left')
    now.fillna(0,inplace=True)
    # downcast via project helpers (from util star-import)
    convertUint16(now, ['user|path|dist|mean', 'user|path|dist|max',
                        'user|path|dist|min', 'user|path|deg|mean'])
    convertUint8(now, ['user|path|time|mean','user|path|count'])
    return now
#%User Reverse Path feature
def getUserRevPathProfile(now, hist):
    """Join reverse-path aggregates (count, circular-average hour) onto `now`:
    the historic (end -> start) direction matched against the current
    (start -> candidate end) pair for the same user."""
    result = hist.groupby(['userid','geohashed_start_loc','geohashed_end_loc'],as_index=False).agg({'orderid':'count','hour':[getAvgT]})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    # swap start/end so the merge matches trips in the opposite direction
    result.rename(columns = {'geohashed_start_loc':'candidate_end_loc',
                             'geohashed_end_loc':'geohashed_start_loc',
                             'orderid|count': 'user|RevPath|count',
                             'hour|getAvgT': 'user|RevPath|time|mean'},inplace=True)
    now = pd.merge(now,result,on=['userid','geohashed_start_loc','candidate_end_loc'],how='left')
    now.fillna(0,inplace=True)
    convertUint8(now, ['user|RevPath|time|mean','user|RevPath|count'])
    return now
#%sloc feature
def getSlocProfile(now, hist):
    """Join per-start-location aggregates from `hist` onto `now` twice:
    once on the order's start location, and once treating the candidate end
    location as a start location ("eloc_as_sloc")."""
    result = hist.groupby(['geohashed_start_loc'], as_index = False).agg({'orderid' : 'count', 'hour':[getAvgT] , 'deg':[getAvgD], 'dist':[np.mean,'max', 'min', 'std']})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'orderid|count': 'sloc|count',
                           'dist|mean':'sloc|dist|mean',
                           'dist|max': 'sloc|dist|max',
                           'dist|min': 'sloc|dist|min',
                           'dist|std': 'sloc|dist|std',
                           'deg|getAvgD': 'sloc|deg|mean',
                           'hour|getAvgT' : 'sloc|time|mean'},inplace=True)
    now = pd.merge(now, result, on=['geohashed_start_loc'], how='left')
    #now['sloc|count'].fillna(0,inplace=True)
    # second join: rename in place (order matters) so the same aggregates
    # describe the candidate end location acting as a start location
    result.rename(columns={'geohashed_start_loc' : 'candidate_end_loc',
                           'sloc|count': 'eloc_as_sloc|count',
                           'sloc|dist|mean':'eloc_as_sloc|dist|mean',
                           'sloc|dist|max': 'eloc_as_sloc|dist|max',
                           'sloc|dist|min': 'eloc_as_sloc|dist|min',
                           'sloc|dist|std': 'eloc_as_sloc|dist|std',
                           'sloc|time|mean': 'eloc_as_sloc|time|mean',
                           'sloc|deg|mean': 'eloc_as_sloc|deg|mean'},inplace=True)
    now = pd.merge(now, result, on=['candidate_end_loc'], how='left')
    now.fillna(0,inplace=True)
    # downcast via project helpers (from util star-import)
    convertUint16(now, ['sloc|count',
                        'sloc|dist|mean', 'sloc|dist|max',
                        'sloc|dist|min', 'sloc|deg|mean',
                        'eloc_as_sloc|count',
                        'eloc_as_sloc|dist|mean', 'eloc_as_sloc|dist|max',
                        'eloc_as_sloc|dist|min', 'eloc_as_sloc|deg|mean'])
    #convertUint8(now, ['eloc_as_sloc|time|mean','eloc_as_sloc|count'])
    return now
#%eloc feature
def getElocProfile(now, hist):
    """Join per-end-location aggregates from `hist` onto `now`, keyed on
    candidate_end_loc: trip count, circular-average hour, circular-average
    bearing, and distance mean/max/min/std."""
    result = hist.groupby([ 'geohashed_end_loc'], as_index = False).agg({'orderid':'count','hour':[getAvgT], 'deg':[getAvgD], 'dist':[np.mean,'max', 'min', 'std']})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'geohashed_end_loc':'candidate_end_loc',
                           'orderid|count' : 'eloc|count',
                           'dist|mean':'eloc|dist|mean',
                           'dist|max': 'eloc|dist|max',
                           'dist|min': 'eloc|dist|min',
                           'dist|std': 'eloc|dist|std',
                           'hour|getAvgT': 'eloc|time|mean',
                           'deg|getAvgD': 'eloc|deg|mean'},inplace=True)
    now = pd.merge(now, result, on=['candidate_end_loc'], how='left')
    now.fillna(0,inplace=True)
    # downcast via project helpers (from util star-import)
    convertUint16(now, ['eloc|count',
                        'eloc|dist|mean', 'eloc|dist|max',
                        'eloc|dist|min', 'eloc|dist|std', 'eloc|deg|mean'])
    convertUint8(now, ['eloc|time|mean'])
    return now
#%loc to loc (path) feature
'''
这个路径的历史平均时间
这个路径的历史次数
'''
def getPathProfile(now, hist):
    """Join (start, end) path aggregates from `hist` onto `now`, keyed on
    (geohashed_start_loc, candidate_end_loc): historic count,
    circular-average hour, circular-average bearing, distance stats."""
    result = hist.groupby(['geohashed_start_loc', 'geohashed_end_loc'],
                          as_index = False).agg({'hour':[getAvgT],
                                                 'orderid':'count',
                                                 'deg':[getAvgD],
                                                 'dist': [np.mean,'max', 'min']})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'geohashed_end_loc':'candidate_end_loc',
                           'orderid|count':'path_count',
                           'hour|getAvgT': 'path_avgTime',
                           'dist|mean':'path|dist|mean',
                           'dist|max': 'path|dist|max',
                           'dist|min': 'path|dist|min',
                           'deg|getAvgD': 'path|deg|mean'},inplace=True)
    now = pd.merge(now, result, on=['geohashed_start_loc', 'candidate_end_loc'], how='left')
    now.fillna(0,inplace=True)
    # downcast via project helpers (from util star-import)
    convertUint16(now, ['path|dist|mean', 'path|dist|max',
                        'path|dist|min', 'path|deg|mean'])
    convertUint8(now, ['path_count','path_avgTime'])
    return now
#User Reverse Path feature
def getRevPathProfile(now, hist):
    """Join reverse-path aggregates (count, circular-average hour) onto
    `now`: historic (end -> start) trips matched against the current
    (start -> candidate end) pair, without the user dimension."""
    result = hist.groupby(['geohashed_start_loc','geohashed_end_loc'],as_index=False).agg({'orderid':'count', 'hour':[getAvgT]})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    # swap start/end so the merge matches trips in the opposite direction
    result.rename(columns = {'geohashed_start_loc':'candidate_end_loc',
                             'geohashed_end_loc':'geohashed_start_loc',
                             'orderid|count':'RevPath|count',
                             'hour|getAvgT': 'RevPath|time|mean'},inplace=True)
    now = pd.merge(now,result,on=['geohashed_start_loc','candidate_end_loc'],how='left')
    now.fillna(0,inplace=True)
    convertUint8(now, ['RevPath|count','RevPath|time|mean'])
    return now
#%%
#==============================================================================
# 补充特征
#==============================================================================
def getUniCnt(x):
    """Number of distinct values in Series `x`, counting NaN as a value.

    Equivalent to len(x.unique()) but uses the idiomatic pandas call;
    dropna=False keeps NaN in the count exactly as unique() does.
    """
    return x.nunique(dropna=False)
def getUserSloc(now, hist):
    """Join the number of distinct users seen at each start location in
    `hist` onto `now` as 'sloc|user|count'."""
    counts = hist.groupby(['geohashed_start_loc'], as_index=False).agg({'userid': [getUniCnt]})
    # flatten the MultiIndex into "col|func" names
    counts.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in counts.columns]
    counts.rename(columns={'userid|getUniCnt': 'sloc|user|count'}, inplace=True)
    return pd.merge(now, counts, on=['geohashed_start_loc'], how='left')
def getUserEloc(now, hist):
    """Join the number of distinct users seen at each end location in `hist`
    onto `now` as 'eloc|user|count', keyed on candidate_end_loc."""
    counts = hist.groupby(['geohashed_end_loc'], as_index=False).agg({'userid': [getUniCnt]})
    # flatten the MultiIndex into "col|func" names
    counts.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in counts.columns]
    counts.rename(columns={'geohashed_end_loc': 'candidate_end_loc',
                           'userid|getUniCnt': 'eloc|user|count'}, inplace=True)
    return pd.merge(now, counts, on=['candidate_end_loc'], how='left')
def getStartEnd(now, hist):
    """Join two fan-out counts onto `now`: the number of distinct start
    locations leading to each end location ('eloc|start|count'), and the
    number of distinct end locations reached from each start location
    ('sloc|end|count')."""
    result = hist.groupby(['geohashed_end_loc'], as_index = False).agg({'geohashed_start_loc' : [getUniCnt]})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'geohashed_end_loc':'candidate_end_loc',
                           'geohashed_start_loc|getUniCnt':'eloc|start|count'},inplace=True)
    now = pd.merge(now, result, on=['candidate_end_loc'], how='left')
    result = hist.groupby(['geohashed_start_loc'], as_index = False).agg({'geohashed_end_loc' : [getUniCnt]})
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'geohashed_end_loc|getUniCnt':'sloc|end|count'},inplace=True)
    now = pd.merge(now, result, on=['geohashed_start_loc'], how='left')
    return now
def getUserStartEnd(now, hist):
    """Join three distinct-count features onto `now`: per (user, end) the
    number of distinct start locations, per (user, start) the number of
    distinct end locations, and per (start, end) the number of distinct
    users."""
    result = hist.groupby(['userid', 'geohashed_end_loc'], as_index = False).agg({'geohashed_start_loc' : [getUniCnt]})
    # flatten the MultiIndex into "col|func" names
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'geohashed_end_loc':'candidate_end_loc',
                           'geohashed_start_loc|getUniCnt':'user|eloc|start|count'},inplace=True)
    now = pd.merge(now, result, on=['userid', 'candidate_end_loc'], how='left')
    result = hist.groupby(['userid', 'geohashed_start_loc'], as_index = False).agg({'geohashed_end_loc' : [getUniCnt]})
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'geohashed_end_loc|getUniCnt':'user|sloc|end|count'},inplace=True)
    now = pd.merge(now, result, on=['userid', 'geohashed_start_loc'], how='left')
    result = hist.groupby(['geohashed_start_loc', 'geohashed_end_loc'], as_index = False).agg({'userid' : [getUniCnt]})
    result.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in result.columns]
    result.rename(columns={'geohashed_end_loc':'candidate_end_loc',
                           'userid|getUniCnt':'start|end|user|count'},inplace=True)
    now = pd.merge(now, result, on=['geohashed_start_loc', 'candidate_end_loc'], how='left')
    return now
def gen_features(now, hist):
    """Join every history-derived feature group onto the candidate frame.

    `now` is the candidate sample frame, `hist` the history window the
    features are computed from.  Each getXxx helper returns a new merged
    frame; printHeader (project util) only logs progress.
    """
    printHeader('生成user sloc feature')
    now = getUserSlocProfile(now, hist)
    printHeader('生成user eloc feature')
    now = getUserElocProfile(now, hist)
    printHeader('生成user path feature')
    now = getUserPathProfile(now, hist)
    printHeader('生成user rev path feature')
    now = getUserRevPathProfile(now, hist)
    gc.collect()
    printHeader('生成 sloc feature')
    now = getSlocProfile(now, hist)
    printHeader('生成 eloc feature')
    now = getElocProfile(now, hist)
    printHeader('生成path feature')
    now = getPathProfile(now, hist)
    printHeader('生成 rev path feature')
    now = getRevPathProfile(now, hist)
    gc.collect()
    printHeader('生成user feature')
    now = getUserProfile(now, hist)
    printHeader('生成user sloc')
    now = getUserSloc(now, hist)
    printHeader('生成user eloc')
    now = getUserEloc(now, hist)
    printHeader('生成start end')
    now = getStartEnd(now, hist)
    printHeader('生成user start end')
    now = getUserStartEnd(now, hist)
    gc.collect()
    print 'gen feature done!'
    #now.fillna(0,inplace=True)
    return now
#%%
def append_to_csv(batch, csv_file):
    """Write `batch` to csv_file; append without a header if the file exists."""
    is_new_file = not os.path.exists(csv_file)
    batch.to_csv(csv_file,
                 mode='w' if is_new_file else 'a',
                 header=is_new_file,
                 encoding='utf-8',
                 index=False)
def chunk_dataframe(df, n):
    """Yield consecutive positional slices of `df`, each with at most n rows."""
    start = 0
    total = len(df)
    while start < total:
        yield df.iloc[start:start + n]
        start += n
# Row-count batch size for chunked CSV writing; `chunks` collects the pieces.
chunkSize = 10000000
chunks = []
#%% select sample
train, test = loadDataset()
#train, test = getTinyDS(train, test)
#h0 = train[train.day < 21]
#n0 = train[train.day >= 21]
#%%
print 'from %d to %d'%(args.day_start, args.day_end)
sample_file = ''
if args.day_start < 24:
h0 = train[(train.day < args.day_start) | (train.day > args.day_end)]
n0 = train[(train.day >= args.day_start) & (train.day <= args.day_end)]
sample_file = 'tmp/make_sample_%d-%d'%(args.day_start, args.day_end)
print 'num of orderid: %d'%n0.shape[0]
else:
h0 = train
n0 = test
n0['day'][n0['day'] == 1] = 32
n0['geohashed_end_loc'] = np.nan
sample_file = 'tmp/make_sample_25-32'
del train, test
#%%
printHeader('构造候选集')
user_loc = get_user_loc(h0, n0)
stop3e = get_start_top10end(h0)
#%%
#if param == 'tiny':
# df_all = ft.read_dataframe('tmp/train_tiny.feather')
#else:
# df_all = ft.read_dataframe('tmp/train.feather')
printHeader('make sample train')
if os.path.exists(sample_file):
#df_all = pd.read_csv(sample_file)
df_all = ft.read_dataframe(sample_file)
else:
df_all = make_sample(n0)
df_all.reset_index(inplace = True)
del df_all['index']
df_all.columns = df_all.columns.astype('str')
df_all.to_feather(sample_file)
print 'save finished!'
df_all = df_all[df_all.candidate_end_loc.notnull()]
if args.day_start > 24:
df_all = df_all[(df_all.day >= args.day_start) & (df_all.day <= args.day_end)]
printHeader( 'gen sample train features')
df_all = gen_features(df_all, h0)
if args.day_start < 24:
time_train = pd.read_csv('tmp/time_train_leak.csv')
df_all = pd.merge(df_all, time_train, on = 'orderid', how = 'left')
df_all['velocity'] = df_all['dist'] / df_all['delta_t_x'] * 3600
num = float(df_all.orderid.unique().shape[0])
print 'train orginal shape: %.3f'%(df_all['label'].sum() / num)
df_all = df_all[((df_all.velocity > 20000) & (df_all.label == 1)) | (df_all.velocity <= 20000) | df_all.velocity.isnull()]
print 'train after shape: %.3f'%(df_all['label'].sum() / num)
#print df_all.columns()
df_all.reset_index(inplace = True)
del df_all['index']
df_all.columns = df_all.columns.astype('str')
print df_all.columns
print 'begin to save df_all!'
save_features = ['label', 'orderid', 'userid', 'bikeid', 'biketype', 'dist', 'start','end',
'geohashed_start_loc', 'geohashed_end_loc', 'candidate_end_loc',
'Direction', 'deg', 'day', 'date','time', 'starttime','hour',
'user|count', 'user|dist|mean','user|dist|max','user|dist|min', 'user|dist|std',
'user|eloc|count', 'user|eloc|time|mean', 'user|eloc|dist|mean','user|eloc|dist|max',
'user|eloc|dist|min', 'user|eloc|dist|std', 'user|eloc|deg|mean',
'user|eloc_as_sloc|count', 'user|eloc_as_sloc|time|mean', 'user|eloc_as_sloc|dist|mean',
'user|eloc_as_sloc|dist|max', 'user|eloc_as_sloc|dist|min', 'user|eloc_as_sloc|deg|mean',
'user|sloc|count', 'user|sloc|time|mean', 'user|sloc|dist|mean','user|sloc|dist|max',
'user|sloc|dist|min', 'user|sloc|dist|std', 'user|sloc|deg|mean',
'user|path|count', 'user|path|time|mean','user|path|dist|mean','user|path|dist|max','user|path|dist|min',
'user|RevPath|count', 'user|RevPath|time|mean',
'sloc|count','sloc|dist|mean','sloc|dist|max','sloc|dist|min', 'sloc|dist|std',
'sloc|deg|mean','sloc|time|mean',
'eloc|count','eloc|dist|mean','eloc|dist|max', 'eloc|dist|min', 'eloc|dist|std',
'eloc|time|mean','eloc|deg|mean',
'eloc_as_sloc|count','eloc_as_sloc|dist|mean','eloc_as_sloc|dist|max',
'eloc_as_sloc|dist|min', 'eloc_as_sloc|time|mean','eloc_as_sloc|deg|mean',
'path_count', 'path_avgTime', 'path|dist|mean','path|dist|max','path|dist|min',
'RevPath|count', 'RevPath|time|mean',
'delta_t1', 'delta_t2', 'user_hist', 'sloc|user|count','eloc|user|count',
'eloc|start|count', 'sloc|end|count', 'user|eloc|start|count', 'user|sloc|end|count',
'start|end|user|count']
df_all[save_features].to_feather(args.out_file)
| {"/1_feature_leak.py": ["/util.py"]} |
57,428 | ChenFengling/mobike-cup | refs/heads/master | /3_lgb_v9.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 7 22:42:04 2017
lgb
@author: leo
"""
from util import *
import numpy as np
import pandas as pd
import lightgbm as lgb
import feather as ft
import gc
import time
import os
#%%
#==============================================================================
# V1:09-23 01(2017-09-25 11:51:02): 采用23,24的数据,加入diff特征,轮次1500, 分数0.312, scale_pos_weight = 10, 分数0.312
# V1 (2017-09-25 11:58:18): 采用2324数据,加入diff,轮次1500,scale_pos_weight=1, is_unbalance = true
# V1 (09-25 20:06:05): 修正end的bug
# 09-23 20:44:47 采用2324号两天的训练,轮次1500,加入time range特征,分数仍为0.304
# 9/24 23:59:17 采用21-24号的数据来训练,轮次1500,加入了time range的两个特征,分数???
# V3---09-24\ 13\:23\:23 采用21-24的数据来训练,轮次3000,加入time range的两个特征,分数??
# V4---09-24 20:26:08: 采用21-24的数据,加入了gs4, gs5, gs5_u等github组合特征,轮次2500,去掉time range的特征0.294
# V5: 采用23-24的数据,在V4上述特征基础加入方向的组合特征,轮次2500
# V6 --- 09-25 00:47: 采用23-24数据,在V5上去掉github特征,只加方向,轮次1500
# V7:(09-25\ 14\:51\:03):采用23-24数据,在V5上去掉github特征,只加方向,轮次1500,scale_pos_weight=10(之前为30),得分0.313
# V8 (09-25 18:13:23): 采用23-24数据,在V7上,加time_range与共同起点数的特征,其他不变
# V9 (09-25\ 23\:37\:08): 在V8上加入6位地址特征,以前github特征 0.317
#==============================================================================
# When True, train() reloads a previously saved model instead of retraining.
flag = False
from sklearn.model_selection import train_test_split
# Size of the hash space used by hash_element() for categorical encoding.
D = 2 ** 17
def hash_element(el):
    """Hash *el* into a bucket index in ``[0, D)``.

    Relies on the module-level constant ``D`` (2 ** 17).
    """
    # Python's % with a positive modulus always yields a non-negative result,
    # so the original post-hoc "if h < 0: h += D" adjustment was dead code.
    return hash(el) % D
import string
def get_diff(s,t):
    """Weighted per-character distance between two base-36 strings.

    Characters are valued 0-35 ('0'-'9' then 'a'-'z'); the character at
    position k (from the left) carries weight 36**(len(s)-k-1), so the two
    strings are effectively compared digit-by-digit as base-36 numbers.
    """
    alphabet = [str(d) for d in range(10)] + list(string.ascii_lowercase)
    value = {ch: idx for idx, ch in enumerate(alphabet)}
    base = len(alphabet)
    lo, hi = sorted([s, t])
    total = 0
    for pos, (a, b) in enumerate(zip(lo, hi)):
        if a != b:
            total += base ** (len(lo) - pos - 1) * abs(value[a] - value[b])
    return total
#%%
def getDeltaT(t1, t2):
    """Circular hour difference on a 24-hour clock, in the range [0, 12]."""
    raw = abs(t1 - t2)
    # Fold differences larger than half a day back onto [0, 12].
    return 12 - abs(raw - 12)
def getDeltaDeg(d1, d2):
    """Circular angle difference between two bearings, in the range [0, 180]."""
    raw = abs(d1 - d2)
    # Fold differences larger than a half turn back onto [0, 180].
    return 180 - abs(raw - 180)
#==============================================================================
# #%%添加特征
#==============================================================================
# Feature-set v004: kept for reference; superseded by predictors_005/006 below.
predictors_004 = ['userid', 'biketype','hour','dist', 'deg', 'date','start','end',
                  'user|count','user|dist|mean','user|dist|max','user|dist|min',
                  'user|eloc|count', 'user|eloc|dist|mean','user|eloc|dist|max',
                  'user|eloc|dist|min','user|eloc|time|mean','user|eloc|deg|mean',
                  'user|sloc|count','user|sloc|dist|mean','user|sloc|dist|max',
                  'user|sloc|dist|min','user|sloc|deg|mean','user|path|count',
                  'user|path|dist|mean',
                  'user|path|time|mean','user|path|deg|mean','user|RevPath|count',
                  'sloc|count','sloc|dist|mean','sloc|dist|max','sloc|dist|min',
                  'sloc|deg|mean','eloc|count','eloc|dist|mean','eloc|dist|max',
                  'eloc|dist|min','eloc|time|mean','eloc|deg|mean','path_count',
                  'path_avgTime','path|dist|mean',
                  'path|deg|mean','RevPath|count']
# Feature-set v005: adds std aggregates, eloc-as-sloc profiles and delta_t features.
predictors_005 = ['userid', 'biketype','hour','dist', 'deg', 'date','start','end',
                  'user|count', 'user|dist|mean','user|dist|max','user|dist|min', 'user|dist|std',
                  'user|eloc|count', 'user|eloc|time|mean', 'user|eloc|dist|mean','user|eloc|dist|max',
                  'user|eloc|dist|min', 'user|eloc|dist|std', 'user|eloc|deg|mean',
                  'user|sloc|count', 'user|sloc|time|mean', 'user|sloc|dist|mean','user|sloc|dist|max',
                  'user|sloc|dist|min', 'user|sloc|dist|std', 'user|sloc|deg|mean',
                  'user|eloc_as_sloc|count', 'user|eloc_as_sloc|time|mean', 'user|eloc_as_sloc|dist|mean',
                  'user|eloc_as_sloc|dist|max', 'user|eloc_as_sloc|dist|min', 'user|eloc_as_sloc|deg|mean',
                  'user|path|count', 'user|path|time|mean',
                  'user|RevPath|count', 'user|RevPath|time|mean',
                  'sloc|count','sloc|dist|mean','sloc|dist|max','sloc|dist|min', 'sloc|dist|std',
                  'sloc|deg|mean','sloc|time|mean',
                  'eloc|count','eloc|dist|mean','eloc|dist|max', 'eloc|dist|min', 'eloc|dist|std',
                  'eloc|time|mean','eloc|deg|mean',
                  'eloc_as_sloc|count','eloc_as_sloc|dist|mean','eloc_as_sloc|dist|max',
                  'eloc_as_sloc|dist|min', 'eloc_as_sloc|time|mean','eloc_as_sloc|deg|mean',
                  'path_count', 'path_avgTime',
                  'RevPath|count', 'RevPath|time|mean',
                  'delta_t1', 'delta_t2', 'user_hist']
# Feature-set v006 (active): v005 plus cross-counts and the base-36 'diff' distance;
# direction, time-range, github-style and 6-digit-geohash groups are appended below.
predictors_006 = ['userid', 'biketype','hour','dist', 'deg', 'date','start','end',
                  'user|count', 'user|dist|mean','user|dist|max','user|dist|min', 'user|dist|std',
                  'user|eloc|count', 'user|eloc|time|mean', 'user|eloc|dist|mean','user|eloc|dist|max',
                  'user|eloc|dist|min', 'user|eloc|dist|std', 'user|eloc|deg|mean',
                  'user|sloc|count', 'user|sloc|time|mean', 'user|sloc|dist|mean','user|sloc|dist|max',
                  'user|sloc|dist|min', 'user|sloc|dist|std', 'user|sloc|deg|mean',
                  'user|eloc_as_sloc|count', 'user|eloc_as_sloc|time|mean', 'user|eloc_as_sloc|dist|mean',
                  'user|eloc_as_sloc|dist|max', 'user|eloc_as_sloc|dist|min', 'user|eloc_as_sloc|deg|mean',
                  'user|path|count', 'user|path|time|mean',
                  'user|RevPath|count', 'user|RevPath|time|mean',
                  'sloc|count','sloc|dist|mean','sloc|dist|max','sloc|dist|min', 'sloc|dist|std',
                  'sloc|deg|mean','sloc|time|mean',
                  'eloc|count','eloc|dist|mean','eloc|dist|max', 'eloc|dist|min', 'eloc|dist|std',
                  'eloc|time|mean','eloc|deg|mean',
                  'eloc_as_sloc|count','eloc_as_sloc|dist|mean','eloc_as_sloc|dist|max',
                  'eloc_as_sloc|dist|min', 'eloc_as_sloc|time|mean','eloc_as_sloc|deg|mean',
                  'path_count', 'path_avgTime',
                  'RevPath|count', 'RevPath|time|mean',
                  'user_hist', 'delta_t1', 'delta_t2', 'sloc|user|count','eloc|user|count',
                  'eloc|start|count', 'sloc|end|count', 'user|eloc|start|count', 'user|sloc|end|count',
                  'start|end|user|count', 'diff']#, 'time_range|eloc|count' ,'user|time_range|count']
# Optional feature groups merged into predictors_006 below.
timerange_features = ['time_range|eloc|count' ,'user|time_range|count']
githup_fea = ['gs4', 'gs5', 'gs6', 'gs6_user', 'gs5_user', 'us', 'ue',
              'user|dist|max|over', 'user|eloc|dist|max|over', 'user|sloc|dist|max|over',
              'eloc|dist|max|over', 'sloc|dist|max|over']
#githup_fea = ['us', 'ue']
dir_features = ['sloc|dir|count', 'sloc|dir|dist|mean', 'eloc|dir|count', 'eloc|dir|dist|mean',
                'user|sloc|dir|count', 'user|sloc|dir|dist|mean', 'user|eloc|dir|count','user|eloc|dir|dist|mean']
same_loc_fea = ['sameendcount', 'start1count', 'start2count', 'end1count', 'end2count', 'samestartcount']
loc6_fea = ['start6|count', 'end6|count', 'user|start6|count', 'user|end6|count',
            'user|start6-end|count', 'user|start6-end6|count', 'start6-end|count',
            'start6-end6|count']
predictors_006.extend(githup_fea)
predictors_006.extend(dir_features)
predictors_006.extend(timerange_features)
#predictors_006.extend(same_loc_fea)
predictors_006.extend(loc6_fea)
# Vectorized form of get_diff for element-wise use on pandas columns.
v_get_diff = np.vectorize(get_diff)
def getSlocDirProfile(now, hist):
    """Attach (start cell, direction) ride count and mean distance from *hist* to *now*."""
    keys = ['geohashed_start_loc', 'Direction']
    stats = (hist.groupby(keys, as_index=False)
                 .agg({'orderid': 'count', 'dist': 'mean'})
                 .rename(columns={'orderid': 'sloc|dir|count',
                                  'dist': 'sloc|dir|dist|mean'}))
    out = pd.merge(now, stats, on=keys, how='left')
    # Unseen (cell, direction) pairs have zero historical rides.
    out['sloc|dir|count'] = out['sloc|dir|count'].fillna(0)
    return out
def getElocDirProfile(now, hist):
    """Attach (end cell, direction) ride count and mean distance from *hist* to *now*."""
    stats = hist.groupby(['geohashed_end_loc', 'Direction'], as_index=False).agg(
        {'orderid': 'count', 'dist': 'mean'})
    # Historical end cells are matched against the candidate end location.
    stats = stats.rename(columns={'geohashed_end_loc': 'candidate_end_loc',
                                  'orderid': 'eloc|dir|count',
                                  'dist': 'eloc|dir|dist|mean'})
    out = pd.merge(now, stats, on=['candidate_end_loc', 'Direction'], how='left')
    out['eloc|dir|count'] = out['eloc|dir|count'].fillna(0)
    return out
def getUserSlocDirProfile(now, hist):
    """Attach per-user (start cell, direction) ride count and mean distance to *now*."""
    keys = ['userid', 'geohashed_start_loc', 'Direction']
    stats = (hist.groupby(keys, as_index=False)
                 .agg({'orderid': 'count', 'dist': 'mean'})
                 .rename(columns={'orderid': 'user|sloc|dir|count',
                                  'dist': 'user|sloc|dir|dist|mean'}))
    out = pd.merge(now, stats, on=keys, how='left')
    out['user|sloc|dir|count'] = out['user|sloc|dir|count'].fillna(0)
    return out
def getUserElocDirProfile(now, hist):
    """Attach per-user (end cell, direction) ride count and mean distance to *now*."""
    stats = hist.groupby(['userid', 'geohashed_end_loc', 'Direction'], as_index=False).agg(
        {'orderid': 'count', 'dist': 'mean'})
    stats = stats.rename(columns={'geohashed_end_loc': 'candidate_end_loc',
                                  'orderid': 'user|eloc|dir|count',
                                  'dist': 'user|eloc|dir|dist|mean'})
    out = pd.merge(now, stats, on=['userid', 'candidate_end_loc', 'Direction'], how='left')
    out['user|eloc|dir|count'] = out['user|eloc|dir|count'].fillna(0)
    return out
#1001
def getUserTimerangeProfile(now, hist):
    """Count, per user, historical rides in the same time range and day type."""
    keys = ['userid', 'time', 'date']
    counts = (hist.groupby(keys, as_index=False)
                  .agg({'orderid': 'count'})
                  .rename(columns={'orderid': 'user|time_range|count'}))
    # (The original rename also mapped 'geohashed_end_loc' -> 'candidate_end_loc',
    # a no-op here since that column never appears in this aggregate.)
    out = pd.merge(now, counts, on=keys, how='left')
    out['user|time_range|count'] = out['user|time_range|count'].fillna(0)
    return out
def getElocTimerangeProfile(now,hist):
    """Count historical rides ending in a cell during the same time range/day type."""
    counts = hist.groupby(['time', 'geohashed_end_loc', 'date'], as_index=False).agg(
        {'orderid': 'count'})
    counts = counts.rename(columns={'geohashed_end_loc': 'candidate_end_loc',
                                    'orderid': 'time_range|eloc|count'})
    out = pd.merge(now, counts, on=['time', 'candidate_end_loc', 'date'], how='left')
    out['time_range|eloc|count'] = out['time_range|eloc|count'].fillna(0)
    return out
def getSloc6Profile(now, hist):
    """Count historical rides starting in each 6-character geohash cell."""
    counts = (hist.groupby(['gs6'], as_index=False)
                  .agg({'orderid': 'count'})
                  .rename(columns={'orderid': 'start6|count'}))
    out = pd.merge(now, counts, on='gs6', how='left')
    out['start6|count'] = out['start6|count'].fillna(0)
    return out
def getEloc6Profile(now, hist):
    """Count historical rides ending in each 6-character geohash cell."""
    counts = (hist.groupby(['ge6'], as_index=False)
                  .agg({'orderid': 'count'})
                  .rename(columns={'orderid': 'end6|count'}))
    out = pd.merge(now, counts, on='ge6', how='left')
    out['end6|count'] = out['end6|count'].fillna(0)
    return out
def getUserSloc6Profile(now, hist):
    """Count, per user, historical rides starting in each 6-character cell."""
    keys = ['gs6', 'userid']
    counts = (hist.groupby(keys, as_index=False)
                  .agg({'orderid': 'count'})
                  .rename(columns={'orderid': 'user|start6|count'}))
    out = pd.merge(now, counts, on=keys, how='left')
    out['user|start6|count'] = out['user|start6|count'].fillna(0)
    return out
def getUserEloc6Profile(now, hist):
    """Count, per user, historical rides ending in each 6-character cell."""
    keys = ['ge6', 'userid']
    counts = (hist.groupby(keys, as_index=False)
                  .agg({'orderid': 'count'})
                  .rename(columns={'orderid': 'user|end6|count'}))
    out = pd.merge(now, counts, on=keys, how='left')
    out['user|end6|count'] = out['user|end6|count'].fillna(0)
    return out
def getUserStart6EndProfile(now, hist):
    """Count, per user, rides from a 6-char start cell to an exact end cell."""
    counts = hist.groupby(['userid', 'gs6', 'geohashed_end_loc'], as_index=False).agg(
        {'orderid': 'count'})
    counts = counts.rename(columns={'geohashed_end_loc': 'candidate_end_loc',
                                    'orderid': 'user|start6-end|count'})
    out = pd.merge(now, counts, on=['userid', 'gs6', 'candidate_end_loc'], how='left')
    out['user|start6-end|count'] = out['user|start6-end|count'].fillna(0)
    return out
def getUserStart6End6Profile(now, hist):
    """Count, per user, rides between pairs of 6-character start/end cells."""
    keys = ['userid', 'gs6', 'ge6']
    counts = (hist.groupby(keys, as_index=False)
                  .agg({'orderid': 'count'})
                  .rename(columns={'orderid': 'user|start6-end6|count'}))
    out = pd.merge(now, counts, on=keys, how='left')
    out['user|start6-end6|count'] = out['user|start6-end6|count'].fillna(0)
    return out
def getStart6EndProfile(now, hist):
    """Count rides from a 6-char start cell to an exact end cell (all users)."""
    counts = hist.groupby(['gs6', 'geohashed_end_loc'], as_index=False).agg(
        {'orderid': 'count'})
    counts = counts.rename(columns={'geohashed_end_loc': 'candidate_end_loc',
                                    'orderid': 'start6-end|count'})
    out = pd.merge(now, counts, on=['gs6', 'candidate_end_loc'], how='left')
    out['start6-end|count'] = out['start6-end|count'].fillna(0)
    return out
def getStart6End6Profile(now, hist):
    """Count rides between pairs of 6-character start/end cells (all users)."""
    keys = ['gs6', 'ge6']
    counts = (hist.groupby(keys, as_index=False)
                  .agg({'orderid': 'count'})
                  .rename(columns={'orderid': 'start6-end6|count'}))
    out = pd.merge(now, counts, on=keys, how='left')
    out['start6-end6|count'] = out['start6-end6|count'].fillna(0)
    return out
def gen_features(now, hist):
    """Append direction, time-range and 6-digit-geohash features to *now*.

    NOTE: mutates *hist* in place by adding temporary 'gs6'/'ge6' columns.
    """
    # Direction-conditioned aggregates.
    now = getSlocDirProfile(now, hist)
    now = getElocDirProfile(now, hist)
    now = getUserSlocDirProfile(now, hist)
    now = getUserElocDirProfile(now, hist)
    # Time-of-day-range aggregates.
    now = getUserTimerangeProfile(now, hist)
    now = getElocTimerangeProfile(now, hist)
    # Coarser 6-character geohash cells (prefix of the 7-character cell).
    now['ge6'] = now['candidate_end_loc'].apply(lambda x: x[:6])
    now['gs6'] = now['geohashed_start_loc'].apply(lambda x: x[:6])
    hist['ge6'] = hist['geohashed_end_loc'].apply(lambda x: x[:6])
    hist['gs6'] = hist['geohashed_start_loc'].apply(lambda x: x[:6])
    now = getSloc6Profile(now, hist)
    now = getEloc6Profile(now, hist)
    now = getUserSloc6Profile(now, hist)
    now = getUserEloc6Profile(now, hist)
    now = getUserStart6EndProfile(now, hist)
    now = getUserStart6End6Profile(now, hist)
    now = getStart6EndProfile(now, hist)
    now = getStart6End6Profile(now, hist)
    # gs6/ge6 were only join keys; drop them from the output frame.
    del now['ge6']
    del now['gs6']
    return now
# Load the full training set once at module level; the test half is unused here.
train_ds, _ = loadDataset()
# Binarize the date flag: 1 for weekend/holiday, 0 for weekday.
train_ds['date'] = (train_ds['date'] == 'weekend').astype('uint8')
# Convert raw frequency counts into per-day averages (done in process() below).
def process(ds, day):
    """Normalize and relativize raw aggregate features of *ds* in place.

    ``day`` is the number of history days the counts were accumulated over;
    all count features are converted to per-day averages. Distance/deg/time
    aggregates are rewritten as values *relative* to the current candidate
    ride (ratio to mean/max, absolute delta to min, circular deltas for
    bearing and hour). Mutates *ds*; returns None.

    NOTE(review): heavy use of chained indexing (``ds[col][mask] = ...``)
    relies on legacy pandas write-through behavior — verify on the pandas
    version in use before porting.
    """
    # Hashed categorical crosses (user x start cell, user x candidate end cell).
    ds['us'] = (ds['geohashed_start_loc']+'_'+ds['userid'].astype(str)).apply(hash_element).astype('uint32')
    ds['ue'] = (ds['candidate_end_loc']+'_'+ds['userid'].astype(str)).apply(hash_element).astype('uint32')
    ds['end'] = ds['candidate_end_loc'].apply(hash_element).astype('uint32')
    # Counts -> per-day averages.
    ds['user|count'] = ds['user|count'] / day
    ds['user|eloc|count'] = ds['user|eloc|count'] / day
    ds['user|sloc|count'] = ds['user|sloc|count'] / day
    ds['user|path|count'] = ds['user|path|count'] / day
    ds['user|RevPath|count'] = ds['user|RevPath|count'] / day
    ds['sloc|count'] = ds['sloc|count'] / day
    ds['eloc|count'] = ds['eloc|count'] / day
    ds['path_count'] = ds['path_count'] / day
    ds['RevPath|count'] = ds['RevPath|count'] / day
    ds['sloc|user|count'] = ds['sloc|user|count'] / day
    ds['eloc|user|count'] = ds['eloc|user|count'] / day
    ds['eloc|start|count'] = ds['eloc|start|count'] / day
    ds['sloc|end|count'] = ds['sloc|end|count'] / day
    ds['user|eloc|start|count'] = ds['user|eloc|start|count'] / day
    ds['user|sloc|end|count'] = ds['user|sloc|end|count'] / day
    ds['start|end|user|count'] = ds['start|end|user|count'] / day
    if 'user|time_range|count' in ds.columns:
        ds['time_range|eloc|count'] = ds['time_range|eloc|count'] / day
        ds['user|time_range|count'] = ds['user|time_range|count'] / day
    # Base-36 digit distance between start and candidate end geohash strings.
    ds['diff'] = v_get_diff(ds['geohashed_start_loc'], ds['candidate_end_loc'])
    # User-level distance stats -> relative to this ride's distance;
    # the |over flags mark candidates farther than the user's historical max.
    ds['user|dist|mean'][ds['user|dist|mean'].notnull()] = abs(ds['dist'][ds['user|dist|mean'].notnull()] * 1.0 / ds['user|dist|mean'][ds['user|dist|mean'].notnull()])
    ds['user|dist|max'][ds['user|dist|max'].notnull()] = abs(ds['dist'][ds['user|dist|max'].notnull()] *1.0 / ds['user|dist|max'][ds['user|dist|max'].notnull()])
    ds['user|dist|min'][ds['user|dist|min'].notnull()] = abs(ds['dist'][ds['user|dist|min'].notnull()] - ds['user|dist|min'][ds['user|dist|min'].notnull()])
    ds['user|dist|std'][ds['user|dist|std'].notnull()] = (ds['user|dist|mean'][ds['user|dist|std'].notnull()]).astype('float32') / (1e-10 + ds['user|dist|std'][ds['user|dist|std'].notnull()])
    ds['user|dist|max|over'] = 0
    ds['user|dist|max|over'][ds['user|dist|max'].notnull()] = (ds['user|dist|max'][ds['user|dist|max'].notnull()] > 1).astype(int)
    # User x end-cell stats relative to the current ride.
    ds['user|eloc|dist|mean'][ds['user|eloc|dist|mean'].notnull()] = abs(ds['dist'][ds['user|eloc|dist|mean'].notnull()] *1.0 / ds['user|eloc|dist|mean'][ds['user|eloc|dist|mean'].notnull()])
    ds['user|eloc|dist|max'][ds['user|eloc|dist|max'].notnull()] = abs(ds['dist'][ds['user|eloc|dist|max'].notnull()] * 1.0 / ds['user|eloc|dist|max'][ds['user|eloc|dist|max'].notnull()])
    ds['user|eloc|dist|min'][ds['user|eloc|dist|min'].notnull()] = abs(ds['dist'][ds['user|eloc|dist|min'].notnull()] - ds['user|eloc|dist|min'][ds['user|eloc|dist|min'].notnull()])
    ds['user|eloc|dist|std'][ds['user|eloc|dist|std'].notnull()] = (ds['user|eloc|dist|mean'][ds['user|eloc|dist|std'].notnull()]).astype('float32') / (1e-10 + ds['user|eloc|dist|std'][ds['user|eloc|dist|std'].notnull()])
    ds['user|eloc|deg|mean'][ds['user|eloc|deg|mean'].notnull()] = getDeltaDeg(ds['deg'][ds['user|eloc|deg|mean'].notnull()], ds['user|eloc|deg|mean'][ds['user|eloc|deg|mean'].notnull()])
    # time|mean aggregates are stored scaled by 10; /10.0 restores hours.
    ds['user|eloc|time|mean'][ds['user|eloc|time|mean'].notnull()] = getDeltaT(ds['hour'][ds['user|eloc|time|mean'].notnull()], ds['user|eloc|time|mean'][ds['user|eloc|time|mean'].notnull()] / 10.0)
    ds['user|eloc|dist|max|over'] = 0
    ds['user|eloc|dist|max|over'][ds['user|eloc|dist|max'].notnull()] = (ds['user|eloc|dist|max'][ds['user|eloc|dist|max'].notnull()] > 1).astype(int)
    # User x start-cell stats relative to the current ride.
    ds['user|sloc|dist|mean'][ds['user|sloc|dist|mean'].notnull()] = abs(ds['dist'][ds['user|sloc|dist|mean'].notnull()] * 1.0 / ds['user|sloc|dist|mean'][ds['user|sloc|dist|mean'].notnull()])
    ds['user|sloc|dist|max'][ds['user|sloc|dist|max'].notnull()] = abs(ds['dist'][ds['user|sloc|dist|max'].notnull()] * 1.0 / ds['user|sloc|dist|max'][ds['user|sloc|dist|max'].notnull()])
    ds['user|sloc|dist|min'][ds['user|sloc|dist|min'].notnull()] = abs(ds['dist'][ds['user|sloc|dist|min'].notnull()] - ds['user|sloc|dist|min'][ds['user|sloc|dist|min'].notnull()])
    ds['user|sloc|dist|std'][ds['user|sloc|dist|std'].notnull()] = (ds['user|sloc|dist|mean'][ds['user|sloc|dist|std'].notnull()]).astype('float32') / (1e-10 + ds['user|sloc|dist|std'][ds['user|sloc|dist|std'].notnull()] )
    ds['user|sloc|deg|mean'][ds['user|sloc|deg|mean'].notnull()] = getDeltaDeg(ds['deg'][ds['user|sloc|deg|mean'].notnull()], ds['user|sloc|deg|mean'][ds['user|sloc|deg|mean'].notnull()])
    ds['user|sloc|time|mean'][ds['user|sloc|time|mean'].notnull()] = getDeltaT(ds['hour'][ds['user|sloc|time|mean'].notnull()], ds['user|sloc|time|mean'][ds['user|sloc|time|mean'].notnull()] / 10.0)
    ds['user|sloc|dist|max|over'] = 0
    ds['user|sloc|dist|max|over'][ds['user|sloc|dist|max'].notnull()] = (ds['user|sloc|dist|max'][ds['user|sloc|dist|max'].notnull()] > 1).astype(int)
    # Global end-cell stats relative to the current ride.
    ds['eloc|dist|mean'][ds['eloc|dist|mean'].notnull()] = abs(ds['dist'][ds['eloc|dist|mean'].notnull()] * 1.0 / ds['eloc|dist|mean'][ds['eloc|dist|mean'].notnull()])
    ds['eloc|dist|max'][ds['eloc|dist|max'].notnull()] = abs(ds['dist'][ds['eloc|dist|max'].notnull()] * 1.0 / ds['eloc|dist|max'][ds['eloc|dist|max'].notnull()])
    ds['eloc|dist|min'][ds['eloc|dist|min'].notnull()] = abs(ds['dist'][ds['eloc|dist|min'].notnull()] - ds['eloc|dist|min'][ds['eloc|dist|min'].notnull()])
    ds['eloc|dist|std'][ds['eloc|dist|std'].notnull()] = (ds['eloc|dist|mean'][ds['eloc|dist|std'].notnull()]).astype('float32') / (1e-10 + ds['eloc|dist|std'][ds['eloc|dist|std'].notnull()])
    ds['eloc|dist|max|over'] = 0
    ds['eloc|dist|max|over'][ds['eloc|dist|max'].notnull()] = (ds['eloc|dist|max'][ds['eloc|dist|max'].notnull()] > 1).astype(int)
    ds['eloc|deg|mean'][ds['eloc|deg|mean'].notnull()] = getDeltaDeg(ds['deg'][ds['eloc|deg|mean'].notnull()], ds['eloc|deg|mean'][ds['eloc|deg|mean'].notnull()])
    ds['eloc|time|mean'][ds['eloc|time|mean'].notnull()] = getDeltaT(ds['hour'][ds['eloc|time|mean'].notnull()], ds['eloc|time|mean'][ds['eloc|time|mean'].notnull()] / 10.0)
    # Global start-cell stats relative to the current ride.
    ds['sloc|dist|mean'][ds['sloc|dist|mean'].notnull()] = abs(ds['dist'][ds['sloc|dist|mean'].notnull()] * 1.0 / ds['sloc|dist|mean'][ds['sloc|dist|mean'].notnull()])
    ds['sloc|dist|max'][ds['sloc|dist|max'].notnull()] = abs(ds['dist'][ds['sloc|dist|max'].notnull()] * 1.0 / ds['sloc|dist|max'][ds['sloc|dist|max'].notnull()])
    ds['sloc|dist|min'][ds['sloc|dist|min'].notnull()] = abs(ds['dist'][ds['sloc|dist|min'].notnull()] - ds['sloc|dist|min'][ds['sloc|dist|min'].notnull()])
    ds['sloc|dist|std'][ds['sloc|dist|std'].notnull()] = (ds['sloc|dist|mean'][ds['sloc|dist|std'].notnull()]).astype('float32') / (1e-10 + ds['sloc|dist|std'][ds['sloc|dist|std'].notnull()])
    ds['sloc|dist|max|over'] = 0
    ds['sloc|dist|max|over'][ds['sloc|dist|max'].notnull()] = (ds['sloc|dist|max'][ds['sloc|dist|max'].notnull()] > 1).astype(int)
    ds['sloc|deg|mean'][ds['sloc|deg|mean'].notnull()] = getDeltaDeg(ds['deg'][ds['sloc|deg|mean'].notnull()], ds['sloc|deg|mean'][ds['sloc|deg|mean'].notnull()])
    ds['sloc|time|mean'][ds['sloc|time|mean'].notnull()] = getDeltaT(ds['hour'][ds['sloc|time|mean'].notnull()], ds['sloc|time|mean'][ds['sloc|time|mean'].notnull()] / 10.0)
    # Direction features (when present): counts per day, distances relative.
    if dir_features[1] in ds.columns:
        for fea in dir_features:
            if 'count' in fea:
                ds[fea] = ds[fea] / day
            if 'dist|mean' in fea:
                ds[fea][ds[fea].notnull()] = ds['dist'][ds[fea].notnull()] * 1.0 / ds[fea][ds[fea].notnull()]
    # Coarse geohash prefixes (4/5/6 chars) and their hashed user crosses.
    ds['gs4'] = ds['geohashed_start_loc'].apply(lambda x: x[:4])
    ds['gs5'] = ds['geohashed_start_loc'].apply(lambda x: x[:5])
    ds['gs6'] = ds['geohashed_start_loc'].apply(lambda x: x[:6])
    ds['gs6_user'] = (ds['gs6']+'_'+ds['userid'].astype(str)).apply(hash_element).astype('uint32')
    ds['gs5_user'] = (ds['gs5']+'_'+ds['userid'].astype(str)).apply(hash_element).astype('uint32')
    ds['gs4'] = ds['gs4'].apply(hash_element).astype('uint32')
    ds['gs5'] = ds['gs5'].apply(hash_element).astype('uint32')
    ds['gs6'] = ds['gs6'].apply(hash_element).astype('uint32')
#%%
# LightGBM hyper-parameters for the binary candidate-ranking model.
params = {}
params['learning_rate'] = 0.01
params['boosting_type'] = 'gbdt'
params['objective'] = 'binary'
params['metric'] = 'auc'
# Column subsampling per tree (alias of feature_fraction).
params['sub_feature'] = 0.9
params['lambda_l1'] = 10
params['lambda_l2'] = 50
params['num_leaves'] = 300
params['max_depth'] = 10
params['bagging_fraction'] = 0.8
params['bagging_freq'] = 5
#params['query'] = 0
# Positives (true end locations) are rare among candidates; upweight them.
params['scale_pos_weight'] = 10
params['min_data'] = 500
params['min_hessian'] = 1
params['early_stopping_round'] = 30
#params['is_unbalance']='true'
params['bin_construct_sample_cnt'] = 100000
#%%
def train(filenames, kfold):
    """Train (or reload) the fold's LightGBM ranker from feather sample files.

    For each file, picks the matching history slice of the module-level
    ``train_ds``, generates extra features, concatenates all folds, applies
    process() with a 12-day history, then splits by orderid 80/20 into
    train/valid and trains for up to 1500 rounds with early stopping.
    Returns the fitted lgb.Booster.

    NOTE(review): if a filename matches neither hard-coded pattern, ``hist``
    is unbound (or stale from the previous iteration) when gen_features runs —
    verify the file list before adding new sample files.
    """
    model_path = 'model/lgb_model_fold%d'%kfold
    t0 = pd.DataFrame()
    for f in filenames:
        t1 = ft.read_dataframe(f)
        print 't1.shape = ', t1.shape
        print f
        print t1.columns
        # History = all days outside the fold's label window.
        if f == 'tmp/CV_train_2324_top20_addfea.feather':
            hist = train_ds[train_ds.day<23]
        if f == 'tmp/CV_train_2122_top20_addfea.feather':
            hist = train_ds[(train_ds.day<21) | (train_ds.day >22)]
        t1 = gen_features(t1, hist)
        t0 = pd.concat([t0, t1])
    process(t0, 12.0)
    print 't0.shape = ', t0.shape
    # Split by order id so all candidates of one order stay on the same side.
    order = pd.Series(t0.orderid.unique())
    train_order, valid_order = train_test_split(order, test_size=0.2, random_state=42)
    t1 = t0[t0.orderid.isin(train_order)]
    t2 = t0[t0.orderid.isin(valid_order)]
    x_train = t1[predictors_006]
    y_train = t1['label']
    print 'feature count: %d'%x_train.shape[1]
    x_valid = t2[predictors_006]
    y_valid = t2['label']
    # NOTE(review): bitwise & works here only because both operands are bools;
    # the idiomatic operator would be `and`.
    if os.path.exists(model_path) & flag:
        clf = lgb.Booster(model_file = model_path)
    else:
        print 'begin train, kfold = %d' % kfold
        d_train = lgb.Dataset(x_train, label=y_train)
        d_valid = lgb.Dataset(x_valid, label=y_valid)
        watchlist = [d_train, d_valid]
        clf = lgb.train(params, d_train, 1500, watchlist)
        print ' 模型保存....\n'
        clf.save_model('model/lgb_model_fold%d'%kfold)
    print('Feature names:', clf.feature_name())
    # Normalized feature importances, most important first.
    fea_useful = pd.Series(clf.feature_importance(), index = clf.feature_name()).sort_values(ascending = False)
    fea_useful = fea_useful / fea_useful.sum()
    print 'lgb fea shape = ', fea_useful.shape
    print fea_useful
    # Validation: per-order hit rate vs. the theoretical upper bound.
    p_mean = clf.predict(t2[predictors_006])
    limit_score = t2['label'].sum() / float(t2.orderid.unique().shape[0])
    hit_score = getValidScore(t2, t2['label'], p_mean) / float(t2.orderid.unique().shape[0])
    print 'Train理论上限值: %.3f, 模型得分: %.3f'%(limit_score, hit_score) #0.567, 0.405
    print ' ...\n'
    return clf
#fiels = ['tmp/CV_train_2122_top20_addfea.feather','tmp/CV_train_2324_top20_addfea.feather']
# NOTE(review): 'fiels' is a typo for 'files'; kept since it is only used here.
fiels = ['tmp/CV_train_2324_top20_addfea2.feather']
#fiels = ['tmp/CV_train_2122_top20_addfea.feather']
#model = train(fiels, 2124)
# Train (or reload, when `flag` is set) the fold-2324 model.
model = train(fiels, 2324)
#%% predict test
#model_path = 'model/lgb_model_fold%d'%2324
# NOTE(review): the next line was `3model = lgb.Booster(model_file = model_path)`
# — the stray leading '3' made it a syntax error, and model_path is local to
# train(), so the line is kept commented out together with its companion above.
# Uncomment both lines to load a saved model instead of using the one just trained.
#model = lgb.Booster(model_file = model_path)
gc.collect()
# Columns used when assembling the submission.
sub_fea = ['orderid', 'candidate_end_loc', 'day']
def predict(ts):
    """Score candidate rows *ts* with the module-level LightGBM model.

    Uses the full training history (train_ds) for feature generation and
    normalizes count features over a 14-day history via process().
    Returns the raw prediction array (one score per candidate row).
    """
    ts = gen_features(ts, train_ds)
    process(ts, 14.0)
    printHeader('Model Predict')
    y_test_pred = model.predict(ts[predictors_006])
    #result = generateSubmission(ts, y_test_pred)
    return y_test_pred
# --- Score the test candidates day-by-day (memory-bound) and build the submission ---
t1 = ft.read_dataframe('tmp/test_sample_feature2528_addfea.feather')
p1 = predict(t1[t1.day == 25])
p2 = predict(t1[t1.day == 26])
p3 = predict(t1[t1.day == 27])
p4 = predict(t1[t1.day == 28])
del t1
gc.collect()
t2 = ft.read_dataframe('tmp/test_sample_feature2932_addfea.feather')
p5 = predict(t2[t2.day <= 30])
p6 = predict(t2[t2.day > 30])
# Concatenation order must match the row order of tmp/test_sample_ts.csv below.
p = np.concatenate((p1,p2,p3,p4,p5,p6))
str_T = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
np.save('pred/p_%s.npy'%str_T, p)
#t = pd.concat([r1,r2,r3,r4,r5,r6])
del t2
gc.collect()
t = pd.read_csv('tmp/test_sample_ts.csv')
#result = pd.concat([r1,r2, r3, r4, r5, r6])
result = generateSubmission(t, p)
# Re-align to the official test order; orders without candidates get a dummy cell.
_,test = loadDataset()
result = pd.merge(test[['orderid']],result,on='orderid',how='left')
result.fillna('0000000',inplace=True)
result.to_csv('sub/lgb_%s.csv'%str_T, index=False,header=False)
print 'done!'
| {"/1_feature_leak.py": ["/util.py"]} |
57,429 | ChenFengling/mobike-cup | refs/heads/master | /util.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 10:35:00 2017
@author: leo
"""
import pandas as pd
import numpy as np
#import seaborn as sns
import os
import geohash
import math
from pandas_ply import install_ply, X, sym_call
import hashlib
install_ply(pd)
def convertUint16(df, cols):
    """Downcast *cols* of *df* to uint16 in place, then mark zeros as missing.

    NOTE(review): assigning NaN into an integer column via chained indexing
    relies on legacy pandas implicit-upcast behavior — verify on the pandas
    version in use.
    """
    for col in cols:
        df[col] = df[col].astype('uint16')
        df[col][df[col] == 0] = np.nan
def convertFloat32(df, cols):
    """Downcast *cols* of *df* to float32 in place, then mark zeros as missing (NaN)."""
    for col in cols:
        df[col] = df[col].astype('float32')
        df[col][df[col] == 0] = np.nan
def convertUint8(df, cols):
    """Downcast *cols* of *df* to uint8 in place, then mark zeros as missing.

    NOTE(review): same legacy chained-assignment caveat as convertUint16.
    """
    for col in cols:
        df[col] = df[col].astype('uint8')
        df[col][df[col] == 0] = np.nan
def hashstr(str, nr_bins):
    """Deterministically hash *str* (md5) into a bucket in [1, nr_bins - 1].

    The parameter name shadows the builtin ``str``; kept for interface
    compatibility with existing callers.
    """
    digest = hashlib.md5(str.encode('utf8')).hexdigest()
    return int(digest, 16) % (nr_bins - 1) + 1
# Day-of-month buckets for the competition window; used by processDatetime().
weekday = [10,11,12,15,16,17,18,19,22,23,24,25,26,27,31,1]
weekend = [13,14,20,21]
# Public-holiday days are treated as weekend downstream.
holiday = [28,29,30]
def leak_sub(sub):
    """Overwrite top-3 predictions with leaked true end locations when known.

    Reads 'leak.csv' (orderid, endloc_leak); wherever the leak disagrees
    with p1, shifts p1->p2->p3 and puts the leaked location first. Returns
    a frame with columns [orderid, p1, p2, p3].
    """
    leak= pd.read_csv('leak.csv', names = ['orderid', 'endloc_leak'])
    compare = pd.merge(sub, leak, on='orderid', how='left')
    # Shift order matters: p3 takes old p2, p2 takes old p1, then p1 takes the leak.
    compare['p3'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())] = compare['p2'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())]
    compare['p2'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())] = compare['p1'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())]
    compare['p1'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())] = compare['endloc_leak'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())]
    compare = compare[['orderid', 'p1', 'p2', 'p3']]
    return compare
def printHeader(title):
    """Print *title* framed by ten '#' characters on each side."""
    # String repetition replaces the original character-by-character
    # concatenation loops; print(single_string) is valid in both
    # Python 2 (parenthesized expression) and Python 3.
    print('#' * 10 + title + '#' * 10)
def processDatetime(ds):
    """Derive hour, time-of-day range, day-of-month and day-type columns in place.

    Expects ``starttime`` strings formatted 'YYYY-MM-DD HH:MM:SS'.
    NOTE(review): uses chained indexing on ``ds.loc[:,'time']`` — relies on
    legacy pandas write-through behavior.
    """
    # Fractional hour, e.g. '.. 08:30:00' -> 8.5.
    ds['hour'] = ds.starttime.map(lambda x: int(x[11:13]) + float(x[14:16]) / 60)
    ds['time'] = ''
    ds.loc[:,'time'][(ds.hour>=6) & (ds.hour <9)] = 'Morning Peak'
    ds.loc[:,'time'][(ds.hour>=9) & (ds.hour <11.5)] = 'Working Time'
    ds.loc[:,'time'][(ds.hour>=11.5) & (ds.hour <13)] = 'Noon'
    ds.loc[:,'time'][(ds.hour>=13) & (ds.hour <17)] = 'Working Time'
    ds.loc[:,'time'][(ds.hour>=17) & (ds.hour <19)] = 'Evening Peak'
    ds.loc[:,'time'][(ds.hour>=19) & (ds.hour <23)] = 'Night Time'
    ds.loc[:,'time'][(ds.hour>=23) | (ds.hour <6)] = 'Sleeping Time'
    ds['day'] = ds.starttime.map(lambda x: int(x[8:10]))
    # Holidays count as weekend; everything else defaults to weekday.
    ds['date'] = 'weekday'
    #ds['date'][ds.day.isin(weekday)] = 'weekday'
    ds['date'][ds.day.isin(weekend)] = 'weekend'
    ds['date'][ds.day.isin(holiday)] = 'weekend'
def decodeLocation(ds):
    """Decode geohash cells into latitude/longitude columns (radians) in place.

    The end-location columns are only added when ``geohashed_end_loc`` exists
    (absent in the test set).
    """
    decode = np.vectorize(geohash.decode)
    tmp = decode(ds["geohashed_start_loc"])
    # Degrees -> radians for the trigonometry in calcDirection().
    ds["latitude_start_loc"] = tmp[0] * math.pi / 180
    ds["longitude_start_loc"] = tmp[1] * math.pi / 180
    if "geohashed_end_loc" in ds.columns:
        tmp = decode(ds["geohashed_end_loc"])
        ds["latitude_end_loc"] = tmp[0]* math.pi / 180
        ds["longitude_end_loc"] = tmp[1]* math.pi / 180
#just in train dataset
def calcDirection(ds):
    """Compute bearing ('deg', 'Direction') and L1 distance ('dist') in place.

    Works against candidate end locations when present, otherwise the true
    end locations. 'deg' is a compass bearing in [0, 360); 'Direction' buckets
    it into 8 compass sectors; 'dist' is a Manhattan-style metres estimate.
    """
    if 'longitude_candidate_end_loc' in ds.columns:
        ds['lngdelta'] = ds['longitude_candidate_end_loc'] - ds['longitude_start_loc']
        ds['latdelta'] = ds['latitude_candidate_end_loc'] - ds['latitude_start_loc']
    else:
        ds['lngdelta'] = ds['longitude_end_loc'] - ds['longitude_start_loc']
        ds['latdelta'] = ds['latitude_end_loc'] - ds['latitude_start_loc']
    cos_v = np.vectorize(math.cos)
    abs_v = np.vectorize(abs)
    atan2_v = np.vectorize(math.atan2)
    # First-quadrant angle; the sign of the deltas remaps it to the full circle below.
    ds['deg'] = atan2_v(abs_v(cos_v(ds['latitude_start_loc']) * ds['lngdelta']),
                        abs_v(ds['latdelta'])) * 180.0 / math.pi
    ds['deg'][(ds.latdelta>=0) & (ds.lngdelta<0)] = 360 - ds['deg'][(ds.latdelta>=0) & (ds.lngdelta<0)]
    ds['deg'][(ds.latdelta<0) & (ds.lngdelta<=0)] = 180 + ds['deg'][(ds.latdelta<0) & (ds.lngdelta<=0)]
    ds['deg'][(ds.latdelta<0) & (ds.lngdelta>0)] = 180 - ds['deg'][(ds.latdelta<0) & (ds.lngdelta>0)]
    # 8-way compass sector, 45 degrees per sector centred on N/NE/E/...
    ds['Direction'] = '1'
    ds['Direction'][(ds['deg'] >= 22.5) & (ds['deg'] < 67.5)] = 'NE'
    ds['Direction'][(ds['deg'] >= 67.5) & (ds['deg'] < 112.5)] = 'E'
    ds['Direction'][(ds['deg'] >= 112.5) & (ds['deg'] < 157.5)] = 'SE'
    ds['Direction'][(ds['deg'] >= 157.5) & (ds['deg'] < 202.5)] = 'S'
    ds['Direction'][(ds['deg'] >= 202.5) & (ds['deg'] < 247.5)] = 'SW'
    ds['Direction'][(ds['deg'] >= 247.5) & (ds['deg'] < 292.5)] = 'W'
    ds['Direction'][(ds['deg'] >= 292.5) & (ds['deg'] < 337.5)] = 'NW'
    ds['Direction'][(ds['deg'] >= 337.5) | (ds['deg'] < 22.5)] = 'N'
    # Earth radius (m); Manhattan distance approximation on the sphere.
    R = 6378137
    if 'longitude_candidate_end_loc' in ds.columns:
        ds['dist']= abs_v(R*(ds['latitude_candidate_end_loc']-ds['latitude_start_loc']))+abs_v(R*cos_v(ds['latitude_start_loc'])*ds['lngdelta'])
    else:
        ds['dist']= abs_v(R*(ds['latitude_end_loc']-ds['latitude_start_loc']))+abs_v(R*cos_v(ds['latitude_start_loc'])*ds['lngdelta'])
def encodeDist(ds):
    """Bucket the 'dist' column (meters) into a categorical 'dist_code'.

    Buckets: <140 'shortshort', [140, 800) 'short', [800, 1200) 'normal'
    (the default — this gap is preserved from the original), [1200, 2400)
    'long', >=2400 'longlong'. Mutates ds in place.

    Fixes: uses .loc instead of chained indexing (SettingWithCopy hazard).
    """
    ds['dist_code'] = 'normal'
    ds.loc[ds.dist < 140, 'dist_code'] = 'shortshort'
    ds.loc[(ds.dist >= 140) & (ds.dist < 800), 'dist_code'] = 'short'
    ds.loc[(ds.dist >= 1200) & (ds.dist < 2400), 'dist_code'] = 'long'
    ds.loc[ds.dist >= 2400, 'dist_code'] = 'longlong'
def loadDataset():
    """Load the train/test sets, preferring a cached HDF5 copy.

    On first run the CSVs are read, geohashes decoded and features added,
    then the result is cached to ./train.h5 / ./test.h5 so later runs skip
    the feature engineering. Returns (train_dataset, test_dataset).

    NOTE(review): 'tabel' looks like a typo for 'table' but it is only the
    HDF store key, so it is harmless as long as it is consistent.
    """
    if os.path.exists('./train.h5'):
        train_file = './train.h5'
        train_dataset = pd.read_hdf(train_file)
    else:
        train_file = './train.csv'
        train_dataset = pd.read_csv(train_file)
        # Feature engineering is done once and persisted with the cache.
        decodeLocation(train_dataset)
        calcDirection(train_dataset)
        processDatetime(train_dataset)
        train_dataset.to_hdf('train.h5','tabel')
    if os.path.exists('./test.h5'):
        test_file = './test.h5'
        test_dataset = pd.read_hdf(test_file)
    else:
        test_file = './test.csv'
        test_dataset = pd.read_csv(test_file)
        # No calcDirection here: test has no end location to aim at.
        decodeLocation(test_dataset)
        processDatetime(test_dataset)
        test_dataset.to_hdf('test.h5','tabel')
    return train_dataset, test_dataset
def getOldUserDS(train, test):
    """Return the train/test subsets whose users appear in both sets."""
    seen_in_test = train.userid.isin(test.userid)
    seen_in_train = test.userid.isin(train.userid)
    return train.loc[seen_in_test], test.loc[seen_in_train]
def getNewUserDS(train, test):
    """Return the train/test subsets whose users appear in only one set."""
    unseen_train = ~train.userid.isin(test.userid)
    unseen_test = ~test.userid.isin(train.userid)
    return train.loc[unseen_train], test.loc[unseen_test]
def getTinyDS(train, test, train_size=100000, test_size=1000):
    """Return random (with replacement) subsamples of train and test.

    Defaults preserve the original hard-coded sizes; the new keyword
    arguments make the sample sizes configurable.

    Fixes: ``.ix`` was removed in pandas 1.0 — ``.loc`` performs the same
    label-based row lookup on the sampled index values.
    """
    train_rows = np.random.choice(train.index.values, train_size)
    sampled_train = train.loc[train_rows]
    test_rows = np.random.choice(test.index.values, test_size)
    samples_test = test.loc[test_rows]
    return sampled_train, samples_test
def get_user_loc(train, test):
    """Collect every location a user started from (train+test) or ended at
    (train) as that user's candidate end locations.

    Returns a (userid, candidate_end_loc, latitude/longitude in radians)
    table with duplicates removed.

    NOTE(review): relies on pandas-ply (`ply_select`, the `X` expression
    object) and the `geohash` package being importable at module level.
    """
    col_start = ['orderid','userid', 'geohashed_start_loc']
    col_end = ['orderid','userid', 'geohashed_end_loc']
    user_start = pd.concat([train[col_start], test[col_start]])
    user_end = train[col_end]
    # Per-user visit counts; the counts are only transient and are
    # deleted again below — grouping is what matters here.
    tmp1 = user_start.groupby(['userid', 'geohashed_start_loc']).ply_select(start_num = X.orderid.count()).reset_index()
    tmp2 = user_end.groupby(['userid', 'geohashed_end_loc']).ply_select(end_num = X.orderid.count()).reset_index()
    tmp3 = tmp1.rename(columns={'geohashed_start_loc':'candidate_end_loc'})
    del tmp3['start_num']
    tmp4 = tmp2.rename(columns={'geohashed_end_loc':'candidate_end_loc'})
    del tmp4['end_num']
    user_loc = pd.concat([tmp3, tmp4])
    user_loc.drop_duplicates(inplace=True)
    # Geohash -> (lat, lng) in degrees, then converted to radians to match
    # the other *_loc columns (see decodeLocation).
    decode = np.vectorize(geohash.decode)
    tmp = decode(user_loc["candidate_end_loc"])
    user_loc["latitude_candidate_end_loc"] = tmp[0]* math.pi / 180
    user_loc["longitude_candidate_end_loc"] = tmp[1]* math.pi / 180
    return user_loc
def get_user_end_loc(train,test):
    """For every test order, list all end locations its user reached in train."""
    known_ends = train[['userid', 'geohashed_end_loc',
                        'longitude_end_loc', 'latitude_end_loc']].drop_duplicates()
    merged = test[['orderid', 'userid']].merge(known_ends, on='userid', how='left')
    return merged[['orderid', 'geohashed_end_loc', 'longitude_end_loc', 'latitude_end_loc']]
def get_user_start_loc(train, test):
    """For every test order, list the user's known start locations,
    relabelled as candidate end locations."""
    known_starts = train[['userid', 'geohashed_start_loc',
                          'longitude_start_loc', 'latitude_start_loc']].drop_duplicates()
    merged = test[['orderid', 'userid']].merge(known_starts, on='userid', how='left')
    merged = merged.rename(columns={'geohashed_start_loc' : 'geohashed_end_loc',
                                    'longitude_start_loc' : 'longitude_end_loc',
                                    'latitude_start_loc': 'latitude_end_loc'})
    return merged[['orderid', 'geohashed_end_loc', 'longitude_end_loc', 'latitude_end_loc']]
def get_start_top3end(train, test):
    """For each test order, propose the 3 most frequent end locations seen
    (in train) for its start location.

    Fixes: the dict-renaming form of ``SeriesGroupBy.agg`` (and grouping
    by a column that is also selected) was removed in pandas 1.0;
    ``groupby(...).size()`` yields the same per-group row count.
    """
    sloc_eloc_count = (train
                       .groupby(['geohashed_start_loc', 'geohashed_end_loc',
                                 'longitude_end_loc', 'latitude_end_loc'])
                       .size().reset_index(name='sloc_eloc_count'))
    # Ascending sort + tail(3) keeps the 3 largest counts per start loc.
    sloc_eloc_count.sort_values('sloc_eloc_count', inplace=True)
    sloc_eloc_count = sloc_eloc_count.groupby('geohashed_start_loc').tail(3)
    result = pd.merge(test[['orderid', 'geohashed_start_loc']], sloc_eloc_count,
                      on='geohashed_start_loc', how='left')
    return result[['orderid', 'geohashed_end_loc', 'longitude_end_loc', 'latitude_end_loc']]
def user_end_pairs(train):
    """Distinct (user, end-location) pairs from train, relabelled as
    candidate end locations (the per-pair counts are discarded)."""
    pairs = (train.groupby(['userid',
                            'geohashed_end_loc',
                            'longitude_end_loc',
                            'latitude_end_loc'])['orderid']
             .count().reset_index(name='count'))
    pairs = pairs.rename(columns={'geohashed_end_loc' : 'candidate_end_loc',
                                  'longitude_end_loc' : 'longitude_candidate_end_loc',
                                  'latitude_end_loc' : 'latitude_candidate_end_loc'})
    return pairs.drop(columns=['count'])
def bike_end_pair(train):
    """Per-bike end-location usage counts, relabelled as candidate columns.

    Unlike user_end_pairs, the 'count' column is intentionally kept."""
    pairs = (train.groupby(['bikeid',
                            'geohashed_end_loc',
                            'longitude_end_loc',
                            'latitude_end_loc'])['orderid']
             .count().reset_index(name='count'))
    return pairs.rename(columns={'geohashed_end_loc' : 'candidate_end_loc',
                                 'longitude_end_loc' : 'longitude_candidate_end_loc',
                                 'latitude_end_loc' : 'latitude_candidate_end_loc'})
def factorizeCat(train, test, category_features):
    """Return {feature: uniques index} for each categorical feature.

    'candidate_end_loc' draws its vocabulary from train end locations plus
    all start locations; 'Direction' and 'dist_code' use fixed vocabularies
    so their integer encoding is stable across runs; anything else is
    factorized over the concatenated train+test values.
    """
    combined = pd.concat([train, test])
    lookup = {}
    for feature in category_features:
        if feature == 'candidate_end_loc':
            vocab = train['geohashed_end_loc'].tolist() + combined['geohashed_start_loc'].tolist()
            _, lookup[feature] = pd.factorize(vocab)
        elif feature == 'Direction':
            _, lookup[feature] = pd.factorize(['S', 'SE', 'SW', 'N', 'E', 'NW', 'W', 'NE'])
        elif feature == 'dist_code':
            _, lookup[feature] = pd.factorize(['shortshort', 'short','normal','long', 'longlong'])
        else:
            _, lookup[feature] = pd.factorize(combined[feature])
    return lookup
def buildTrainSet_by_orderEnd(ds, order_end):
    """Join candidate end locations onto ds by orderid and add the
    direction/distance features.

    Candidates equal to the start location, or missing after the left
    join, are dropped before feature computation.
    """
    merged = pd.merge(ds, order_end, on = 'orderid', how='left')
    same_as_start = merged['geohashed_start_loc'] == merged['candidate_end_loc']
    merged = merged[~same_as_start]
    merged = merged[merged.candidate_end_loc.notnull()]
    calcDirection(merged)
    encodeDist(merged)
    return merged
def buildTrainSet_by_UserEnd(ds, user_end_pairs):
    """Join every known end location of each user onto ds and add the
    direction/distance features.

    NOTE(review): unlike buildTrainSet_by_orderEnd, rows whose candidate
    equals the start location (or is null), the distance cut-off, and the
    label assignment were all deliberately disabled in the original
    (commented out), so no filtering happens here.
    """
    merged = pd.merge(ds, user_end_pairs, on = ['userid'], how = 'left')
    calcDirection(merged)
    encodeDist(merged)
    return merged
def getPartSumbbission(test, filename, newfile):
    """Merge a partial submission CSV onto the full test order list and
    write a complete submission file.

    Orders missing from the partial file get the placeholder geohash
    '0000000' in all three prediction slots.
    """
    part = pd.read_csv(filename, names = ['orderid', 'p1', 'p2', 'p3'])
    all_sub = pd.merge(test, part, on ='orderid', how = 'left')
    # '0000000' is a dummy geohash for orders without predictions.
    all_sub = all_sub.fillna('0000000')
    all_sub = all_sub[['orderid','p1', 'p2', 'p3']]
    all_sub.to_csv(newfile, header = None, index = False)
def getValidScore(x_valid, y_valid, yv_pred):
    """Compute the competition-style validation score.

    Keeps the top-3 candidates per order (by predicted score) and awards
    1 point for a hit at rank 1, 1/2 at rank 2 and 1/3 at rank 3.

    NOTE(review): mutates x_valid by adding a 'label_pred' column.
    """
    y_pred = yv_pred
    x_valid['label_pred'] = y_pred
    # Highest predicted score first, keep 3 candidates per order.
    top3 = x_valid.sort_values(['orderid','label_pred'],ascending=[True, False]).groupby(['orderid']).head(3)
    # Rank 1..3 within each order ('first' breaks ties by position).
    top3['group_sort']=top3['label_pred'].groupby(top3['orderid']).rank(ascending=0,method='first')
    # Pivot to one row per order with rank columns (1.0/2.0/3.0 -> p1/p2/p3).
    sub = top3.pivot_table(index='orderid', columns='group_sort', values='candidate_end_loc',aggfunc=lambda x: ' '.join(x)).reset_index()
    sub = sub.rename(columns ={1.0:'p1', 2.0:'p2', 3.0:'p3'})
    # Score only against the true-positive rows of the validation set.
    valid_compare = pd.merge(x_valid[y_valid == 1], sub, on ='orderid', how= 'left')
    score = valid_compare[valid_compare['geohashed_end_loc'] == valid_compare['p1']].shape[0] + \
            valid_compare[valid_compare['geohashed_end_loc'] == valid_compare['p2']].shape[0] / 2.0 + \
            valid_compare[valid_compare['geohashed_end_loc'] == valid_compare['p3']].shape[0] /3.0
    score = float(score)
    return score
def generateSubmission(test, y_pred, filename = 'submission.csv'):
    """Attach predicted scores to test and return a frame with the top-3
    candidate end locations per order as columns p1/p2/p3.

    NOTE: mutates test (adds 'label_pred'); `filename` is currently unused
    — the to_csv call was disabled in the original.
    """
    test['label_pred'] = y_pred
    ranked = test.sort_values(['orderid', 'label_pred'], ascending=[True, False])
    top3 = ranked.groupby(['orderid']).head(3)
    # Rank 1..3 within each order; 'first' breaks ties by position.
    top3['group_sort'] = top3['label_pred'].groupby(top3['orderid']).rank(ascending=0, method='first')
    sub = top3.pivot_table(index='orderid', columns='group_sort',
                           values='candidate_end_loc',
                           aggfunc=lambda x: ' '.join(x)).reset_index()
    return sub.rename(columns={1.0: 'p1', 2.0: 'p2', 3.0: 'p3'})
    #sub.to_csv(filename, header = None, index = False)
def repair_by_leak(subfile, leakfle):
    """Promote leaked true end locations to rank 1 in a submission file.

    Where a leak row exists and disagrees with p1, predictions shift down
    one rank (p2->p3, p1->p2) and the leaked location takes p1.

    NOTE(review): the assignment order below matters — p3 must be
    overwritten before p2, and p2 before p1, because every mask reads the
    still-original p1 column.
    """
    sub = pd.read_csv(subfile, names = ['orderid', 'p1', 'p2', 'p3'])
    leak = pd.read_csv(leakfle)
    compare = pd.merge(sub, leak, on='orderid', how='left')
    compare['p3'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())] = compare['p2'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())]
    compare['p2'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())] = compare['p1'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())]
    compare['p1'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())] = compare['endloc_leak'][(compare.p1 != compare.endloc_leak) & (compare.endloc_leak.notnull())]
    return compare[['orderid','p1', 'p2', 'p3']]
def resortCol(data):
    """Return data with the 'label' column moved to the front (for tools
    that expect the target in the first column).

    Fixes: ``.ix`` was removed in pandas 1.0; ``.loc`` performs the same
    label-based column selection here.
    """
    cols = list(data)
    cols.insert(0, cols.pop(cols.index('label')))
    return data.loc[:, cols]
def toBayesianFile(d, dict_of_features_factorized, filename):
    """Map every column of d to integer codes via the factorized
    vocabularies and dump the result as a header-less CSV.

    NOTE(review): `idx` is incremented but never used; the printed string
    ('开始转换' = 'start converting') is a per-column progress log.
    Mutates d in place.
    """
    idx = 1
    l = {}
    for col in d.columns:
        idx = idx + 1
        # Python-2 print statement (progress log).
        print '开始转换', col
        col_list = dict_of_features_factorized[col].tolist()
        # Invert the uniques array into a value -> integer-code mapping.
        l[col] = {dict_of_features_factorized[col][i] : i for i in range(0, len(col_list))}
        d.loc[:,col] = d[col].map(l[col])
    d.to_csv(filename, header = None, index = False)
57,432 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/settings/base.py | import os
import sys
# Base Django settings shared by all environments; environment-specific
# values are pulled in by the wildcard import of `local` at the bottom.
PROJECT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Make apps importable without the `guitar.apps.` package prefix.
sys.path.append(os.path.join(PROJECT_DIR, 'apps'))
PUBLIC_DIR = os.path.join(PROJECT_DIR, '..', 'public')
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = ()
MANAGERS = ADMINS
ALLOWED_HOSTS = []
TIME_ZONE = 'Etc/UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Served/collected files live under public/ next to the project package.
MEDIA_ROOT = os.path.join(PUBLIC_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PUBLIC_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(PROJECT_DIR, 'static'),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
TEMPLATE_DIRS = (
    os.path.join(PROJECT_DIR, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django_extensions',
    # NOTE(review): `south` implies Django < 1.7 (pre-builtin migrations).
    'south',
    'django_ace',
    'chord',
    'configurator'
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        # Rotating file log: 10 MB per file, 50 backups.
        'default': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(PROJECT_DIR, '../', 'logs', 'guitar.log'),
            'maxBytes': 1024 * 1024 * 10,
            'backupCount': 50,
            'formatter': 'standard',
        },
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        # Root logger: everything goes to the rotating file handler.
        '': {
            'handlers': ['default'],
            'level': 'DEBUG',
            'propagate': True
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'django.db.backends': {
            'level': 'ERROR',
        }
    }
}
# Where the configurator app looks for per-app config.json bundles.
APPS_CONFIGS_PATH = os.path.join(PROJECT_DIR, '..', 'apps_configurations')
# NOTE(review): Python-2 implicit relative import; overrides the settings
# above with environment-local values.
from local import *
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,433 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/configurator/utils.py | from django.conf import settings
import os
import json
def prepare_configuration_json(app_name):
    """Load apps_configurations/<app_name>/config.json, inline file-based
    templates, and return the configuration as a JSON string.

    Returns None when no configuration directory exists for app_name.
    """
    path = os.path.join(settings.APPS_CONFIGS_PATH, app_name)
    # Python-2 print statement (debug output).
    print path
    if not os.path.exists(path):
        # We haven't config for given app_name
        return None
    with open(os.path.join(path, 'config.json'), 'r') as config_file:
        config = json.loads(config_file.read())
    # NOTE(review): validate_config currently always returns True and the
    # result is ignored anyway.
    validate_config(config)
    # Load templates from <type>.tpl files next to config.json.
    for patch_config in config:
        if patch_config.get('template_type') == 'file':
            with open(os.path.join(path, '%s.tpl' % patch_config['type']), 'r') as template_file:
                patch_config['template'] = template_file.read()
    return json.dumps(config)
def validate_config(config):
    """Validate a parsed configuration structure.

    TODO: implement real validation — currently accepts everything.
    """
    return True
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,434 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/cmd/__init__.py | """
guitar django packages manager
Usage:
guitar [-h | -V]
guitar install <name>... [-vqNOD]
guitar install <name>... [-U=<upath>] [-S=<spath>] [-L=<lspath>]
guitar investigate [--save]
guitar search <name>
guitar create <name>
Options:
-h --help Show this screen.
-V --version Show version.
-v --verbose Show more output.
-q --quiet Show no output
-N --no-patch Do not write any changes, only dump on screen.
-O --overwrite Allow to overwrite existing lines.
-D --default Do not ask any questions. Use defaults.
-l --local-settings Write changes to "local settings" file only.
--save Save investigated into .guitar file
-U --urls-file=<upath>
-S --settings-file=<spath>
-L --local-settings-file=<lspath>
"""
import os
from docopt import docopt
import handlers.install
import handlers.search
import handlers.create
import handlers.investigate
# Maps the CLI command name (as docopt reports it) to its handler class.
# NOTE(review): handlers.base is never imported explicitly here — the
# attribute resolves only because the submodules above do
# `from .base import ...`; consider importing handlers.base directly.
HANDLERS = {
    'install': handlers.install.InstallHandler,
    'search': handlers.search.SearchHandler,
    'investigate': handlers.investigate.InvestigateHandler,
    'create': handlers.create.CreateHandler,
    # Key is so strange as it is should be similar to key in
    # arguments dict, returned by docopt.
    '--version': handlers.base.VersionHandler,
}
class Router(object):
    """Dispatch parsed docopt arguments to the matching command handler."""

    def __init__(self, arguments):
        self.options = Options(arguments)
        self.command = self.get_command(arguments)

    def get_command(self, arguments):
        """Return the single active command key from the docopt result.

        docopt may expose several command keys; exactly one must be truthy.
        """
        active = [name for name in HANDLERS.keys() if arguments.get(name)]
        assert len(active) > 0, 'Seems we have command w/o related handler.'
        assert len(active) < 2, 'We accept only one command per call'
        return active[0]

    def route(self):
        """Instantiate the handler for the chosen command (handlers run
        their work from __init__)."""
        HANDLERS[self.command](self.command, self.options)
class Options(object):
    """Prettified view over the raw docopt arguments dict."""

    def __init__(self, arguments):
        self.cwd = os.getcwd()
        # Resolve absolute paths for the optional file arguments
        # (None when the option was not given on the command line).
        self.urls_file_path = self.get_full_path(arguments['--urls-file'])
        self.settings_file_path = self.get_full_path(arguments['--settings-file'])
        self.local_settings_file_path = self.get_full_path(arguments['--local-settings-file'])
        self.verbose = arguments['--verbose']
        self.quiet = arguments['--quiet']
        self.save = arguments['--save']
        self.use_defaults = arguments['--default']
        self.do_not_patch = arguments['--no-patch']
        self.overwrite = arguments['--overwrite']
        # We no need ability to list package twice or so -> set()
        self.packages = list(set(arguments['<name>']))

    def get_full_path(self, path):
        """Resolve `path` relative to the cwd; None/empty stays None.

        A missing file is reported but its path is still returned.
        """
        # Fix: guard against a missing optional argument — without this,
        # a None path would fail in os.path.join / leave the return value
        # unbound.
        if not path:
            return None
        full_path = os.path.abspath(os.path.join(self.cwd, path))
        if not os.path.exists(full_path):
            print("File {} does not exists.".format(full_path))  # better raise?
        return full_path
# Parse the CLI per the module docstring and build the router.
# NOTE(review): router.route() is never called here — presumably the
# console entry point (bin/guitar) does it; verify.
arguments = docopt(__doc__)
router = Router(arguments)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,435 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/cmd/handlers/investigate.py | from .base import CommandHandler
class InvestigateHandler(CommandHandler):
    """Handler for `guitar investigate` (currently an emulation stub)."""

    def handle(self):
        # `investigate` accepts no package arguments.
        assert not self.options.packages, 'Investigate do not require any package'
        print('Investigation emulation...')
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,436 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/patcher/__init__.py | from item_patchers import (
SettingsPatcher, MiddlewarePatcher,
AppsPatcher, UrlsPatcher, ValidationError)
class Patcher:
    """Applies patch descriptions to Django settings/urls files.

    NOTE(review): `patchers` is a class-level dict of shared patcher
    instances — every Patcher object reuses the same instances.
    """
    patchers = {
        'middleware': MiddlewarePatcher(),
        'settings': SettingsPatcher(),
        'apps': AppsPatcher(),
        'urls': UrlsPatcher()
    }
    def __init__(self):
        """
        Add patches to Django settings and urls file
        """
        self.files = _Files()
    def patch(self, patch_obj, override_files=True):
        """
        Add code to django settings and urls files
        :param patch_obj: Object, generated by confugurtor.Configurator
        :param override_files: when True, flush all patched contents to disk.
        """
        try:
            for patcher_name, patcher_obj in patch_obj.items():
                # Get file content for given file patch
                file_content = self.files.open(patcher_obj['file_path'])
                # Apply patch to file content
                chenged_file_content = self.patchers[patcher_name].apply(file_content, patcher_obj['patch'])
                # Update file content in memory for the next patch
                self.files.update(patcher_obj['file_path'], chenged_file_content)
        except ValidationError:
            # NOTE(review): validation failures are silently swallowed and
            # partially-applied patches may still be flushed below — TODO
            # report/handle instead of pass.
            pass
        if override_files:
            self.files.write_all()
    @classmethod
    def available_patchers(cls):
        """Return the names of the supported patch targets."""
        return cls.patchers.keys()
class _Files:
def __init__(self):
self.contents = {}
def open(self, path):
if not path in self.contents:
with open(path, 'r') as f:
self.contents[path] = f.read()
return self.contents[path]
def write(self, path):
if not path in self.contents:
return
with open(path, 'w') as f:
f.write(self.contents[path])
def write_all(self):
# Write all changes to files
for path, content in self.contents.items():
with open(path, 'w') as f:
f.write(content)
def update(self, path, content):
self.contents[path] = content
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,437 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/setup.py | from distutils.core import setup
# setuptools is required to call `setup.py develop`
#import setuptools
from guitar import VERSION
# Distribution metadata for the guitar CLI package.
# NOTE(review): distutils is used (setuptools import is commented out at
# the top of the file) — `install_requires` only takes effect when
# setuptools is actually driving the build.
setup(
    name='guitar',
    version=VERSION,
    author='Roman Osipenko & Dmitry Upolovnikov',
    author_email='roman.osipenko@djangostars.com',
    url='http://guitar.djangostars.com/',
    description='django package manager',
    packages=['guitar'],
    scripts=['bin/guitar'],
    keywords='django package configure install scaffold',
    license='MIT',
    long_description=open('README.txt').read(),
    install_requires=[
        "pip>= 1.3.1",
        "clint>= 0.3.1",
        "docopt>= 0.6.1"
    ],
    #test_suite='tests',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,438 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/cmd/handlers/install.py | from .base import CommandHandler
from guitar import fetcher
class InstallHandler(CommandHandler):
    """Handler for `guitar install <name>...` (still an emulation)."""

    def handle(self):
        for package in self.options.packages:
            print('Installation emulation for package `{}`'.format(package))
            print('Fetching configuration file...')
            config = fetcher.fetcher.get_config(package)
            if not config:
                print('Fetching NOT FOUND...')
                # Check https://pypi.python.org/pypi/{} if package really exist.
                # XXX: We can have configurations, not related to packages, like:
                # - simplify CACHE, DATABASE, DEFAULT_CONTEXT_PROCESSORS configuration.
                # - scaffolding?
                # So that is is not necessary, that pypi should return 200
                print('We do not have configuration for package you trying to install.')
                print('You can help, by contributing such configuration,')
                print('To do so, type: `guitar create {}` to create barebone configuration.'.format(package))
                #print inquirer.dialogs.YesNo('Do you want to simply install apckage using `pip`?').do()
                continue
            print('Fetching OK. Continue...')
            # TODO: Check if package already installed by pip
            # TODO: Investigate project structure to find required files.
            # TODO: Ask questions
            # TODO: Write changes to settings, urls, requirements.
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,439 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/chord/urls_api.py | from django.conf.urls import patterns, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('chord.views_api',
url(r'^search/(?P<q>.+)/$', 'api_chord_search', name="search"),
url(r'^chords/(?P<title>[-_\w]+)/$', 'api_chord_config', name="details"),
)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,440 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/cmd/handlers/search.py | from guitar.inquirer import messages
from guitar import fetcher
from .base import CommandHandler
class SearchHandler(CommandHandler):
    """Handler for `guitar search <name>`: query the chord registry."""

    def handle(self):
        # Exactly one package name is expected for a search.
        assert len(self.options.packages) == 1, 'We can search only single package'
        package = self.options.packages[0]
        messages.message('Searching emulation for package `{}`'.format(package))
        res = fetcher.fetcher.search(package)
        if res and res['status'] == "OK":
            messages.success("Found by `{}`".format(package))
            for p in res['chords']:
                messages.message("> {}".format(p))
        else:
            # Fix: user-facing message grammar (was "No Found.").
            messages.error("Not found.")
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,441 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/cmd/handlers/create.py | from .base import CommandHandler
class CreateHandler(CommandHandler):
    """Handler for `guitar create <name>`: scaffold a new chord config."""

    def handle(self):
        # Creation works on exactly one package name.
        assert len(self.options.packages) == 1, 'We can create only single package'
        target = self.options.packages[0]
        print('Creation emulation for package `{}`'.format(target))
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,442 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/chord/urls.py | from django.conf.urls import patterns, url
from django.contrib import admin

# Populate the admin registry before URL resolution.
admin.autodiscover()

# NOTE(review): `patterns()` with dotted-string view names is the
# pre-Django-1.8 URL style and was removed in Django 1.10 — confirm the
# Django version this project pins.
urlpatterns = patterns('chord.views',
    url(r'^chords/$', 'chord_list', name="list"),
    url(r'^chords/(?P<title>[-_\w]+)/$', 'chord_details', name="details"),
)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,443 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/chord/models.py | from django.db import models
class ChordManager(models.Manager):
    """Manager that restricts the default queryset to active chords."""

    def get_query_set(self):
        # NOTE(review): `get_query_set` is the pre-Django-1.6 spelling; newer
        # Django expects `get_queryset` — confirm the target Django version.
        return super(ChordManager, self).get_query_set().filter(is_active=True)
class Chord(models.Model):
    """A package ("chord") record with its stored configuration text."""

    # We are safe to use title as slug as it is not possible to have
    # package name, which is not URL friendly (txh: PyPI)
    title = models.CharField(max_length=255, unique=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    configuration = models.TextField(blank=True, null=True)
    is_active = models.BooleanField(default=True)
    objects = models.Manager()  # default, unfiltered manager
    active = ChordManager()  # only rows where is_active=True
    # Stats, meta info, etc -> as soon as main goals of `guitar` will be solved.

    def __unicode__(self):
        # Python 2 string representation.
        return self.title
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,444 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/chord/views_api.py | import json
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from .models import Chord
def api_chord_search(request, q):
    """Return active chord titles containing *q*, as a JSON response."""
    titles = Chord.active.filter(title__icontains=q).values_list('title', flat=True)
    payload = {
        'status': 'OK',
        'chords': list(titles),
    }
    return HttpResponse(json.dumps(payload), content_type='application/json')
def api_chord_config(request, title):
    """Serve the stored configuration of one active chord (404 if missing)."""
    configuration = get_object_or_404(Chord.active, title=title).configuration
    return HttpResponse(configuration, content_type='application/json')
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,445 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/tests/configurator_test.py | import unittest
import json
from guitar.configurator import Configurator
class TestConfigurator(unittest.TestCase):
    """Drives Configurator through scripted answers and checks the patches."""

    config_path = 'tests/test_config_json.txt'  # fixture with the question config
    maxDiff = None  # show full dict diffs on failure

    def test_configurator(self):
        with open(self.config_path, 'r') as f:
            config = json.loads(f.read())
        file_paths = {
            'settings': 'dummy',
            'urls': 'dummy2',
            'installed_apps': 'dummy',
        }
        configurator = Configurator(config, file_paths)
        # One scripted answer per question, consumed in order.
        answers = [1, 'my_db_name', 'my_db_user', '^test_url/', None]
        for question in configurator:
            answer = answers.pop(0)
            self.assertTrue(question['title'])
            question_answers = question.get('answers', [])
            for _answer in question_answers:
                if _answer['key'] == answer:
                    question.answer(answer)
                    # NOTE(review): `continue` only skips within this inner
                    # loop — presumably `break` was intended; confirm.
                    continue
            if question['type'] == 'input':
                question.answer(answer)
        patches = configurator.get_patches()
        # Golden expected patch set for the answer script above.
        patches_expect = {
            u'installed_apps': {
                'file_path': 'dummy',
                'patch': {'item_to_add': u'foo.bar', 'after': u'django.contrib.sessions', 'before': None}
            },
            u'urls': {
                'file_path': 'dummy2',
                'patch': {'item_to_add': u"url(r'^test_url/', include('foo.urls'))", 'after': None, 'before': None}
            },
            u'settings': {
                'file_path': 'dummy',
                'patch': {
                    'item_to_add': u"DATABASES = {\n 'default': {\n 'ENGINE': 'postgresql',\n 'NAME': '',\n 'USER': 'my_db_name',\n 'PASSWORD': 'my_db_user',\n 'HOST': '',\n 'PORT': '',\n }\n}\n",
                    'after': None,
                    'before': None
                }
            }
        }
        self.assertDictEqual(patches_expect, patches)

    def test_skip_questions(self):
        # Answering "3" first skips the database name/user questions.
        with open(self.config_path, 'r') as f:
            config = json.loads(f.read())
        file_paths = {
            'settings': 'dummy',
            'urls': 'dummy2',
            'installed_apps': 'dummy',
        }
        configurator = Configurator(config, file_paths)
        answers = [3, '^test_url/', None]
        for question in configurator:
            answer = answers.pop(0)
            self.assertTrue(question['title'])
            question_answers = question.get('answers', [])
            for _answer in question_answers:
                if _answer['key'] == answer:
                    question.answer(answer)
                    continue
            if question['type'] == 'input':
                question.answer(answer)
        patches = configurator.get_patches()
        patches_expect = {
            u'installed_apps': {
                'file_path': 'dummy',
                'patch': {
                    'item_to_add': u'foo.bar',
                    'after': u'django.contrib.sessions',
                    'before': None
                }
            },
            u'urls': {
                'file_path': 'dummy2',
                'patch': {
                    'item_to_add': u"url(r'^test_url/', include('foo.urls'))",
                    'after': None,
                    'before': None
                }
            },
            u'settings': {
                'file_path': 'dummy',
                'patch': {
                    'item_to_add': u"DATABASES = {\n 'default': {\n 'ENGINE': 'sqlite3',\n 'NAME': '',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n",
                    'after': None,
                    'before': None
                }
            }
        }
        self.assertDictEqual(patches_expect, patches)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,446 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/inquirer/dialogs.py | from textwrap import fill
from clint.textui import colored
from . import MAX_WIDTH
class DialogBase(object):
    """One interactive console question.

    `do()` renders the prompt, reads a line, substitutes `default` on blank
    input, and loops until `validate()` accepts; the `cleanup()`-ed value is
    stored and returned.

    NOTE(review): this is Python 2 code — `raw_input` and the
    print-with-trailing-comma (newline suppression) idiom are used.
    """

    question_prefix = "> "  # Prefix will be showed for first line of question
    question_prefix_len = len(question_prefix)  # Help text (if given), will be indented for len of prefix
    prompt = ":"

    def __init__(self, title, help_text=None, default=None, input_function=None):
        self.title = title
        self.help_text = help_text
        self.default = default
        if self.default is not None:
            # NOTE(review): `assert` is stripped under -O; consider raising.
            assert self.validate(self.default), "Default value is not valid!"
        self.input_function = input_function or raw_input  # For test, should be replaced by `lambda: "input text"`
        self.value = None
        self.setup()

    def setup(self):
        # Subclass hook, invoked at the end of __init__.
        pass

    def get_title_line(self):
        # First line of question. The only line if not help text provided.
        return "{}{}".format(self.question_prefix, self.title)

    def get_help_text(self):
        # We need reformat help text to fit into MAX_WIDTH and to be indented in the same time.
        help_lines = self.help_text.split("\n")
        # Reformat each line, by fit it into adapted MAX_WIDTH, with respect to indent and then prefixed by indent.
        return "\n".join(
            [
                "{}{}".format(
                    " " * self.question_prefix_len,
                    fill(x, MAX_WIDTH - self.question_prefix_len)
                ) for x in help_lines
            ]
        )

    def get_prompt(self):
        # Show the default inside brackets when one exists.
        if self.default:
            return "[{}]{}".format(self.default, self.prompt)
        return self.prompt

    def validate(self, value):
        # Returns the stripped value (truthy) when non-blank, else None.
        if value.strip():
            return value.strip()

    def check_default(self, value):
        # Blank input selects the default, when a default exists.
        if value.strip() == "" and self.default is not None:
            return True
        else:
            return False

    def cleanup(self, value):
        # Subclass hook: convert the validated input into the final value.
        return value

    def render(self):
        # In case help_text is provided, message will be multi line. Otherwise - not.
        # Because of this we should handle prompt differently.
        if self.help_text:
            print(colored.blue(self.get_title_line()))
            print(self.get_help_text())
            print("{}{}".format(" " * self.question_prefix_len, self.get_prompt())),
        else:
            if self.default is not None:
                print("{} {}".format(colored.blue(self.get_title_line()), self.get_prompt())),
            else:
                print("{}{}".format(colored.blue(self.get_title_line()), self.get_prompt())),

    def do(self):
        # Re-ask until validation succeeds.
        while True:
            self.render()
            value = self.input_function()
            if self.check_default(value):
                value = self.default
            if self.validate(value):
                self.value = value
                return self.cleanup(value)
class Ask(DialogBase):
    """Free-form text question; inherits all behaviour from DialogBase."""
    pass
class YesNo(DialogBase):
    """Boolean question accepting a variety of yes/no spellings."""

    ANSWERS_YES = "y yes true sure ok 1 tada".split()
    ANSWERS_NO = "n no not false 0 nah".split()

    def setup(self):
        # A missing default means "no".
        if self.default is None:
            self.default = False

    def get_prompt(self):
        # The capitalised letter marks the default choice.
        if self.default:
            return "[Y/n]{}".format(self.prompt)
        return "[N/y]{}".format(self.prompt)

    def validate(self, value):
        # As typing "RETURN" will return True or False from `check_default`, we add those values there.
        if value in (True, False):
            return True
        return value in self.ANSWERS_YES or value in self.ANSWERS_NO

    def cleanup(self, value):
        # Anything not recognised as "yes" collapses to False.
        return value in self.ANSWERS_YES
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,447 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/chord/forms.py | from django import forms
from django_ace import AceWidget
from .models import Chord
class ChordAdminForm(forms.ModelForm):
    """Admin form for Chord; edits `configuration` in an Ace JSON editor."""

    class Meta:
        # NOTE(review): Django >= 1.8 requires `fields`/`exclude` on a
        # ModelForm Meta — confirm the Django version in use.
        model = Chord
        widgets = {
            'configuration': AceWidget(mode='json', theme='solarized_light', attrs={'style': 'width:1000px'})
        }
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,448 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/tests/patcher_test.py | import unittest
import shutil
import os
from guitar.patcher import Patcher, SettingsPatcher, MiddlewarePatcher, AppsPatcher, UrlsPatcher
class TestPatcher(unittest.TestCase):
    """End-to-end: Patcher.patch rewrites a settings file to a golden copy."""

    settings_py_path = 'tests/settings_py_copy.txt'  # working copy, recreated per test
    settings_py_expect_path = 'tests/settings_py_expect.txt'  # golden result

    def setUp(self):
        # Copy settings example
        shutil.copy2('tests/settings_py.txt', self.settings_py_path)

    def tearDown(self):
        # Remove settings example
        os.remove(self.settings_py_path)

    def test_patcher(self):
        patcher_obj = {
            'settings': {
                'file_path': self.settings_py_path,
                'patch': {'item_to_add': "FOO='BAR'\nAPP_DATA = {'x': 5, 'y':['1','2','3']}"}
            }
        }
        Patcher().patch(patcher_obj)
        with open(self.settings_py_path, 'r') as f:
            content = f.read()
        with open(self.settings_py_expect_path, 'r') as f:
            content_expect = f.read()
        self.assertEqual(content_expect, content)
class TestPatchSettings(unittest.TestCase):
    """SettingsPatcher appends the patch text at the end of the settings."""

    settings_py = """
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
    settings_py_after_patch = """
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
FOO='BAR'
APP_DATA = {'x': 5, 'y':['1','2','3']}
"""

    def test_patch_settings(self):
        patch_obj = {'item_to_add': "FOO='BAR'\nAPP_DATA = {'x': 5, 'y':['1','2','3']}"}
        new_settings_py = SettingsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(self.settings_py_after_patch, new_settings_py)
class TestMiddlewarePatcher(unittest.TestCase):
    """MiddlewarePatcher inserts into MIDDLEWARE_CLASSES before/after anchors."""

    settings_py = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""

    def test_patch_after(self):
        # Insert immediately after the named middleware.
        settings_py_append_after = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'foo.middleware.bar',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'before': None, 'after': 'django.middleware.csrf.CsrfViewMiddleware', 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_after, new_settings_py)

    def test_append_before(self):
        # Insert immediately before the named middleware.
        settings_py_append_before = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'foo.middleware.bar',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'after': None, 'before': 'django.middleware.csrf.CsrfViewMiddleware', 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_first(self):
        # "before" the first entry puts the item at the head of the tuple.
        settings_py_append_before = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'foo.middleware.bar',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'after': None, 'before': 'django.middleware.common.CommonMiddleware', 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_last(self):
        # Appending after the last entry, or with no anchors at all,
        # both land the item at the tail.
        settings_py_append_before = """
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'foo.middleware.bar',
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'before': None, 'after': 'django.contrib.messages.middleware.MessageMiddleware', 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
        patch_obj = {'before': None, 'after': None, 'item_to_add': 'foo.middleware.bar'}
        new_settings_py = MiddlewarePatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
class AppsTestPatcher(unittest.TestCase):
    """AppsPatcher inserts into INSTALLED_APPS before/after anchor apps."""

    settings_py = """
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""

    def test_patch_after(self):
        settings_py_append_after = """
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'foo.bar',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'before': None, 'after': 'django.contrib.sites', 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_after, new_settings_py)

    def test_append_before(self):
        settings_py_append_before = """
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'foo.bar',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'after': None, 'before': 'django.contrib.sites', 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_first(self):
        settings_py_append_before = """
INSTALLED_APPS = (
'foo.bar',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'after': None, 'before': 'django.contrib.auth', 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_last(self):
        # Both "after the last app" and "no anchors" append at the tail.
        settings_py_append_before = """
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'foo.bar',
)
ROOT_URLCONF = 'guitar.urls'
WSGI_APPLICATION = 'guitar.wsgi.application'
"""
        patch_obj = {'before': None, 'after': 'django.contrib.admin', 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
        patch_obj = {'before': None, 'after': None, 'item_to_add': 'foo.bar'}
        new_settings_py = AppsPatcher().apply_patch(self.settings_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
class UrlsTestPatcher(unittest.TestCase):
    """UrlsPatcher inserts a url() entry before/after pattern anchors."""

    urls_py = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(
r'^admin/',
include(admin.site.urls)
),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls'))
)
"""

    def test_patch_after(self):
        # Anchor matches a multi-line url() entry; insertion goes after it.
        settings_py_append_after = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(
r'^admin/',
include(admin.site.urls)
),
url(_(r'^foo/'), include('foo.urls')),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls'))
)
"""
        patch_obj = {'before': None, 'after': '^admin/', 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_urls_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(settings_py_append_after, new_urls_py)

    def test_append_before(self):
        urls_py_append_before = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(
r'^admin/',
include(admin.site.urls)
),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(_(r'^foo/'), include('foo.urls')),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls'))
)
"""
        patch_obj = {'after': None, 'before': 'django.views.generic.simple.redirect_to', 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_settings_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(urls_py_append_before, new_settings_py)

    def test_append_first(self):
        settings_py_append_before = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(_(r'^foo/'), include('foo.urls')),
url(
r'^admin/',
include(admin.site.urls)
),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls'))
)
"""
        patch_obj = {'after': None, 'before': 'admin.site.urls', 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_settings_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)

    def test_append_last(self):
        # Both "after the last entry" and "no anchors" append at the tail.
        settings_py_append_before = """
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(
r'^admin/',
include(admin.site.urls)
),
url(
_(r'^branch/(?P<slug>[+\w\s-]+)/$'),
'ololo.views.trololo',
name='ololo-trololo'),
url(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to',
{'url': os.path.join(settings.STATIC_URL, 'i/favicon.ico')}),
url(_(r'^accounts/'), include('profiles.urls')),
url(_(r'^accounts/'), include('django.contrib.auth.urls')),
url(_(r'^foo/'), include('foo.urls')),
)
"""
        patch_obj = {'before': None, 'after': 'django.contrib.auth.urls', 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_settings_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
        patch_obj = {'before': None, 'after': None, 'item_to_add': "url(_(r'^foo/'), include('foo.urls'))"}
        new_settings_py = UrlsPatcher().apply_patch(self.urls_py, patch_obj)
        self.assertEqual(settings_py_append_before, new_settings_py)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,449 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/configurator/tests/test_open_config.py | import os
from django.test import TestCase
from django.conf import settings
from configurator.utils import prepare_configuration_json
# Public test API of this module.
__all__ = [
    'TestOpenConfig'
]

# Directory holding this app's test fixtures.
TESTS_PATH = os.path.join(settings.PROJECT_DIR, 'configurator', 'tests')
class TestOpenConfig(TestCase):
    """Golden-file test for prepare_configuration_json."""

    def test_open(self):
        app_name = 'test'
        config_json = prepare_configuration_json(app_name)
        # The expected fixture ends with a trailing newline that the helper
        # output lacks, hence the `+ '\n'`.
        with open(os.path.join(TESTS_PATH, 'test_open_config_expect.txt'), 'r') as expect:
            self.assertEqual(expect.read(), config_json + '\n')
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,450 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/fetcher/__init__.py | import urllib2
import json
# Hard-coded package names (no live backend data yet).
FAKE_PACKAGES = (
    'south',
    'django-debug-toolbar',
    'django-extensions',
    'django-social-auth',
)


class GuitarWebAPI(object):
    """Thin HTTP client for the guitar web API.

    NOTE(review): Python 2 code (`urllib2`, print statements).
    """

    def __init__(self, url):
        # Base URL; the request builders append 'search/<q>/' to it.
        self.url = url

    def search(self, q):
        """Fetch and return the decoded JSON search result for *q*."""
        url = self.url + 'search/' + q + '/'
        res = urllib2.urlopen(url)
        return json.loads(res.read())

    def get_config(self, package, version=None):
        # NOTE(review): looks unfinished — `version` is unused, the endpoint
        # duplicates `search`, nothing is returned, and the prints are
        # debug output; confirm intended behaviour.
        url = self.url + 'search/' + package + '/'
        print url
        res = urllib2.urlopen(url)
        print res


# Module-level client pointing at a local development server.
fetcher = GuitarWebAPI('http://localhost:8000/api/v1/')
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,451 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/inquirer/__init__.py | MAX_WIDTH = 79
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,452 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/cmd/handlers/base.py | from guitar import VERSION
class CommandHandler(object):
    """Base class for CLI command handlers.

    Stores the parsed command and its options, then immediately dispatches
    to handle(), which concrete subclasses must override.
    """

    def __init__(self, command, options):
        self.command = command
        self.options = options
        self.handle()

    def handle(self):
        raise NotImplementedError(
            'You should overwrite `handle` method of `{}`'.format(
                self.__class__.__name__))
class VersionHandler(CommandHandler):
    """Handler for the `version` command: prints the installed guitar version."""

    def handle(self):
        banner = "guitar {}".format(VERSION)
        print(banner)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,453 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/tests/__init__.py | import patcher_test
import configurator_test
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,454 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/configurator/__init__.py | class Question(dict):
def __init__(self, patcher_type, configurator, **kwargs):
self.configurator = configurator
self.patcher_type = patcher_type
super(Question, self).__init__(**kwargs)
# dict answer_id => (answer_variable, answer_variable_value)
self.answers = {}
self.answers = dict(
(answer['key'], (answer.get('variable') or self.get('variable'), answer.get('value')))
for answer in self.get('answers', [])
)
def answer(self, answer_key):
if self['type'] == 'input':
variable, answer = (self['variable'], answer_key or self.get('default'))
else:
variable, answer = self.answers[answer_key]
self.configurator.set_variable(self.patcher_type, variable, answer)
class Configurator(object):
    """Drives the question/answer flow that fills patch template variables.

    Iterating over a Configurator yields Question objects one at a time.
    NOTE(review): uses the Python 2 iterator protocol (`next`, not
    `__next__`); confirm target interpreter version.
    """

    def __init__(self, config_json, file_paths):
        # patcher type -> dict of template variable values (or None)
        self.templates_data = {}
        self.config = config_json
        self.questions = []
        # patcher type -> path of the file the patch applies to
        self.file_paths = file_paths
        for pather_config in config_json:
            template_variables = pather_config.get('variables')
            # Pre-seed the template data with empty strings for each variable.
            if template_variables:
                self.templates_data[pather_config['type']] = dict(
                    zip(template_variables, [''] * len(template_variables))
                )
            else:
                self.templates_data[pather_config['type']] = None
            # Initialize questions, numbering each answer with a running key.
            for question in pather_config.get('questions', []):
                answer_id = 0
                for answer in question.get('answers', []):
                    answer['key'] = answer_id
                    answer_id += 1
                self.questions.append(Question(pather_config['type'], self, **question))

    def __iter__(self):
        return self

    def next(self):
        """Pop and return the next applicable question.

        Questions whose 'exclude' conditions match previously answered
        variables are skipped recursively. Raises StopIteration when the
        queue is exhausted.
        """
        if not self.questions:
            raise StopIteration
        question = self.questions.pop(0)
        if 'exclude' in question:
            exclude = False
            for excl_condition in question['exclude']:
                patcher_data = self.templates_data[question.patcher_type]
                # TODO: check exclude format
                if patcher_data.get(excl_condition['variable']) == excl_condition['value']:
                    exclude = True
                    break
            if exclude:
                return self.next()
        return question

    def set_variable(self, patcher_type, variable, value):
        """Store an answered value for a patcher's template variable."""
        self.templates_data[patcher_type][variable] = value

    def get_patches(self):
        """Render each patcher's template with the collected variables.

        :return: {patch_type: {'patch': {...}, 'file_path': ...}}
        """
        patches = {}
        for patcher_config in self.config:
            # TODO: validation
            patch_type = patcher_config['type']
            template = patcher_config['template']
            if self.templates_data[patch_type]:
                # %-style substitution of the collected template variables.
                template %= self.templates_data[patch_type]
            patches[patch_type] = {
                'patch': {
                    'item_to_add': template,
                    'before': patcher_config.get('add_before'),
                    'after': patcher_config.get('add_after')
                },
                'file_path': self.file_paths[patch_type]
            }
        return patches

    def get_template_variables(self, template):
        """Extract the field names used in a format-style template.

        NOTE(review): `Formatter` is not imported in this chunk — presumably
        `string.Formatter`; confirm the import exists at the top of the file.
        """
        return [item[1] for item in Formatter().parse(template) if item]
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,455 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/configurator/tests/__init__.py | from guitar.apps.configurator.tests.test_open_config import *
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,456 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/chord/admin.py | from django.contrib import admin
from .models import Chord
from .forms import ChordAdminForm
class ChordAdmin(admin.ModelAdmin):
    """Admin configuration for Chord objects."""

    # Custom form from .forms — presumably adds validation for the
    # configuration payload; confirm in forms.py.
    form = ChordAdminForm
    list_display = ('title', 'is_active', 'updated')
    fieldsets = (
        (None, {
            'fields': (('title', 'is_active'),)
        }),
        (None, {
            'fields': ('configuration',)
        })
    )


admin.site.register(Chord, ChordAdmin)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,457 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/urls.py | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()

# NOTE(review): patterns()-based urlconf with string view paths — this API
# was removed in Django 1.10, so the project targets an older Django.
urlpatterns = patterns('',
    url(r'^$', 'chord.views.home', name="home"),
    url(r'', include("chord.urls", namespace='chord')),
    url(r'api/v1/', include("chord.urls_api", namespace='api')),
    url(r'^admin/', include(admin.site.urls)),
)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,458 | django-stars/guitar | refs/heads/master | /guitar-web/src/guitar/apps/chord/views.py | from django.shortcuts import render, get_object_or_404
from .models import Chord
def home(request):
    """Render the landing page."""
    return render(request, 'home.html')
def chord_list(request):
    """Render the list of all active chords."""
    context = {'chords': Chord.active.all()}
    return render(request, 'chord/list.html', context)
def chord_details(request, title):
    """Render the detail page for one active chord, or raise Http404."""
    chord = get_object_or_404(Chord.active, title=title)
    return render(request, 'chord/details.html', {'chord': chord})
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,459 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/patcher/item_patchers.py | import re
class ValidationError(Exception):
    """Raised when patched file content fails validation."""
class CantApplyPatch(Exception):
    """Raised when a patch target cannot be located in the file content."""
class ItemPatcher(object):
    """Template for patchers that insert an item into a settings-like file."""

    def apply_patch(self, content, patch):
        """
        Write your code to apply patch to file content.

        :param content: (str) file content
        :param patch: patch objects
        """
        pass

    def apply(self, content, patch):
        """Apply the patch, validate the result, then return it."""
        patched = self.apply_patch(content, patch)
        self.validate(patched)
        return patched

    def validate(self, file_obj):
        """Hook for subclasses to sanity-check the patched content."""
        pass
class SettingsPatcher(ItemPatcher):
    """Appends the patch snippet to the end of a settings file."""

    def apply_patch(self, content, patch_obj):
        # For now simply append the snippet at the end of the file.
        # TODO: reformat code reindent.py?/?
        return content + '\n%s\n' % patch_obj['item_to_add']
class ListPatcher(ItemPatcher):
    """Inserts a quoted item into a list/tuple setting (e.g. INSTALLED_APPS).

    Works directly on the settings-file text with regular expressions, so it
    is sensitive to formatting; subclasses supply the variable name.
    """

    def apply_patch(self, content, patch_obj, variable_name):
        """Insert patch_obj['item_to_add'] into the list named *variable_name*.

        Raises CantApplyPatch when the variable cannot be located.
        """
        # Regular expression of a list or tuple variable with open brackets.
        list_start_reg = r'^%s\s*= *[\(\[]+\n*' % variable_name
        list_start = re.search(list_start_reg, content, re.M)
        if not list_start:
            raise CantApplyPatch('Cant find %s variable' % variable_name)
        list_start = list_start.group()
        # Group 1: the list items; group 2: the closing bracket(s) plus the
        # start of the next (upper-case) variable definition.
        list_variable_reg = r'^%s\s*= *[\(\[]+\n*([^\)\]]+)([\)\]]+\n*[A-Z_0-9= ]*)' % variable_name
        try:
            list_items_str, next_variable = re.search(list_variable_reg, content, re.M).groups()
        except (AttributeError, ValueError):
            raise CantApplyPatch
        # 'before'/'after' select the anchor item relative to which we insert.
        where = 'before' if patch_obj['before'] else 'after' if patch_obj['after'] else None
        if where:
            item_to_find = patch_obj[where]
        else:
            item_to_find = None
        # Append the new list item before or after the given list item.
        item_to_append = "'%s'," % patch_obj['item_to_add']
        if item_to_find:
            list_items_str = self._append(list_items_str, item_to_find, item_to_append, where)
        else:
            list_items_str = self._append_to_end(list_items_str, item_to_append)
        # NOTE(review): split() assumes *next_variable* occurs exactly once —
        # a repeated match would raise ValueError here; confirm.
        first_part, last_part = content.split(next_variable)
        first_part, __ = first_part.split(list_start)
        content = ''.join([first_part, list_start, list_items_str, next_variable, last_part])
        return content

    def _append_to_end(self, list_items_str, item_to_append):
        """Append after the last item, adding a trailing comma if missing."""
        reg = r'(\,?)[ \t]*$'
        comma = re.search(reg, list_items_str).groups()[0]
        if not comma:
            list_items_str = self._add_comma(list_items_str)
        return list_items_str + self._prepare_item_to_add(list_items_str, item_to_append)

    def _prepare_item_to_add(self, list_items_str, item_to_append):
        """Prefix the item with the indentation used by the existing items."""
        return self._get_identation(list_items_str) + item_to_append + '\n'

    def _add_comma(self, string):
        """Insert a comma before the final newline of *string*."""
        return re.sub(r'\n$', ',\n', string)

    def _get_identation(self, list_items_str):
        # (sic: historical misspelling of 'indentation' kept for compatibility)
        """Return the leading whitespace of the first quoted list item."""
        identation = re.search(r"([ \t]*)['\"]", list_items_str) or ''
        if identation:
            identation = identation.groups()[0]
        return identation

    def _append(self, list_items_str, item_to_find, item_to_append, where):
        """Insert before/after the first occurrence of *item_to_find*;
        falls back to appending at the end when the anchor is absent."""
        reg = r"[ \t]*'%s' *(,?)\n*" % item_to_find
        has_item = re.search(reg, list_items_str)
        if not has_item:
            list_items_str = self._append_to_end(list_items_str, item_to_append)
        else:
            item_to_append = self._prepare_item_to_add(list_items_str, item_to_append)
            item_to_find = has_item.group()
            comma = has_item.groups()[0]
            splited_list_data = list_items_str.split(item_to_find)
            # Now only append before/after the first found item.
            if where == 'after':
                splited_list_data[1] = item_to_append + splited_list_data[1]
            else:
                splited_list_data[0] = splited_list_data[0] + item_to_append
            if not comma:
                # The anchor item had no trailing comma; add one.
                item_to_find = re.sub(r'\n$', ',\n', item_to_find)
            list_items_str = item_to_find.join(splited_list_data)
        return list_items_str
class MiddlewarePatcher(ListPatcher):
    """ListPatcher pinned to the MIDDLEWARE_CLASSES setting."""

    def apply_patch(self, content, patch_obj):
        return super(MiddlewarePatcher, self).apply_patch(
            content, patch_obj, 'MIDDLEWARE_CLASSES')
class AppsPatcher(ListPatcher):
    """ListPatcher pinned to the INSTALLED_APPS setting."""

    def apply_patch(self, content, patch_obj):
        return super(AppsPatcher, self).apply_patch(
            content, patch_obj, 'INSTALLED_APPS')
class UrlsPatcher(ItemPatcher):
    """Inserts a new url(...) entry into a Django urls.py urlpatterns block."""

    def apply_patch(self, content, patch_obj):
        """Insert patch_obj['item_to_add'] into urlpatterns, optionally
        before/after an existing entry matched by substring."""
        # Split urls.py by 'url(' — each part ends one pattern entry.
        parts = content.split('url(')
        item_to_find = patch_obj.get('before') or patch_obj.get('after')
        # By default the item will be added at the end.
        place_id_to_append = len(parts) - 1
        # If before/after was requested, locate the anchor entry.
        if item_to_find:
            index_where_item = None
            # Find first entry.
            for i, part in enumerate(parts):
                if item_to_find in part:
                    index_where_item = i
                    break
            if index_where_item is not None:
                if patch_obj.get('before'):
                    # Select the part that precedes the anchor entry.
                    place_id_to_append = index_where_item - 1
                else:
                    place_id_to_append = index_where_item
        item_to_append = self._prepare_item_to_append(patch_obj['item_to_add'])
        if place_id_to_append == len(parts) - 1:
            # Appending at the end of the list: indent before the new entry.
            item_to_append = self._get_identation(content) + item_to_append
        else:
            item_to_append = item_to_append + self._get_identation(content)
        parts[place_id_to_append] = self._append_after(
            parts[place_id_to_append],
            item_to_append)
        return 'url('.join(parts)

    def _get_identation(self, content):
        """Return the indentation used by existing url( lines."""
        reg = re.search(r'^(\s*)url\(', content, re.M)
        identation = reg.groups()[0] if reg else ''
        return identation

    def _prepare_item_to_append(self, item_to_append):
        """Terminate the new entry with a comma and newline."""
        return '%s,\n' % item_to_append

    def _has_comma(self, string):
        """True when *string* already ends with a comma (ignoring whitespace)."""
        reg = re.search(r'(,)\s*$', string)
        has_comma = True if reg and reg.groups()[0] else False
        return has_comma

    def _append_after(self, urlpattern_item, item_to_append):
        """Insert *item_to_append* after the bracket that terminates the
        url(...) call contained in *urlpattern_item*."""
        closing_breckets_count = urlpattern_item.count(')')
        if closing_breckets_count:
            # The closing-minus-opening bracket difference locates the
            # bracket that actually ends the url(...) call.
            closing_breckets_count = closing_breckets_count - urlpattern_item.count('(')
            splited_items = urlpattern_item.split(')')
            place_to_append = splited_items[-closing_breckets_count]
            if not self._has_comma(place_to_append):
                place_to_append = re.sub('\s*$', ',\n', place_to_append)
            place_to_append += item_to_append
            splited_items[-closing_breckets_count] = place_to_append
            urlpattern_item = ')'.join(splited_items)
        return urlpattern_item
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,460 | django-stars/guitar | refs/heads/master | /guitar-package/guitar/guitar/inquirer/messages.py | from clint.textui import colored
from textwrap import fill
from . import MAX_WIDTH
class Message(object):
    """Console message: wraps text to a maximum width and colorizes it."""

    KINDS = {
        "message": colored.white,
        "success": colored.green,
        "warning": colored.yellow,
        "error": colored.red
    }

    def __init__(self, message, max_width=None, kind=None, delay=False):
        self.message = message
        self.max_width = max_width or MAX_WIDTH
        self.kind = kind or "message"
        assert self.kind in self.KINDS.keys(), "Unsupported type of message"
        # Unless rendering is deferred, print immediately on construction.
        if not delay:
            self.render()

    def prepare_message(self, message):
        """Wrap the text so no line exceeds the configured width."""
        return fill(message, self.max_width)

    def colorize(self, message):
        """Apply the color associated with this message's kind."""
        color = self.KINDS[self.kind]
        return color(message)

    def render(self):
        """Wrap, colorize and print the message."""
        wrapped = self.prepare_message(self.message)
        print(self.colorize(wrapped))
def success(msg):
    """Print *msg* as a green success message."""
    Message(kind="success", message=msg)


def warning(msg):
    """Print *msg* as a yellow warning."""
    Message(kind="warning", message=msg)


def error(msg):
    """Print *msg* as a red error."""
    Message(kind="error", message=msg)


def message(msg):
    """Print *msg* as a plain message."""
    Message(kind="message", message=msg)
| {"/guitar-package/guitar/guitar/cmd/handlers/investigate.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/install.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/search.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-package/guitar/guitar/cmd/handlers/create.py": ["/guitar-package/guitar/guitar/cmd/handlers/base.py"], "/guitar-web/src/guitar/apps/chord/views_api.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/dialogs.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"], "/guitar-web/src/guitar/apps/chord/forms.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-web/src/guitar/apps/chord/admin.py": ["/guitar-web/src/guitar/apps/chord/models.py", "/guitar-web/src/guitar/apps/chord/forms.py"], "/guitar-web/src/guitar/apps/chord/views.py": ["/guitar-web/src/guitar/apps/chord/models.py"], "/guitar-package/guitar/guitar/inquirer/messages.py": ["/guitar-package/guitar/guitar/inquirer/__init__.py"]} |
57,461 | fuadtn/wnd_app | refs/heads/master | /usb_control/usb_server/cUSB/models.py | from django.db import models
class ListModel(models.Model):
    """Whitelist of registered (allowed) USB devices."""

    # Hardware identifier string reported by the client agent.
    device_id = models.CharField(max_length=512, verbose_name='Идентификатор')

    class Meta:
        verbose_name = 'Список зарегистрированных USB-устройств'
        verbose_name_plural = 'Список зарегистрированных USB-устройств'
        db_table = 'usb_list'
class JournalModel(models.Model):
    """Event log of USB device activity reported by client machines."""

    device_id = models.CharField(max_length=512, verbose_name='Идентификатор', null=True)
    username = models.CharField(max_length=512, verbose_name='Имя пользователя', null=True)
    computername = models.CharField(max_length=512, verbose_name='Имя компьютера', null=True)
    dtime = models.DateTimeField(verbose_name='Время', null=True)
    event = models.CharField(max_length=1024, verbose_name='Описание', null=True)

    class Meta:
        verbose_name = 'Журнал событий'
        verbose_name_plural = 'Журнал событий'
db_table = 'usb_journal' | {"/usb_control/usb_server/cUSB/admin.py": ["/usb_control/usb_server/cUSB/models.py"], "/usb_control/usb_server/cUSB/views.py": ["/usb_control/usb_server/cUSB/models.py"]} |
57,462 | fuadtn/wnd_app | refs/heads/master | /usb_control/usb_server/cUSB/migrations/0002_auto_20170104_1737.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-04 17:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 1.9: reworks JournalModel — drops `eventtime`,
    adds `dtime` and `event`, and relaxes fields to null=True.
    Auto-generated migrations should normally not be edited by hand."""

    dependencies = [
        ('cUSB', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='journalmodel',
            name='eventtime',
        ),
        migrations.AddField(
            model_name='journalmodel',
            name='dtime',
            field=models.DateTimeField(null=True, verbose_name='Время'),
        ),
        migrations.AddField(
            model_name='journalmodel',
            name='event',
            field=models.CharField(max_length=1024, null=True, verbose_name='Описание'),
        ),
        migrations.AlterField(
            model_name='journalmodel',
            name='computername',
            field=models.CharField(max_length=512, null=True, verbose_name='Имя компьютера'),
        ),
        migrations.AlterField(
            model_name='journalmodel',
            name='device_id',
            field=models.CharField(max_length=512, null=True, verbose_name='Идентификатор'),
        ),
        migrations.AlterField(
            model_name='journalmodel',
            name='username',
            field=models.CharField(max_length=512, null=True, verbose_name='Имя пользователя'),
        ),
    ]
| {"/usb_control/usb_server/cUSB/admin.py": ["/usb_control/usb_server/cUSB/models.py"], "/usb_control/usb_server/cUSB/views.py": ["/usb_control/usb_server/cUSB/models.py"]} |
57,463 | fuadtn/wnd_app | refs/heads/master | /usb_control/usb_server/cUSB/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-12-25 11:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial schema: creates the JournalModel and ListModel
    tables. Auto-generated migrations should normally not be edited by hand."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='JournalModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('device_id', models.CharField(max_length=512, verbose_name='Идентификатор')),
                ('username', models.CharField(max_length=512, verbose_name='Имя пользователя')),
                ('computername', models.CharField(max_length=512, verbose_name='Имя компьютера')),
                ('eventtime', models.DateTimeField(verbose_name='Время')),
            ],
            options={
                'db_table': 'usb_journal',
                'verbose_name_plural': 'Журнал событий',
                'verbose_name': 'Журнал событий',
            },
        ),
        migrations.CreateModel(
            name='ListModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('device_id', models.CharField(max_length=512, verbose_name='Идентификатор')),
            ],
            options={
                'db_table': 'usb_list',
                'verbose_name_plural': 'Список зарегистрированных USB-устройств',
                'verbose_name': 'Список зарегистрированных USB-устройств',
            },
        ),
    ]
| {"/usb_control/usb_server/cUSB/admin.py": ["/usb_control/usb_server/cUSB/models.py"], "/usb_control/usb_server/cUSB/views.py": ["/usb_control/usb_server/cUSB/models.py"]} |
57,464 | fuadtn/wnd_app | refs/heads/master | /usb_control/usb_server/cUSB/__init__.py | default_app_config = "cUSB.apps.CusbConfig"
| {"/usb_control/usb_server/cUSB/admin.py": ["/usb_control/usb_server/cUSB/models.py"], "/usb_control/usb_server/cUSB/views.py": ["/usb_control/usb_server/cUSB/models.py"]} |
57,465 | fuadtn/wnd_app | refs/heads/master | /usb_control/usb_server/cUSB/admin.py | from django.contrib import admin
from .models import ListModel, JournalModel
class ListAdmin(admin.ModelAdmin):
    """Admin list/filter setup for the registered-devices whitelist."""

    list_display = ('device_id',)
    list_filter = ('device_id',)
class JournalAdmin(admin.ModelAdmin):
    """Read-only admin for the USB event journal."""

    list_display = ('dtime', 'computername', 'username', 'device_id', 'event')
    list_filter = ('dtime', 'computername',)

    def has_add_permission(self, request):
        # Journal entries are posted by client agents, never created by hand.
        return False


admin.site.register(ListModel, ListAdmin)
admin.site.register(JournalModel, JournalAdmin)
| {"/usb_control/usb_server/cUSB/admin.py": ["/usb_control/usb_server/cUSB/models.py"], "/usb_control/usb_server/cUSB/views.py": ["/usb_control/usb_server/cUSB/models.py"]} |
57,466 | fuadtn/wnd_app | refs/heads/master | /usb_control/usb_server/cUSB/apps.py | from django.apps import AppConfig
class CusbConfig(AppConfig):
    """App configuration for the cUSB application."""

    name = 'cUSB'
verbose_name = 'USB-устройства' | {"/usb_control/usb_server/cUSB/admin.py": ["/usb_control/usb_server/cUSB/models.py"], "/usb_control/usb_server/cUSB/views.py": ["/usb_control/usb_server/cUSB/models.py"]} |
57,467 | fuadtn/wnd_app | refs/heads/master | /usb_control/usb_server/cUSB/views.py | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth import authenticate
from django.views.decorators.csrf import csrf_exempt
from .models import ListModel, JournalModel
from rest_framework import status
import datetime
@csrf_exempt
def autorization(request):
    """Check posted credentials.

    Returns 200 when valid, 401 when invalid, 204 when credentials are
    missing or the method is not POST.
    (sic: endpoint name keeps its historical misspelling of 'authorization'
    because clients call it by name.)
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        # BUG FIX: the original used `or`, so a request providing only one of
        # the two fields still called authenticate() and got a misleading 401.
        # Both credentials must be present before attempting authentication.
        if (username is not None) and (password is not None):
            login = authenticate(username=username, password=password)
            if login is not None:
                return HttpResponse(status=status.HTTP_200_OK)
            else:
                return HttpResponse(status=status.HTTP_401_UNAUTHORIZED)
    return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@csrf_exempt
def send_list(request):
    """Register a space-separated list of device ids posted as 'devices'."""
    if request.method == 'POST':
        devices = request.POST.get('devices')
        if devices is not None:
            for device_id in devices.split(' '):
                entry = ListModel()
                entry.device_id = device_id
                entry.save()
            return HttpResponse(status=status.HTTP_200_OK)
    return HttpResponse(status=status.HTTP_204_NO_CONTENT)
def get_list(request):
    """Return all registered device ids, one per line, as plain text.

    Responds 204 when no devices are registered.
    """
    objects = ListModel.objects.all()
    # BUG FIX: a QuerySet is never None, so the original `is not None` check
    # always passed and an empty registry still answered 200 with an empty
    # body. Test emptiness instead.
    if objects:
        s_result = ''
        for o in objects:
            s_result += o.device_id + '\n'
        return HttpResponse(s_result, content_type='text/plain', status=status.HTTP_200_OK)
    return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@csrf_exempt
def send_event(request):
    """Store a USB event posted by a client agent into the journal."""
    if request.method == 'POST':
        event = JournalModel()
        event.dtime = datetime.datetime.now()
        event.username = request.POST.get('username')
        event.computername = request.POST.get('computername')
        event.device_id = request.POST.get('device_id')
        event.event = request.POST.get('event')
        event.save()
        # BUG FIX: a Django view must return an HttpResponse; the original
        # returned None after a successful POST, which raises a ValueError
        # in Django's response handling.
        return HttpResponse(status=status.HTTP_200_OK)
    return HttpResponse(status=status.HTTP_204_NO_CONTENT)
| {"/usb_control/usb_server/cUSB/admin.py": ["/usb_control/usb_server/cUSB/models.py"], "/usb_control/usb_server/cUSB/views.py": ["/usb_control/usb_server/cUSB/models.py"]} |
57,518 | BigCet/photo_to_excel | refs/heads/master | /PhotoToExcel.py | from my_modules.my_functions import get_root_path, get_photo_files, get_photo_data
def main():
    """Collect metadata from all jpg photos in a user-chosen folder."""
    # Ask the user for the root folder and validate that it exists.
    root_folder = get_root_path()
    # List the jpg files found directly in that folder.
    photos = get_photo_files(root_folder)
    # Collect (name, dimension, date) metadata from each image.
    # TODO(review): photo_data_list is currently unused — presumably meant to
    # be exported to an Excel file later; confirm.
    photo_data_list = [get_photo_data(i) for i in photos]


if __name__ == '__main__':
    main()
| {"/PhotoToExcel.py": ["/my_modules/my_functions.py"]} |
57,519 | BigCet/photo_to_excel | refs/heads/master | /my_modules/my_functions.py | import os
from PIL import Image, ExifTags
def get_root_path():
    """Prompt the user for a folder path and return it once it exists.

    :raises FileNotFoundError: when the entered path does not exist.
    """
    user_input = input("Root folder:")
    # Explicit exception instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently skip this validation.
    if not os.path.exists(user_input):
        raise FileNotFoundError(f"Nincs ilyen **** {user_input} **** elérési út.")
    return user_input
def get_photo_files(folder_path):
    """Return the full paths of all *.jpg files directly inside *folder_path*.

    Matching is case-insensitive; directory order follows os.listdir().
    """
    jpg_paths = []
    for entry in os.listdir(folder_path):
        if entry.lower().endswith(".jpg"):
            jpg_paths.append(os.path.join(folder_path, entry))
    return jpg_paths
def get_photo_data(file_path):
    """Extract name, dimensions and (when present) the original capture date.

    :param file_path: path to a jpg image
    :return: dict with keys 'Name', 'Dimension' and optionally 'Date'
    """
    image_name = os.path.basename(file_path)
    # FIX: use a context manager so the underlying file handle is closed
    # deterministically instead of leaking until garbage collection.
    with Image.open(file_path) as img:
        image_data = {
            "Name": image_name,
            "Dimension": f"{img.size[0]}x{img.size[1]}",
        }
        # NOTE(review): _getexif() is a private Pillow API returning None
        # when the image carries no EXIF block; consider getexif().
        exif_data = img._getexif()
    if not exif_data:
        return image_data
    for key, value in exif_data.items():
        tag_name = ExifTags.TAGS.get(key)
        if not tag_name:
            continue
        if tag_name == "DateTimeOriginal":
            # FIX: plain string key (the original used a pointless f-string).
            image_data["Date"] = value
    return image_data
if __name__ == '__main__':
    # Manual smoke test against a developer-local file; runs only when this
    # module is executed directly.
    image_data = get_photo_data(r"C:\Users\Hp\Desktop\Unoka\BZ07_Toth_Kornel_Ivan_2020-10-17_01.JPG")
    print(image_data)
57,523 | juliendurand/pricing-tool | refs/heads/master | /src/pypricing/main.py | import os
import subprocess
import sys
import config
def compile():
    """Build the C++ regression solver from src/cpp into ./bin/glm.

    NOTE: shadows the builtin compile(); the name is kept because the
    module entry point calls it by this name.
    """
    print("Compiling regression algorithms...")
    cmd = "time c++ -Wall -std=c++11 -O3 ./src/cpp/*.cpp -o ./bin/glm"
    outcome = subprocess.run(cmd, shell=True)
    if outcome.returncode != 0:
        raise Exception("Fatal Error during compilation.")
    print("Compilation Finished.\n")
def fit(config):
    """Run the compiled GLM binary on the model described by *config*.

    :param config: object exposing get_result_path() and a `filename` attr
    :raises Exception: when the solver exits with a non-zero status
    """
    result_path = config.get_result_path()
    # exist_ok avoids the check-then-create race of the original version.
    os.makedirs(result_path, exist_ok=True)
    print('Fitting model...')
    command = 'time ./bin/glm ' + config.filename
    result = subprocess.run(command, shell=True)
    if result.returncode != 0:
        raise Exception('Fatal Error during model fitting.')
    print('Model Finished\n')
def generate_documentation(config):
    """Run the python documentation generator for the fitted model."""
    cmd = 'time python src/py/documentation.py ' + config.filename
    outcome = subprocess.run(cmd, shell=True)
    if outcome.returncode != 0:
        raise Exception('Fatal Error during documentation.')
    print('Documentation Finished\n')
if __name__ == '__main__':
    # Entry point: expects exactly one CLI argument, the model config file.
    if len(sys.argv) != 2:
        raise Exception("Invalid number of options, expecting only one : "
                        "[config filename].")
    config_filename = sys.argv[1]
    configuration = config.Config(config_filename)
    # Pipeline: build the C++ solver, fit the model, render the report.
    compile()
    fit(configuration)
    generate_documentation(configuration)
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,524 | juliendurand/pricing-tool | refs/heads/master | /src/pypricing/dataset.py | import csv
import itertools as it
import json
import math
import os
import sys
import time
import numpy as np
import pandas as pd
start_time = 0
def detect_csv_separator(filename):
    """Detect the delimiter of *filename* by sniffing its first line."""
    with open(filename) as handle:
        header = handle.readline()
    return csv.Sniffer().sniff(header).delimiter
def count_line(filename):
    '''
    Fast count of the number of newline characters in *filename*.

    Reads the raw (unbuffered) stream in 1 MiB chunks. Fix: the previous
    version opened the file and never closed it, leaking the handle; the
    ``with`` block now guarantees it is released.
    '''
    with open(filename, 'rb') as f:
        bufgen = it.takewhile(lambda x: x, (f.raw.read(1024 * 1024)
                                            for _ in it.repeat(None)))
        return sum(buf.count(b'\n') for buf in bufgen)
def sniff_field(df, field):
    """Classify a column: 'Ignore' when it has more than 200 distinct
    values, 'Input' otherwise. Prints the name and cardinality."""
    n_unique = len(df[field].unique())
    print(field + ' ' + str(n_unique))
    status = 'Ignore' if n_unique > 200 else 'Input'
    return (field, status)
def get_fields(filename):
    """Classify every column of *filename* from a 100k-row sample."""
    sample = pd.read_csv(filename,
                         sep=detect_csv_separator(filename),
                         nrows=100000)
    return [sniff_field(sample, column) for column in sample]
def create_data_file_from_list(lst, out_filename, dtype, shape):
    """Write a list in a binary file as a numpy array.

    Args:
        lst: The list that will be written in the file.
        out_filename: The name of the binary file. It must be in the same
            directory.
        dtype: The type of the numpy array.
        shape: The shape of the numpy array.

    Fix: the old body re-opened ``out_filename`` inside the ``with`` block,
    leaking a second file handle; the memmap is now built directly on the
    managed handle.
    """
    with open(out_filename, 'wb+') as out_file:
        dat_file = np.memmap(out_file, dtype=dtype, shape=shape)
        dat_file[:] = lst[:]
        dat_file.flush()
        size = float(dat_file.nbytes) / (1024 ** 2)
        print('written %s : %.3f MB' % (out_filename, size))
def load_data(file_path, dtype='int32', shape=None):
    """Memory-map a raw binary numpy array stored at *file_path*.

    With ``shape=None`` the whole file is mapped as a flat array.
    """
    return np.memmap(file_path, dtype=dtype, shape=shape)
def printProgressBar(iteration, total, prefix='Progress: ', suffix='Complete',
                     decimals=1, length=50, fill='█'):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : number of decimals in % complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)

    Fix: removed dead code — ``elapsed_time``/``m``/``s`` were computed on
    every call but never printed. The start-time bookkeeping is kept so the
    module-level ``start_time`` state is unchanged.
    """
    global start_time
    if iteration == 0:
        start_time = time.time()
    value = 100 * (iteration / float(total))
    percent = ("{0:." + str(decimals) + "f}").format(value)
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
    # Print New Line on Complete
    if iteration == total:
        print()
class Dataset:
    '''
    Encapsulate all the data and metadata for a glm regression.

    The raw CSV is converted into binary memmap files (feature matrix,
    target columns, train/test index) and the metadata is persisted both
    as dataset.json (for Python) and dataset.cfg (for the C++ engine).
    '''
    def __init__(self, path=None):
        # All metadata starts empty; it is filled by process() or load().
        if path:
            self.set_path(path)
        self.size = -1
        self.fields = None
        self.features = None
        self.modalities = None
        self.targets = None
        self.csv_filename = None
        self.train_size = None
        self.test_size = None
    def set_path(self, path):
        # Output directory for all generated files; created on demand.
        self.path = path
        if not os.path.exists(path):
            os.makedirs(path)
    def set_size(self, size):
        self.size = size
    def set_fields(self, fields):
        self.fields = fields
    def set_features(self, features):
        self.features = features
    def set_targets(self, targets):
        self.targets = targets
    def set_modalities(self, modalities):
        self.modalities = modalities
    def count_features(self):
        return len(self.features)
    def count_modalities(self):
        # Total modality count over all features.
        return sum(self.count_modalities_per_feature())
    def count_modalities_per_feature(self):
        return [len(v) for k, v in self.modalities.items()]
    def get_offsets(self):
        # Cumulative offsets: offsets[i] is the first flattened modality
        # index of feature i in the coefficient vector.
        return np.cumsum([0] + self.count_modalities_per_feature())
    def get_feature_index(self, feature):
        # Returns -1 when the feature is unknown.
        if feature in self.features:
            return self.features.index(feature)
        return -1
    def get_feature_range(self, feature):
        # Flattened modality indices covered by *feature*.
        feature_idx = self.get_feature_index(feature)
        offsets = self.get_offsets()
        return list(range(offsets[feature_idx], offsets[feature_idx + 1]))
    def get_modalities(self, feature):
        return self.modalities[feature]
    def get_feature_modality_by_index(self, idx):
        # Inverse of the flattened indexing: map a global modality index
        # back to its (feature, modality) pair.
        offsets = self.get_offsets()
        for feature_idx in range(self.count_features()):
            if offsets[feature_idx + 1] > idx:
                break
        modality_idx = idx - offsets[feature_idx]
        feature = self.features[feature_idx]
        modality = self.modalities[feature][modality_idx]
        return feature, modality
    def get_unused_fields(self):
        # CSV columns that are neither features nor targets.
        unused_fields = set(self.fields) - set(self.features) - \
            set(self.targets)
        return list(unused_fields)
    def get_dataset_filename(self):
        return os.path.join(self.path, 'dataset.json')
    def save(self):
        # Persist the whole instance state as JSON.
        dataset_filename = self.get_dataset_filename()
        with open(dataset_filename, 'w') as dataset_file:
            json.dump(self.__dict__, dataset_file, indent=4)
        print("Saved dataset to ", dataset_filename)
    def load(self):
        # Restore the instance state written by save().
        dataset_filename = self.get_dataset_filename()
        with open(dataset_filename, 'r') as dataset_file:
            self.__dict__ = json.load(dataset_file)
    def save_simple_config(self):
        # Plain-text dataset description consumed by the C++ engine:
        # sizes, feature names, modality labels, then offsets.
        config_filename = os.path.join(self.path, 'dataset.cfg')
        with open(config_filename, 'w') as config:
            config.write(str(self.size) + '\n')
            config.write(str(self.train_size) + '\n')
            config.write(str(self.test_size) + '\n')
            config.write(str(self.count_features()) + '\n')
            config.write(str(self.count_modalities()) + '\n')
            for i in range(self.count_features()):
                config.write(self.features[i] + '\n')
            for i in range(self.count_features()):
                modalities = self.modalities[self.features[i]]
                for m in modalities:
                    config.write(str(m) + '\n')
            offsets = self.get_offsets()
            for o in offsets:
                config.write(str(o) + '\n')
    def get_feature_filename(self):
        return os.path.join(self.path, "features.dat")
    def get_target_filename(self, target):
        return os.path.join(self.path, "column_" + target + ".dat")
    def get_train_filename(self):
        return os.path.join(self.path, "train.dat")
    def get_test_filename(self):
        return os.path.join(self.path, "test.dat")
    def process(self, config, feedback_callback):
        """Convert the CSV described by *config* into binary memmap files.

        *config* keys used here: path, filename, filter/transform/train
        (Python source strings), features, targets. *feedback_callback*
        is called as callback(i, total) every 1000 rows for progress.
        """
        self.set_path(config['path'])
        # NOTE(review): exec() of config-provided source — only acceptable
        # because configs are authored locally; never feed untrusted
        # configs through here.
        context = {"math": math}
        exec(config["filter"], context)
        exec(config["transform"], context)
        exec(config["train"], context)
        csv_filename = config['filename']
        data_transform = context['data_transform']
        data_filter = context['data_filter']
        data_train = context['data_train']
        features = config['features']
        targets = config['targets']
        if not features:
            raise Exception("No features found.")
        if not targets:
            raise Exception("No targets found.")
        print('Starting data importation from', csv_filename)
        nb_lines = count_line(csv_filename) - 1
        print("Importing", '{:,}'.format(nb_lines).replace(',', ' '), "lines.")
        delimiter = detect_csv_separator(csv_filename)
        nb_features = len(features)
        # Feature matrix: one unsigned byte (modality code) per cell.
        observations = np.empty((nb_lines, nb_features), np.dtype('u1'))
        target_data = [np.empty((nb_lines), np.dtype('float32'))
                       for t in targets]
        # Per-feature mapping: modality value -> small integer code.
        features_mapping = [{} for i in range(nb_features)]
        nb_fields = 0
        fields = []
        features_index = []
        targets_index = []
        nb_observations = 0
        train_set = []
        test_set = []
        with open(csv_filename) as csv_file:
            reader = csv.DictReader(csv_file, delimiter=delimiter)
            fields = [field.strip() for field in reader.fieldnames]
            nb_fields = len(fields)
            features_index = [fields.index(f) for f in features]
            if len(features_index) != nb_features:
                raise Exception("Invalid features")
            targets_index = [fields.index(t) for t in targets]
            if len(targets_index) != len(targets):
                raise Exception("Invalid targets")
            for i, row in enumerate(reader):
                # Filtered-out rows are skipped entirely.
                if not data_filter(row):
                    continue
                data_transform(row)
                # Assign the row to the train or test split.
                if data_train(row):
                    train_set.append(nb_observations)
                else:
                    test_set.append(nb_observations)
                values = list(row.values())
                if len(values) != nb_fields:
                    raise Exception("Inconsistent number of fields",
                                    len(values), "in line", i + 1,
                                    "expecting", nb_fields)
                for j, index in enumerate(features_index):
                    v = values[index]
                    # Unseen modality values get the next available code.
                    a = features_mapping[j].setdefault(v,
                                                       len(features_mapping[j])
                                                       )
                    if a > 200:
                        raise Exception("Feature", features[j],
                                        "has too many modalities " +
                                        "( more than 200).")
                    observations[nb_observations, j] = a
                for idx, t in enumerate(target_data):
                    t[nb_observations] = float(values[targets_index[idx]])
                if i % 1000 == 0 or i == nb_lines - 1:
                    feedback_callback(i, nb_lines - 1)
                nb_observations += 1
        # Only the first nb_observations rows are kept (filtered rows
        # were never written).
        create_data_file_from_list(observations[:nb_observations, :],
                                   self.get_feature_filename(),
                                   np.dtype('u1'),
                                   (nb_observations, nb_features))
        for i, t in enumerate(targets):
            target = target_data[i]
            create_data_file_from_list(target[:nb_observations],
                                       self.get_target_filename(t),
                                       np.dtype('float32'),
                                       (nb_observations))
        create_data_file_from_list(train_set,
                                   self.get_train_filename(),
                                   np.dtype('int32'),
                                   (len(train_set)))
        create_data_file_from_list(test_set,
                                   self.get_test_filename(),
                                   np.dtype('int32'),
                                   (len(test_set)))
        modalities = {f: features_mapping[i] for i, f in enumerate(features)}
        # invert index and modality and return list of modalities
        for k, m in modalities.items():
            m = {v: k for k, v in m.items()}
            modalities[k] = [m[k] for k in sorted(m)]
            if len(modalities[k]) == 1:
                raise Exception("Feature", k, " has only one modality and is "
                                "therefore colinear to the intercept. Please "
                                "remove it from the dataset as it will cause "
                                "problems if included.")
        self.csv_filename = csv_filename
        self.size = nb_observations
        self.train_size = len(train_set)
        self.test_size = len(test_set)
        self.fields = fields
        self.features = features
        self.targets = targets
        self.set_modalities(modalities)
        self.save()
        self.save_simple_config()
if __name__ == '__main__':
    # CLI entry point: expects exactly one argument, a JSON config file.
    if len(sys.argv) != 2:
        raise Exception("Invalid number of options, expecting only one : "
                        "[config filename].")
    filename = sys.argv[1]
    print("Processing config file :", filename)
    with open(filename) as config_file:
        config = json.load(config_file)
    # Import the CSV described by the config with a console progress bar.
    Dataset().process(config, printProgressBar)
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,525 | juliendurand/pricing-tool | refs/heads/master | /gui/pricing/migrations/0006_auto_20190108_1306.py | # Generated by Django 2.1.4 on 2019-01-08 13:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: update the default source code of the
    dataset filter/transform functions. Do not edit by hand."""
    dependencies = [
        ('pricing', '0005_auto_20190108_1301'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dataset',
            name='fn_filter',
            field=models.TextField(default="def data_filter(row):\n    return row['HAB_hab'] == 'A'\n"),
        ),
        migrations.AlterField(
            model_name='dataset',
            name='fn_transform',
            field=models.TextField(default="def data_transform(row):\n    row['POL_mtcapass'] = int(math.log(float(row['POL_mtcapass'])))\n"),
        ),
    ]
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,526 | juliendurand/pricing-tool | refs/heads/master | /src/pypricing/result.py | import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import metrics
import dataset
class Result:
    """Load and analyse the output of a GLM fit produced by the C++ engine:
    predictions, coefficients, gini path, relativities and lift curve."""
    def __init__(self, config):
        self.config = config
        self.label = config.label
        self.path = config.get_result_path()
        # Dataset metadata (features, modalities, file layout).
        self.dataset = dataset.Dataset(config.get_dataset_filename())
        self.dataset.load()
        # Per-observation results written by the engine.
        self.df = self.load_results()
        self.data = self.load_data()
        self.selected_features = self.load_selected_features()
        weight_filename = self.dataset.get_target_filename(config.weight)
        self.weight = np.memmap(weight_filename, 'float32')
        target_filename = self.dataset.get_target_filename(config.target)
        self.target = np.memmap(target_filename, 'float32')
        train_data_filename = self.dataset.get_train_filename()
        self.train_data_index = np.memmap(train_data_filename, 'int32')
        test_data_filename = self.dataset.get_test_filename()
        self.test_data_index = np.memmap(test_data_filename, 'int32')
        #if self.config.loss == "gamma":
        # Drop observations with zero weight from both splits.
        self.test_data_index = np.intersect1d(self.test_data_index,
                                              self.weight.nonzero())
        self.train_data_index = np.intersect1d(self.train_data_index,
                                               self.weight.nonzero())
        self.train_data = self.data[self.train_data_index, :]
        self.test_data = self.data[self.test_data_index, :]
        self.df_coeffs = self.load_coeffs()
        self.gini_curve = self.load_gini_curve()
        self.nb_features = self.gini_curve.shape[0]
    def load_results(self):
        # Per-observation predictions (target, prediction, exposure, ...).
        return pd.read_csv(os.path.join(self.path, "results.csv"))
    def load_data(self):
        # Memory-mapped feature matrix: one byte (modality code) per cell.
        file_path = self.dataset.get_feature_filename()
        shape = (self.dataset.size, self.dataset.count_features())
        return np.memmap(file_path, dtype=np.dtype('u1'), shape=shape)
    def load_coeffs(self):
        # Coefficients are stored on the log scale; exponentiate them.
        df_coeffs = pd.read_csv(os.path.join(self.path, "coeffs.csv")).values
        return np.exp(df_coeffs)
    def load_gini_curve(self):
        # Gini as a function of the number of selected features, in %.
        ginipath_filename = os.path.join(self.path, 'ginipath.csv')
        df = pd.read_csv(ginipath_filename)
        df.Gini *= 100
        df['% max Gini'] = df.Gini / df.Gini.max() * 100
        df = df.round(2)
        return df
    def load_selected_features(self):
        # Features in selection order with their marginal gini contribution;
        # the first row (intercept) is dropped.
        selected_features_filename = os.path.join(self.path, 'features.csv')
        df = pd.read_csv(selected_features_filename)
        df.Gini *= 100
        df['Gini Contribution'] = \
            (df.Gini / df.Gini.max() * 100).diff().fillna(0)
        df = df.round(2)
        return df[1:]
    def get_coeffs(self, feature_range):
        # Skip the intercept (index 0) when slicing coefficient rows.
        return self.df_coeffs[1 + np.array(feature_range)]
    def write_coeffs_as_csv(self, path):
        # Export the intercept plus coefficients of selected features only.
        clean_coeffs_filename = os.path.join(path, 'coefficients.csv')
        with open(clean_coeffs_filename, 'w') as coeffs_file:
            coeffs_file.write('Feature,Modality,Coefficient\n')
            coeffs_file.write('Intercept,Intercept,' +
                              str(float(self.df_coeffs[0])) + '\n')
            for i, coeff in enumerate(self.df_coeffs[1:]):
                f, m = self.dataset.get_feature_modality_by_index(i)
                if self.selected_features.Feature.str.contains(f).any():
                    coeffs_file.write(str(f) + ',' + str(m) + ',' +
                                      str(round(float(coeff), 6)) + '\n')
    def deviance_reduction(self):
        # Relative poisson deviance vs the constant (mean-only) model.
        m = metrics.poisson_deviance(self.df.target / self.df.exposure,
                                     self.df.prediction / self.df.exposure,
                                     self.df.exposure)
        m0_pred = (self.df.target / self.df.exposure).mean()
        m0 = metrics.poisson_deviance(self.df.target / self.df.exposure,
                                      m0_pred,
                                      self.df.exposure)
        return (m - m0) / m0
    def gini(self):
        # Exposure-weighted gini of predictions vs observed frequencies.
        return metrics.gini(
            self.df.target / self.df.exposure,
            self.df.prediction / self.df.exposure,
            self.df.exposure)
    def rmse(self):
        return metrics.root_mean_square_error(
            self.df.target,
            self.df.prediction,
            self.df.exposure)
    def get_gini_curve(self, n=20):
        return self.gini_curve.head(int(n) + 1)
    def plot_gini_curve(self, path, n=21):
        """Save the gini-vs-number-of-features curve as img/gini_path.png
        under *path* and return the relative image path."""
        print('Plot Gini Curve')
        ginipath_filename = os.path.join(self.path, 'ginipath.csv')
        df = pd.read_csv(ginipath_filename)
        n = min(n, df.shape[0])
        ar = np.arange(n)
        gini = df.head(n).Gini * 100
        fig, ax = plt.subplots(figsize=(10, 8))
        ax.set_title('Gini Curve')
        ax.set_ylim([0, max(gini) * 1.1])
        ax.plot(range(0, n), gini, marker='o')
        ax.legend()
        ax.set_xticks(ar)
        filename = os.path.join('img', 'gini_path.png')
        plt.savefig(os.path.join(path, filename))
        plt.close()
        return filename
    def fill_missing_modalities(self, df, modalities):
        # Modalities absent from the test split get a neutral row so the
        # relativity table always has one row per modality.
        for i in range(len(modalities)):
            if i not in df.index:
                df.loc[i] = [0, 1, 1]
        return df
    def calculate_relativities(self, feature):
        """Build the per-modality relativity table for *feature*:
        exposure, observed/predicted relativities and fitted coefficients,
        normalized by the portfolio mean."""
        df = self.df
        idx = self.dataset.get_feature_index(feature)
        modalities = self.dataset.get_modalities(feature)
        try:
            modalities = [float(m) for m in modalities]
            if sum([m - int(m) for m in modalities]) == 0:
                modalities = np.array([int(m) for m in modalities])
        except Exception:
            # modalities are not integers -> that is perfectly ok
            pass
        df['f'] = self.test_data[:, idx]
        relativity = df.groupby(['f']).agg({
            'exposure': 'sum',
            'target': 'mean',
            'prediction': 'mean'
        })
        relativity = relativity.rename({
            'exposure': 'Exposure',
            'target': 'Target',
            'prediction': 'Prediction',
        }, axis=1)
        relativity.Exposure = relativity.Exposure.astype('int')
        relativity.Prediction /= df.target.mean()
        relativity.Prediction = relativity.Prediction.round(decimals=3)
        relativity.Target /= df.target.mean()
        relativity.Target = relativity.Target.round(decimals=3)
        relativity = self.fill_missing_modalities(relativity, modalities)
        relativity = relativity.sort_index()
        relativity['Modalities'] = modalities
        relativity['Coefficients'] = self.get_coeffs(
            self.dataset.get_feature_range(feature)
        )
        relativity['Coefficients'] = relativity['Coefficients'] \
            .round(decimals=3)
        relativity = relativity.sort_values('Modalities')
        relativity = relativity.reset_index()
        relativity = relativity[['Modalities',
                                 'Coefficients',
                                 'Exposure',
                                 'Prediction',
                                 'Target']]
        return relativity
    def plot_relativities(self, feature, path):
        """Save the relativity chart for *feature* (exposure bars plus
        prediction/target/coefficient lines) and return the image path."""
        print("Plot Relativities ", feature)
        relativity = self.calculate_relativities(feature)
        ar = np.arange(relativity.Prediction.size)
        # Create chart
        fig, ax1 = plt.subplots(figsize=(10, 8))
        # Exposure on first axis
        ax1.bar(ar, relativity.Exposure, color='#fffca0', edgecolor='grey')
        ax1.set_ylim(ymax=relativity.Exposure.max() * 3)
        ax1.set_xticks(ar)
        ax1.set_xticklabels(labels=relativity.Modalities)
        ax1.set_ylabel('Weight')
        # Relativities on second axis
        ax2 = ax1.twinx()
        ax2.set_title(feature)
        ax2.plot(ar, relativity.Prediction, color="#0f600e", marker=".")
        ax2.plot(ar, relativity.Target, color="#c242f4", marker=".")
        ax2.plot(ar, relativity.Coefficients, color="#93ff9e", marker="^")
        ax2.axhline(y=1, color='black', linewidth=1, linestyle="dotted")
        ax2.set_ylim(ymin=0)
        ax2.set_ylabel('Values')
        filename = os.path.join(path, 'img', 'relativity_' + feature + '.png')
        plt.savefig(filename)
        plt.close()
        return os.path.join('img', 'relativity_' + feature + '.png')
    def plot_lift_curve(self, path, n_band=10):
        """Save the lift curve (observed vs predicted by exposure band).

        NOTE(review): uses DataFrame.ix, which was deprecated in pandas
        0.20 and removed in 1.0 — this method needs migrating to
        .loc/.iloc before running on modern pandas.
        """
        print("Plot Lift Curve")
        y = self.df.target
        y_pred = self.df.prediction
        weight = self.df.exposure
        if weight is None:
            weight = np.ones(y.shape[0])
        d = {'pred': list(y_pred), 'obs': list(y), 'weights': list(weight)}
        d = pd.DataFrame(d)
        d = d.dropna(subset=['obs', 'pred'])
        d = d.sort_values('pred', ascending=True)
        d.index = list(range(0, len(y_pred)))
        # Cumulative exposure, used to cut equal-exposure bands.
        exp_cum = [0]
        for k in range(0, len(y_pred)):
            exp_cum.append(exp_cum[-1] + d.ix[k, 'weights'])
        s = exp_cum[-1]
        j = s // n_band
        m_pred, m_obs, m_weight = [], [], []
        k, k2 = 0, 0
        for i in range(0, n_band):
            k = k2
            for p in range(k, len(y_pred)):
                if exp_cum[p] < ((i + 1) * j):
                    k2 += 1
            temp = d.ix[range(k, k2), ]
            m_pred.append(sum(temp['pred'] * temp['weights']) /
                          sum(temp['weights']))
            m_obs.append(sum(temp['obs'] * temp['weights']) /
                         sum(temp['weights']))
            m_weight.append(temp['weights'].sum())
        fig, ax1 = plt.subplots(figsize=(10, 8))
        ax2 = ax1.twinx()
        ax1.set_title('Lift Curve')
        ax1.set_ylim([0, max(m_weight) * 3])
        # the histogram of the weigths
        ax1.bar(range(0, n_band), m_weight, color='#fffca0',
                edgecolor='grey')
        ax2.plot(range(0, n_band), m_pred, linestyle='--',
                 marker='o', color='b')
        ax2.plot(range(0, n_band), m_obs, linestyle='--',
                 marker='o', color='r')
        ax2.legend(labels=['Predicted', 'Observed'], loc=2)
        ax1.set_xlabel('Band')
        ax2.set_ylabel('Y values')
        ax1.set_ylabel('Weight')
        filename = os.path.join(path, 'img', 'lift_curve.png')
        fig.savefig(filename, bbox_inches='tight')
        plt.close()
        return os.path.join('img', 'lift_curve.png')
    def gini_coeffs(self, feature):
        """Exposure-weighted gini of the fitted coefficients of *feature*
        (a measure of how discriminant the feature is)."""
        df = self.calculate_relativities(feature)
        df = df.sort_values('Coefficients')
        w = df.Exposure.values
        c = df.Coefficients.values * w
        g = 0
        cc = 0
        cw = 0
        for i in range(len(c)):
            g += w[i] * (2 * cc + c[i])
            cc += c[i]
            cw += w[i]
        g = 1 - g / (cc * cw)
        # Clamp tiny negatives caused by floating-point error.
        g = 0 if g < 0.00001 else g
        return g
if __name__ == '__main__':
    import config
    # Ad-hoc check: print the per-feature coefficient gini for a
    # hard-coded model configuration.
    configuration = config.Config("config/model_charge_ddea.cfg")
    r = Result(configuration)
    for f in r.gini_curve['Feature']:
        if f == 'Intercept':
            continue
        print(f, str(r.gini_coeffs(f)))
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,527 | juliendurand/pricing-tool | refs/heads/master | /gui/pricing/migrations/0003_auto_20181218_2234.py | # Generated by Django 2.1.4 on 2018-12-18 22:34
from django.db import migrations, models
import pricing.models
class Migration(migrations.Migration):
    """Auto-generated migration: set enum-backed defaults and choices for
    Feature.status and Model.loss. Do not edit by hand."""
    dependencies = [
        ('pricing', '0002_auto_20181218_2231'),
    ]
    operations = [
        migrations.AlterField(
            model_name='feature',
            name='status',
            field=models.CharField(choices=[('Input', 'Input'), ('Target', 'Target')], default=pricing.models.FeatureStatus('Input'), max_length=10),
        ),
        migrations.AlterField(
            model_name='model',
            name='loss',
            field=models.CharField(choices=[('Gaussian', 'Gaussian'), ('Logistic', 'Logistic'), ('Poisson', 'Poisson'), ('Gamma', 'Gamma')], default=pricing.models.LossFunction('Gaussian'), max_length=10),
        ),
    ]
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,528 | juliendurand/pricing-tool | refs/heads/master | /gui/pricing/templatetags/pygmentize.py | import pygments
from django import template
from django.utils.safestring import mark_safe
from pygments import lexers
# from pygments import formatters
from pygments.formatters import HtmlFormatter
register = template.Library()
@register.filter(name='pygmentize', is_safe=True)
def pygmentize(value, language):
    """Template filter: syntax-highlight *value* as safe HTML using the
    pygments lexer registered for *language*."""
    chosen_lexer = lexers.get_lexer_by_name(language)
    highlighted = pygments.highlight(value, chosen_lexer, HtmlFormatter())
    return mark_safe(highlighted)
#@register.filter(name='pygmentize_css', is_safe=True)
#def pygmentize_css(value, language):
# output = HtmlFormatter().get_style_defs('.highlight'))
# return mark_safe(output)
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,529 | juliendurand/pricing-tool | refs/heads/master | /gui/pricing/migrations/0012_auto_20190109_2020.py | # Generated by Django 2.1.4 on 2019-01-09 20:20
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.functions.text
class Migration(migrations.Migration):
    """Auto-generated migration: introduce ModelFeature (a feature plus an
    active flag) and point Model.features at it. Do not edit by hand."""
    dependencies = [
        ('pricing', '0011_auto_20190109_1930'),
    ]
    operations = [
        migrations.CreateModel(
            name='ModelFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.BooleanField(default=True)),
                ('feature', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pricing.Feature')),
            ],
            options={
                'ordering': [django.db.models.functions.text.Lower('feature.name')],
            },
        ),
        migrations.AlterField(
            model_name='model',
            name='features',
            field=models.ManyToManyField(related_name='features', to='pricing.ModelFeature'),
        ),
    ]
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,530 | juliendurand/pricing-tool | refs/heads/master | /src/pypricing/metrics.py | """
Each function in this module has at least three arguments,
which are numpy arrays of the same size.
``function(y, y_pred, weights = None, **kwargs)``
where ``**kwargs`` are **named** extra-parameters.
"""
import numpy as np
def mean_absolute_error(y, y_pred, weight=None):
    """Sum of absolute errors divided by the total weight.

    The numerator is deliberately unweighted (errors on totals, divided by
    total exposure), matching root_mean_square_error. Fix: with the default
    ``weight=None`` the old body called ``np.sum(None)`` and crashed; the
    denominator now falls back to the number of observations.
    """
    denom = y.shape[0] if weight is None else np.sum(weight)
    return np.sum(np.abs(y - y_pred)) / denom
def root_mean_square_error(y, y_pred, weight=None):
    """Square root of (sum of squared errors / total weight).

    Fix: with the default ``weight=None`` the old body called
    ``np.sum(None)`` and crashed; the denominator now falls back to the
    number of observations.
    """
    denom = y.shape[0] if weight is None else np.sum(weight)
    return np.sqrt(np.sum(np.square(y - y_pred)) / denom)
def check_deviance(y, y_pred, weight=None):
    """Validate deviance inputs and broadcast a scalar prediction.

    A float ``y_pred`` is expanded to a full-length array; the shapes of
    ``y_pred`` and ``weight`` (when given) must match ``y``. Returns the
    (possibly expanded) ``y_pred``.
    """
    n = y.shape[0]
    if isinstance(y_pred, (np.floating, float)):
        y_pred = np.repeat(y_pred, n)
    assert y_pred.shape[0] == n, \
        "y and y_pred must have the same size"
    if weight is not None:
        assert weight.shape[0] == n, \
            "weight and y do not have same shape"
    return y_pred
def gaussian_deviance(y, y_pred, weight=None):
    """Deviance of the gaussian (least-squares) model.

    ``y`` holds the observed response, ``y_pred`` the predictions (a scalar
    is broadcast), ``weight`` optionally weights each squared residual.
    Returns the total deviance as a float.
    """
    y_pred = check_deviance(y, y_pred, weight=weight)
    sq_residuals = np.square(y - y_pred)
    if weight is None:
        return 0.5 * np.sum(sq_residuals)
    return 0.5 * np.sum(np.dot(weight, sq_residuals))
def poisson_deviance(y, y_pred, weight=None):
    """Deviance of the poisson model.

    Observations with ``y == 0`` use the reduced form ``y_pred - y`` to
    avoid ``log(0)``. ``weight`` optionally weights each term. Returns the
    total deviance as a float.
    """
    y_pred = check_deviance(y, y_pred, weight=weight)
    positive = y != 0
    per_obs = np.zeros(y.shape[0])
    per_obs[positive] = \
        (y[positive] * np.log(y[positive] / y_pred[positive]) -
         y[positive] + y_pred[positive])
    per_obs[~positive] = - y[~positive] + y_pred[~positive]
    if weight is not None:
        per_obs = np.dot(weight, per_obs)
    return 2 * np.sum(per_obs)
def gamma_deviance(y, y_pred, weight=None):
    """Deviance of the gamma model.

    Requires strictly positive ``y`` and ``y_pred``. ``weight`` optionally
    weights each term. Returns the total deviance as a float.
    """
    y_pred = check_deviance(y, y_pred, weight=weight)
    per_obs = -np.log(y / y_pred) + (y - y_pred) / y_pred
    if weight is not None:
        per_obs = np.dot(weight, per_obs)
    return 2 * np.sum(per_obs)
def binomial_deviance(y, y_pred, weight=None):
    """Deviance of the binomial model.

    ``y`` holds 0/1 outcomes, ``y_pred`` predicted probabilities strictly
    inside (0, 1). ``weight`` optionally weights each term. Returns the
    total deviance as a float.
    """
    y_pred = check_deviance(y, y_pred, weight=weight)
    per_obs = - (y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred))
    if weight is not None:
        per_obs = np.dot(weight, per_obs)
    return 2 * np.sum(per_obs)
def gaussian_pseudo_r2(y, y_pred, weight=None):
    """Pseudo-R2 of the gaussian model.

    Defined as ``1 - D(y, y_pred) / D(y, mu)`` where ``mu`` is the
    weighted mean of ``y``.
    """
    baseline = np.ones(len(y)) * np.average(y, weights=weight)
    return 1 - (gaussian_deviance(y, y_pred, weight) /
                gaussian_deviance(y, baseline, weight))
def poisson_pseudo_r2(y, y_pred, weight=None):
    """Pseudo-R2 of the poisson model.

    Defined as ``1 - D(y, y_pred) / D(y, mu)`` where ``mu`` is the
    weighted mean of ``y``.
    """
    baseline = np.ones(len(y)) * np.average(y, weights=weight)
    return 1 - (poisson_deviance(y, y_pred, weight) /
                poisson_deviance(y, baseline, weight))
def gamma_pseudo_r2(y, y_pred, weight=None):
    """Pseudo-R2 of the gamma model.

    Defined as ``1 - D(y, y_pred) / D(y, mu)`` where ``mu`` is the
    weighted mean of ``y``.
    """
    baseline = np.ones(len(y)) * np.average(y, weights=weight)
    return 1 - (gamma_deviance(y, y_pred, weight) /
                gamma_deviance(y, baseline, weight))
def binomial_pseudo_r2(y, y_pred, weight=None):
    """Pseudo-R2 of the binomial model.

    Defined as ``1 - D(y, y_pred) / D(y, mu)`` where ``mu`` is the
    weighted mean of ``y``.
    """
    baseline = np.ones(len(y)) * np.average(y, weights=weight)
    return 1 - (binomial_deviance(y, y_pred, weight) /
                binomial_deviance(y, baseline, weight))
def area_lorentz_fast(y, y_pred, weight=None):
    '''
    Weighted Gini coefficient of *y_pred* as a ranking of *y*.

    Observations are sorted by decreasing prediction; each gets a mid-point
    rank in cumulative-weight space, and the Gini is computed from the
    rank-weighted sum of observations.

    Parameters
    ----------
    y : ndarray
        array containing the TRUE response
    y_pred : ndarray
        array containing the value predicted by the model
    weight : ndarray, optional
        array containing the weight (default 1)
    '''
    if y.shape[0] != y_pred.shape[0]:
        raise ValueError("y and y_pred must have the same length.")
    if weight is None:
        weight = np.ones(y.shape[0])
    # Sort everything by decreasing prediction.
    order = np.argsort(np.asarray(y_pred, dtype=float))[::-1]
    obs = np.asarray(y, dtype=float)[order]
    w = np.asarray(weight, dtype=float)[order]
    # Mid-point rank of each observation in cumulative-weight space.
    rank = np.cumsum(w) - 1. / 2 * w
    total_weight = np.sum(w)
    obs_sum = np.dot(w, obs)
    rank_obs_sum = np.sum(obs * w * rank)
    return 1 - (2 / (total_weight * obs_sum)) * rank_obs_sum
def gini(y, y_pred, weights=None, normalize_gini=False):
    '''
    Weighted Gini of *y_pred* as a ranking of *y*.

    Parameters
    ----------
    y : ndarray
        array containing the TRUE response
    y_pred : ndarray
        array containing the value predicted by the model
    weights : ndarray, optional
        array containing the weight (default 1)
    normalize_gini : boolean, optional
        when True, divide by the Gini of the perfect model (y vs y)
    '''
    score = area_lorentz_fast(y, y_pred, weights)
    if not normalize_gini:
        return score
    # Normalize by the Gini of a model that predicts y itself.
    return score / area_lorentz_fast(y, y, weights)
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,531 | juliendurand/pricing-tool | refs/heads/master | /gui/pricing/urls.py |
from django.urls import path
from . import views
# URL table for the pricing app: dataset CRUD/processing, feature editing,
# model CRUD/runs, and run inspection.
urlpatterns = [
    path('', views.index, name='index'),
    # Dataset list / detail / create / delete, plus the processing trigger.
    path('dataset/', views.DatasetListView.as_view(),
         name='dataset_list'),
    path('dataset/<int:pk>/', views.DatasetDetailView.as_view(),
         name='dataset_detail'),
    path('dataset/create', views.DatasetCreateView.as_view(),
         name='dataset_create'),
    path('dataset/<int:pk>/delete/', views.DatasetDeleteView.as_view(),
         name='dataset_delete'),
    path('dataset/<int:pk>/process/', views.dataset_process,
         name='dataset_process'),
    path('feature/update/<int:pk>/', views.FeatureUpdateView.as_view(),
         name='feature_update'),
    # Model CRUD plus the fit trigger.
    path('model/', views.ModelListView.as_view(),
         name='model_list'),
    path('model/<int:pk>/', views.ModelDetailView.as_view(),
         name='model_detail'),
    path('model/create', views.ModelCreateView.as_view(),
         name='model_create'),
    path('model/<int:pk>/update/', views.ModelUpdateView.as_view(),
         name='model_update'),
    path('model/<int:pk>/delete/', views.ModelDeleteView.as_view(),
         name='model_delete'),
    path('model/<int:pk>/run/', views.run,
         name='run'),
    # Toggle a ModelFeature on/off for a given model.
    path('feature/switch/<int:pk>/<int:mf>/', views.switchFeature,
         name='switch'),
    path('run/<int:pk>/', views.RunDetailView.as_view(),
         name='run_detail'),
]
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,532 | juliendurand/pricing-tool | refs/heads/master | /gui/pricing/migrations/0011_auto_20190109_1930.py | # Generated by Django 2.1.4 on 2019-01-09 19:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make Model.target and Model.weight
    nullable foreign keys. Do not edit by hand."""
    dependencies = [
        ('pricing', '0010_auto_20190108_2231'),
    ]
    operations = [
        migrations.AlterField(
            model_name='model',
            name='target',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target', to='pricing.Feature'),
        ),
        migrations.AlterField(
            model_name='model',
            name='weight',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='weight', to='pricing.Feature'),
        ),
    ]
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,533 | juliendurand/pricing-tool | refs/heads/master | /gui/pricing/migrations/0008_auto_20190108_1451.py | # Generated by Django 2.1.4 on 2019-01-08 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change the default value of Dataset.fn_transform.

    The new default is a Python snippet (stored as text) that callers
    presumably exec/compile elsewhere — the literal below must stay
    byte-identical, including its embedded newlines.
    """

    dependencies = [
        ('pricing', '0007_auto_20190108_1450'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dataset',
            name='fn_transform',
            # Kept on one line: the default string is runtime data.
            field=models.TextField(default="def data_transform(row):\n row['POL_mtcapass'] = int(math.log(float(row['POL_mtcapass'])))\n"),
        ),
    ]
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,534 | juliendurand/pricing-tool | refs/heads/master | /gui/pricing/migrations/0013_auto_20190109_2138.py | # Generated by Django 2.1.4 on 2019-01-09 21:38
from django.db import migrations, models
import django.db.models.functions.text
class Migration(migrations.Migration):
    """Order ModelFeature case-insensitively and cap feature count.

    Auto-generated schema migration; keep operations stable once applied.
    """

    dependencies = [
        ('pricing', '0012_auto_20190109_2020'),
    ]

    operations = [
        # Sort ModelFeature rows by the related feature name, ignoring case.
        migrations.AlterModelOptions(
            name='modelfeature',
            options={'ordering': [django.db.models.functions.text.Lower('feature__name')]},
        ),
        # New per-model limit on the number of selectable features.
        migrations.AddField(
            model_name='model',
            name='max_nb_features',
            field=models.PositiveSmallIntegerField(default=20),
        ),
    ]
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
57,535 | juliendurand/pricing-tool | refs/heads/master | /gui/pricing/migrations/0009_auto_20190108_2220.py | # Generated by Django 2.1.4 on 2019-01-08 22:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adjust Model.features (related_name) and Model.target (choice limit).

    Auto-generated schema migration; do not edit the operations after it
    has been applied to a shared database.
    """

    dependencies = [
        ('pricing', '0008_auto_20190108_1451'),
    ]

    operations = [
        # Reverse accessor from Feature becomes `features`.
        migrations.AlterField(
            model_name='model',
            name='features',
            field=models.ManyToManyField(
                related_name='features',
                to='pricing.Feature',
            ),
        ),
        # Restrict selectable targets via the Feature.belong_to relation.
        migrations.AlterField(
            model_name='model',
            name='target',
            field=models.ForeignKey(
                limit_choices_to='belong_to',
                on_delete=django.db.models.deletion.CASCADE,
                related_name='target',
                to='pricing.Feature',
            ),
        ),
    ]
| {"/gui/pricing/views.py": ["/gui/pricing/models.py"], "/gui/pricing/admin.py": ["/gui/pricing/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.