index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
16,269,951
|
CeeEffEff/DQNExploration
|
refs/heads/main
|
/test_agent_driver.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import os
from datetime import datetime
import tensorflow as tf
tf.compat.v1.enable_v2_behavior()
from dqn_agent_driver import AgentDriver
# Directory layout for saved plots: visualisations/<timestamp>/AverageReturn_*.png
VISUALISATIONS_DIR = "visualisations"
# exist_ok avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(VISUALISATIONS_DIR, exist_ok=True)
visual_subdir = os.path.join(VISUALISATIONS_DIR, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
os.makedirs(visual_subdir, exist_ok=True)
# Prefix for every per-iteration plot file written by plot_average_returns.
graph_file_name_prefix = os.path.join(visual_subdir, "AverageReturn_")
def plot_average_returns(average_returns, iteration):
    """Plot the running list of average returns and save it as a PNG for this iteration.

    Fix: start a fresh figure for each call; previously every call drew onto the
    same implicit pyplot figure, so older lines accumulated in later saved images.
    """
    plt.figure()
    plt.plot(average_returns)
    plt.title('Average Return of Target per Iteration')
    plt.xlabel('Iteration')
    plt.ylabel('Average Return')
    plt.savefig(f"{graph_file_name_prefix}__{iteration}.png")
    plt.close()  # release the figure; 2000 iterations would otherwise leak figures
# Training-run configuration.
num_iterations = 2000
iterations = list(range(0, num_iterations + 1))
average_returns = []
# NOTE(review): AgentDriver.__init__ in dqn_agent_driver.py also requires
# learning_rate, fc_layer_units and fc_layer_depth — confirm this call still works.
driver = AgentDriver(num_collect_episodes=10, num_eval_episodes= 4, replay_buffer_capacity = 10000, verbose_env=True)
input_bool = False  # when True, pause for an ENTER keypress at checkpoints
input_frequency = 10  # pause only every N iterations (0 disables pausing)
evaluate_before_train = False
print("Initialising target...")
# Baseline evaluation of the untrained target policy.
_, average_return = driver.run_target(verbose=True)
average_returns.append(average_return)
if input_bool:
    input("Initialised, PRESS ENTER to continue")
else:
    print("Initialised, PRESS ENTER to continue")
def pause_input(message: str, iteration: int):
    """Block on user input every `input_frequency` iterations when interactive mode is on."""
    if input_frequency == 0:
        return
    if input_bool and iteration % input_frequency == 0:
        input(message)
# Main loop: collect experience, (optionally) evaluate, train, evaluate, plot.
for i in iterations[1:]:
    pause_input("Press ENTER to explore using collect policy", i)
    print("Iteration", i)
    print("Exploring...")
    driver.run_collect(verbose=True)
    if evaluate_before_train:
        pause_input("Press ENTER to evaluate target before training", i)
        print("Before training, evaluating target...")
        driver.run_target(verbose=True)
    pause_input("Press ENTER to train", i)
    print("Training...")
    # Fix: the returned per-step losses were bound to an unused (and misspelled)
    # local `interation_losses`; the dead assignment has been dropped.
    driver.train_target(train_steps=100, sample_batch_size=16, verbose=True)
    print()
    pause_input("Press ENTER to evaluate target after training", i)
    print("Evaluating target...")
    num_episodes, average_return = driver.run_target(verbose=True)
    pause_input("Press ENTER continue after the above evaluation", i)
    average_returns.append(average_return)
    plot_average_returns(average_returns, i)
input("Completed")
|
{"/optuna_agent_driver.py": ["/dqn_agent_driver.py"], "/dqn_my_agent.py": ["/dqn_my_env.py"], "/test_agent_driver.py": ["/dqn_agent_driver.py"], "/dqn_agent_driver.py": ["/dqn_my_agent.py"]}
|
16,269,952
|
CeeEffEff/DQNExploration
|
refs/heads/main
|
/dqn_agent_driver.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops.dataset_ops import AUTOTUNE
from dqn_my_agent import MyAgent
import numpy as np
from tf_agents.drivers import dynamic_episode_driver
from tf_agents.metrics import tf_metrics
from tf_agents.replay_buffers import tf_uniform_replay_buffer, episodic_replay_buffer
import tensorflow as tf
tf.compat.v1.enable_v2_behavior()
class AgentDriver:
    """Wires a MyAgent to collect/eval episode drivers and coordinates training."""

    def __init__(self, num_collect_episodes, num_eval_episodes, replay_buffer_capacity, learning_rate, fc_layer_units, fc_layer_depth, verbose_env=False, show_summary=False):
        self._agent = MyAgent(learning_rate, fc_layer_units, fc_layer_depth, verbose_env=verbose_env, show_summary=show_summary)
        self._agent.initialize()
        self._collect_driver = AgentCollectPolicyDriver(self._agent, num_collect_episodes, replay_buffer_capacity)
        self._target_driver = AgentTargetPolicyDriver(self._agent, num_eval_episodes)

    def run_collect(self, verbose=False):
        """Run the collect policy for the configured number of episodes."""
        self._agent.reset_ep_counter()
        _ = self._collect_driver.run(verbose=verbose)
        if verbose:
            print()

    def train_target(self, train_steps: int, sample_batch_size: int, verbose=False):
        """Train the agent from the replay buffer; returns the list of per-step losses.

        NOTE(review): `train_steps` is effectively ignored — the step count is
        always derived from the replay-buffer size. The original code assigned
        `num_train_steps = train_steps` and then immediately overwrote it; that
        dead assignment has been removed (behavior unchanged). Confirm whether
        `train_steps` was meant to cap the step count.
        """
        dataset = self._collect_driver._replay_buffer.as_dataset(
            num_parallel_calls=AUTOTUNE,
            single_deterministic_pass=True,
            sample_batch_size=sample_batch_size,  # lower batch size => more responsive to one update
            num_steps=2,  # the agent learns from (step, next-step) transitions, so this must be 2
        )
        iterator = iter(dataset)
        print("Number of frames in replay: ", self._collect_driver._replay_buffer.num_frames().numpy())
        # One pass over the buffer in batches; never fewer than one step.
        num_train_steps = int(self._collect_driver._replay_buffer.num_frames().numpy() / sample_batch_size)
        if num_train_steps == 0:
            num_train_steps = 1
        total_loss = 0
        max_loss = 0
        all_loss = []
        for i in range(num_train_steps):
            trajectories, _ = next(iterator)
            loss = self._agent.train(experience=trajectories)
            all_loss.append(loss.loss)
            max_loss = max(max_loss, loss.loss)
            total_loss += loss.loss
            if verbose:
                print(f"[{i}] Loss: {loss.loss}", end="\r")
        if verbose:
            print()
            print(f"[Total] Loss: {total_loss}")
            print(f"[Average] Loss: {total_loss/num_train_steps}")
            print(f"[Max] Loss: {max_loss}")
            print()
        return all_loss

    def train_target_on_all(self, verbose=False):
        """Train once on the entire replay buffer contents."""
        trajectories = self._collect_driver._replay_buffer.gather_all()
        loss = self._agent.train(experience=trajectories)
        if verbose:
            print(f"[All] Loss: {loss.loss}")
            print()

    def run_target(self, verbose=False):
        """Evaluate the target policy; returns (num_episodes, average_return)."""
        self._agent.reset_ep_counter()
        _, _, num_episodes, average_return = self._target_driver.run(verbose=verbose)
        if verbose:
            print()
        return num_episodes, average_return
class AgentCollectPolicyDriver(dynamic_episode_driver.DynamicEpisodeDriver):
    """Episode driver for the collect policy; records trajectories and run metrics."""

    def __init__(self, agent, num_episodes, replay_buffer_capacity):
        self._agent = agent
        env = self._agent._tf_env
        batch_size = env.batch_size if env.batched else 1
        self._replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
            data_spec=self._agent.collect_data_spec,
            batch_size=batch_size,
            max_length=replay_buffer_capacity,
        )
        self._num_episodes_metric = tf_metrics.NumberOfEpisodes()
        self._env_steps = tf_metrics.EnvironmentSteps()
        self._average_rtn = tf_metrics.AverageReturnMetric()
        super().__init__(
            env,
            self._agent.collect_policy,
            observers=[
                self._replay_buffer.add_batch,
                self._num_episodes_metric,
                self._env_steps,
                self._average_rtn,
            ],
            num_episodes=num_episodes,
        )
        # Note: the first run() resets the environment and initializes the policy.

    def reset_observers(self):
        """Zero the metrics and drop all buffered experience."""
        for metric in (self._num_episodes_metric, self._env_steps, self._average_rtn):
            metric.reset()
        self._replay_buffer.clear()

    def run(self, verbose=False):
        """Collect episodes from a clean slate; returns (final_time_step, policy_state)."""
        self.reset_observers()
        final_time_step, policy_state = super().run()
        if verbose:
            self.display_metrics()
        return final_time_step, policy_state

    def display_metrics(self):
        """Print step/episode counts and the average return for the last run."""
        print()
        print('Number of Steps: ', self._env_steps.result().numpy())
        print('Number of Episodes: ', self._num_episodes_metric.result().numpy())
        print('Average Return: ', self._average_rtn.result().numpy())
class AgentTargetPolicyDriver(dynamic_episode_driver.DynamicEpisodeDriver):
    """Episode driver for the greedy/target policy; tracks metrics only (no replay)."""

    def __init__(self, agent, num_episodes):
        self._agent = agent
        self._num_episodes_metric = tf_metrics.NumberOfEpisodes()
        self._env_steps = tf_metrics.EnvironmentSteps()
        self._average_rtn = tf_metrics.AverageReturnMetric()
        super().__init__(
            self._agent._tf_env,
            self._agent.policy,
            observers=[self._num_episodes_metric, self._env_steps, self._average_rtn],
            num_episodes=num_episodes,
        )
        # Note: the first run() resets the environment and initializes the policy.

    def reset_observers(self):
        """Zero all evaluation metrics."""
        for metric in (self._num_episodes_metric, self._env_steps, self._average_rtn):
            metric.reset()

    def run(self, verbose=False):
        """Evaluate episodes; returns (final_time_step, policy_state, num_episodes, average_return)."""
        self.reset_observers()
        final_time_step, policy_state = super().run()
        if verbose:
            self.display_metrics()
        return (
            final_time_step,
            policy_state,
            self._num_episodes_metric.result().numpy(),
            self._average_rtn.result().numpy(),
        )

    def display_metrics(self):
        """Print step/episode counts and the average return for the last run."""
        print()
        print('Number of Steps: ', self._env_steps.result().numpy())
        print('Number of Episodes: ', self._num_episodes_metric.result().numpy())
        print('Average Return: ', self._average_rtn.result().numpy())
|
{"/optuna_agent_driver.py": ["/dqn_agent_driver.py"], "/dqn_my_agent.py": ["/dqn_my_env.py"], "/test_agent_driver.py": ["/dqn_agent_driver.py"], "/dqn_agent_driver.py": ["/dqn_my_agent.py"]}
|
16,285,847
|
kveldhaugur/verklegt-2
|
refs/heads/main
|
/ship_o_cereal/main/models.py
|
from django.db import models
class Manufacturer(models.Model):
    """A cereal manufacturer; referenced by Items and ManufacturerLogo."""
    # Explicit integer primary key.
    ManID = models.IntegerField(primary_key=True, serialize=True)
    Name = models.CharField(max_length=255)
class ManufacturerLogo(models.Model):
    """Logo for a manufacturer (stored as a string — presumably a URL/path; confirm)."""
    # Fix: OneToOneField already enforces uniqueness, so the explicit
    # unique=True was redundant and has been removed (no behavior change).
    ManID = models.OneToOneField(Manufacturer, on_delete=models.CASCADE)
    Image = models.CharField(max_length=9999)
class ItemCategory(models.Model):
    """A tag/category that items can carry (via Items.Tags M2M)."""
    CategoryID = models.IntegerField(primary_key=True, serialize=True)
    CategoryTag = models.CharField(max_length=255)
class Items(models.Model):
    """A product for sale."""
    ItemID = models.IntegerField(primary_key=True, serialize=True)
    ManID = models.ForeignKey(Manufacturer, on_delete=models.CASCADE)
    Quantity_available = models.IntegerField()
    # NOTE(review): integer price — presumably whole currency units; confirm.
    Price = models.IntegerField()
    Name = models.CharField(max_length=255)
    Description = models.CharField(max_length=9999, blank=True)
    # NOTE(review): null=True on CharField is discouraged by Django (creates two
    # "empty" states, NULL and ''); confirm whether blank=True was intended.
    Image = models.CharField(max_length=255, null=True)
    Tags = models.ManyToManyField(ItemCategory)
class Country(models.Model):
    """Lookup table of countries, referenced by UserInfo."""
    CountryID = models.IntegerField(primary_key=True, serialize=True)
    CountryName = models.CharField(max_length=255)
class Account(models.Model):
    """A login account; personal details live in the related UserInfo row."""
    AccountID = models.IntegerField(primary_key=True, serialize=True)
    AccountName = models.CharField(max_length=255)
    # NOTE(review): password held in a plain CharField — it should be stored
    # hashed (e.g. via django.contrib.auth); confirm how this is populated.
    AccountPass = models.CharField(max_length=255)
    ProfilePic = models.CharField(max_length=255)
    DateOfBirth = models.DateTimeField(auto_now=False, null=True)
class UserInfo(models.Model):
    """Personal/shipping details, one-to-one with Account (shares its primary key)."""
    AccountConnected = models.OneToOneField('Account', primary_key=True, on_delete=models.CASCADE)
    FirstName = models.CharField(max_length=255)
    LastName = models.CharField(max_length=255)
    City = models.CharField(max_length=255)
    PostalCode = models.CharField(max_length=15)
    Address = models.CharField(max_length=255)
    HouseNum = models.IntegerField()
    MobilePhone = models.CharField(max_length=63)
    Email = models.CharField(max_length=255)
    SSN = models.CharField(max_length=255)
    Country = models.ForeignKey(Country, on_delete=models.CASCADE)
class Order(models.Model):
    """A customer order; purchased items attach via the M2M below."""
    OrderID = models.IntegerField(primary_key=True, serialize=True)
    AccountID = models.ForeignKey('Account', on_delete=models.CASCADE, null=False)
    ItemsInOrder = models.ManyToManyField(Items)
class OrderContains(models.Model):
    """Quantity of a given item in an order.

    NOTE(review): there is no ForeignKey to Order here, so rows cannot be tied
    to a specific order — this looks like a missing field; confirm intent.
    """
    ItemID = models.ForeignKey(Items, on_delete=models.CASCADE)
    Quantity = models.IntegerField(null=False)
# Create your models here.
|
{"/ship_o_cereal/main/admin.py": ["/ship_o_cereal/main/models.py"], "/ship_o_cereal/userprofile/views.py": ["/ship_o_cereal/userprofile/forms/edit_user.py"], "/ship_o_cereal/main/history_processor.py": ["/ship_o_cereal/main/models.py"]}
|
16,285,848
|
kveldhaugur/verklegt-2
|
refs/heads/main
|
/ship_o_cereal/catalogue/views.py
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from main.models import Items
# Create your views here.
def index(request):
    """Render the catalogue listing, sorted alphabetically by item name."""
    all_items = Items.objects.all().order_by('Name')
    return render(request, 'catalogue/index.html', {'items': all_items})
def get_item_by_id(request, id):
    """Render the detail page for one item, returning 404 for an unknown pk."""
    item = get_object_or_404(Items, pk=id)
    context = {'item': item}
    return render(request, 'catalogue/item-details.html', context)
|
{"/ship_o_cereal/main/admin.py": ["/ship_o_cereal/main/models.py"], "/ship_o_cereal/userprofile/views.py": ["/ship_o_cereal/userprofile/forms/edit_user.py"], "/ship_o_cereal/main/history_processor.py": ["/ship_o_cereal/main/models.py"]}
|
16,285,849
|
kveldhaugur/verklegt-2
|
refs/heads/main
|
/ship_o_cereal/catalogue/urls.py
|
from django.urls import path
from . import views
# Catalogue routes: the listing at the app root, plus a detail page keyed by item id.
urlpatterns = [
    path('', views.index, name='catalogue-index'),
    path('<int:id>', views.get_item_by_id, name='item-details')
]
|
{"/ship_o_cereal/main/admin.py": ["/ship_o_cereal/main/models.py"], "/ship_o_cereal/userprofile/views.py": ["/ship_o_cereal/userprofile/forms/edit_user.py"], "/ship_o_cereal/main/history_processor.py": ["/ship_o_cereal/main/models.py"]}
|
16,436,373
|
detect-dev/django-extended-shell
|
refs/heads/main
|
/extended_shell/settings.py
|
from django.conf import settings
# Enable / disable colored output in the extended shell.
EXTENDED_SHELL_COLORED = getattr(
    settings, 'EXTENDED_SHELL_COLORED', True)
# Automatically import the models of every app in INSTALLED_APPS.
EXTENDED_SHELL_IMPORT_APPS_MODELS = getattr(
    settings, 'EXTENDED_SHELL_IMPORT_APPS_MODELS', True
)
# Extra user-defined import paths ('pkg.mod.name' or 'pkg.mod.name as alias').
EXTENDED_SHELL_IMPORTS = getattr(
    settings, 'EXTENDED_SHELL_IMPORTS', [])
# Useful defaults imported into every shell session.
EXTENDED_SHELL_DEFAULTS = getattr(
    settings, 'EXTENDED_SHELL_DEFAULTS', [
        'django.conf.settings',
        'django.core.cache.cache',
        'django.utils.timezone',
        'django.db.models.Avg',
        'django.db.models.Count',
        'django.db.models.F',
        'django.db.models.Q',
        'django.db.models.Max',
        'django.db.models.Min',
        'django.db.models.Sum',
    ]
)
|
{"/extended_shell/startup.py": ["/extended_shell/__init__.py"]}
|
16,436,374
|
detect-dev/django-extended-shell
|
refs/heads/main
|
/extended_shell/startup.py
|
from django.apps import apps
from extended_shell import settings as conf
from extended_shell import (
show_modules,
load_modules,
term
)
# This file runs as PYTHONSTARTUP (see apps.ExtendedShellConfig.ready), so the
# locals().update(...) calls below populate the interactive shell's namespace.
if conf.EXTENDED_SHELL_DEFAULTS:
    term.write('# Extended shell django imports')
    modules = conf.EXTENDED_SHELL_DEFAULTS
    locals().update(
        load_modules(modules))
    show_modules(
        modules
    )
if conf.EXTENDED_SHELL_IMPORT_APPS_MODELS:
    term.write('# Extended shell model imports')
    models = apps.get_models()
    # Bind every installed model under its class name.
    locals().update({
        model.__name__: model for
        model in models
    })
    show_modules(
        models
    )
if conf.EXTENDED_SHELL_IMPORTS:
    term.write('# Extended shell custom imports')
    modules = conf.EXTENDED_SHELL_IMPORTS
    locals().update(
        load_modules(modules))
    show_modules(
        modules
    )
# Remove this script's own helpers so they don't clutter the shell namespace.
del (
    load_modules,
    show_modules,
    apps,
    conf,
    term
)
|
{"/extended_shell/startup.py": ["/extended_shell/__init__.py"]}
|
16,436,375
|
detect-dev/django-extended-shell
|
refs/heads/main
|
/extended_shell/__init__.py
|
import sys
from collections import namedtuple
from importlib import import_module
from django.core.management.base import OutputWrapper
from django.core.management.color import color_style
from . import settings
# Legacy app-config hook (pre-Django 3.2 style of naming the AppConfig).
default_app_config = 'extended_shell.apps.ExtendedShellConfig'
# Style and terminal wrappers used for the startup banner output.
style = color_style(
    settings.EXTENDED_SHELL_COLORED
)
term = OutputWrapper(
    sys.stdout
)
# Parsed form of an import path: source module, attribute name, optional alias.
Import = namedtuple('Import', [
    'module',
    'name',
    'alias'
])
def parse_import(path):
    """Parse 'pkg.mod.name' or 'pkg.mod.name as alias' into an Import tuple.

    A path with no dot yields Import(name, name, alias) — both module and name
    are the bare path, matching how load_modules falls back to the module itself.
    """
    head, sep, tail = path.rpartition(' as ')
    if sep:
        path = head
        alias = tail.strip()
    else:
        alias = None
    # Split off the last dotted component as the attribute name.
    module_path, dot, name = path.rpartition('.')
    if not dot:
        module_path = name = path
    return Import(
        module_path.strip(),
        name.strip(),
        alias
    )
def show_modules(modules):
    """Echo the given modules/objects to the terminal as `import`-style lines.

    `modules` may mix strings (import paths, optionally 'x as y') and objects
    carrying __module__/__name__; entries with neither are silently skipped.
    """
    imports = {}
    strings = []
    for module in modules:
        if isinstance(module, str):
            data = parse_import(module)
        else:
            try:
                data = Import(
                    module.__module__,
                    module.__name__,
                    None
                )
            except AttributeError:
                # Neither a string nor module-like: nothing sensible to display.
                continue
        # Group entries by source module (repr fallback for bare names).
        name = (
            data.module or
            repr(data)
        )
        imports.setdefault(name, [])
        imports[name].append(data)
    for module, datas in imports.items():
        tmpl = 'from {path} import {modules}'
        modules = []
        for data in datas:
            if not data.module:
                # A bare module name renders as a plain `import X` line instead.
                tmpl = 'import {modules}'
            modules.append(
                '{imp.name} as {imp.alias}'.format(imp=data)
                if data.alias else data.name
            )
        strings.append(
            tmpl.format(
                path=module,
                modules=', '.join(modules)
            ))
    # NOTE(review): lines print in reverse build order — confirm this is intended.
    for line in reversed(strings):
        term.write(style.SUCCESS(line))
def load_modules(pathes):
    """Import each dotted path and return a {bound_name: object} mapping."""
    loaded = {}
    for entry in pathes:
        spec = parse_import(entry)
        mod = import_module(
            spec.module or spec.name
        )
        # Prefer the named attribute; fall back to the module object itself.
        loaded[spec.alias or spec.name] = getattr(mod, spec.name, mod)
    return loaded
|
{"/extended_shell/startup.py": ["/extended_shell/__init__.py"]}
|
16,436,376
|
detect-dev/django-extended-shell
|
refs/heads/main
|
/extended_shell/apps.py
|
import os
from django.apps import AppConfig
class ExtendedShellConfig(AppConfig):
    """App config that wires the extended-shell startup script into new interpreters."""
    name = 'extended_shell'

    def ready(self):
        """Point PYTHONSTARTUP at this app's startup.py unless the user set one.

        setdefault preserves any PYTHONSTARTUP already exported by the user.
        Fix: build the path with os.path.join instead of '{}/startup.py'.format
        so the separator is correct on Windows as well.
        """
        os.environ.setdefault(
            'PYTHONSTARTUP',
            os.path.join(self.path, 'startup.py'),
        )
|
{"/extended_shell/startup.py": ["/extended_shell/__init__.py"]}
|
16,457,296
|
aakashg1999/Smarter-Charts
|
refs/heads/master
|
/totalColors.py
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
from math import sqrt
from math import hypot
import pdb
def setup(img_name, Centre, Radius):
    """Scan a small square around the pie-chart centre, collecting distinct slice
    colours, sample points and colour-edge points, then project each edge point
    onto the circle of the given radius.

    Returns (li_x3y3, li_edge, li_pts, li):
      li_x3y3 - edge points projected onto the circle boundary
      li_edge - colour-transition points found on the scan square
      li_pts  - points where a new (non-edge) colour was sampled
      li      - distinct pixel colours seen (BGR triples)

    Fixes applied in review:
      * integer comparisons used `is`/`is not`, which only works by accident for
        small CPython ints and fails for coordinates outside the small-int cache
        — replaced with ==/!=;
      * the duplicate-edge cleanup removed items from li_edge while iterating it
        (which skips elements) — it now iterates over snapshots;
      * the bare `except:` is narrowed to `except Exception:` so Ctrl-C aborts.
    """
    img = cv2.imread(img_name)
    CpPoint = Centre
    rad = Radius
    rad10pct = int(rad * 0.2)  # half-size of the scan square: 20% of the radius

    def color_checking(li, check_point):
        # Record the pixel colour at check_point if unseen; True => new colour.
        px = img[check_point[1]][check_point[0]]
        for ele in li:
            if ele[0] == px[0] and ele[1] == px[1] and ele[2] == px[2]:
                return False
        li.append(px)
        return True

    def check_edge(check_point, check_var):
        # True when the pixel differs from both neighbours 3px away along the
        # scan axis (check_var 0 = vertical scan, 1 = horizontal scan).
        if check_var == 0:
            pxm2 = img[check_point[1] - 3][check_point[0]]
            px = img[check_point[1]][check_point[0]]
            pxp2 = img[check_point[1] + 3][check_point[0]]
            if pxp2[0] == px[0] and pxp2[1] == px[1] and pxp2[2] == px[2]:
                return False
            if pxm2[0] == px[0] and pxm2[1] == px[1] and pxm2[2] == px[2]:
                return False
            return True
        if check_var == 1:
            pxm2 = img[check_point[1]][check_point[0] - 3]
            px = img[check_point[1]][check_point[0]]
            pxp2 = img[check_point[1]][check_point[0] + 3]
            if pxp2[0] == px[0] and pxp2[1] == px[1] and pxp2[2] == px[2]:
                return False
            if pxm2[0] == px[0] and pxm2[1] == px[1] and pxm2[2] == px[2]:
                return False
            return True

    li = []       # distinct colours seen
    li_pts = []   # points where a new, non-edge colour was sampled
    li_edge = []  # points sitting on a colour transition
    # Walk the four sides of the square centred on CpPoint.
    for i in range(4):
        pts = [[CpPoint[0] - rad10pct, CpPoint[1] - rad10pct],
               [CpPoint[0] + rad10pct, CpPoint[1] - rad10pct],
               [CpPoint[0] + rad10pct, CpPoint[1] + rad10pct],
               [CpPoint[0] - rad10pct, CpPoint[1] + rad10pct]]
        start = pts[i]
        end = pts[(i + 1) % 4]
        if start[0] == end[0]:
            # Vertical side: walk along y from the smaller to the larger value.
            if (start[1] - end[1]) >= 0:
                temp_pt_start = end
                temp_pt_end = start
            else:
                temp_pt_start = start
                temp_pt_end = end
            while temp_pt_start[1] != temp_pt_end[1]:
                temp_pt_start[1] += 1
                TF = color_checking(li, temp_pt_start)
                if TF is True:
                    TFE = check_edge(temp_pt_start, 0)
                    if TFE is False:
                        li_pts.append([temp_pt_start[0], temp_pt_start[1]])
                        # NOTE(review): color_checking already appended this pixel,
                        # so the colour is stored twice — preserved from original.
                        px = img[temp_pt_start[1]][temp_pt_start[0]]
                        li.append(px)
                    elif TFE is True:
                        li_edge.append([temp_pt_start[0], temp_pt_start[1]])
        else:
            # Horizontal side: walk along x from the smaller to the larger value.
            if (start[0] - end[0]) >= 0:
                temp_pt_start = end
                temp_pt_end = start
            else:
                temp_pt_start = start
                temp_pt_end = end
            while temp_pt_start[0] != temp_pt_end[0]:
                temp_pt_start[0] += 1
                TF = color_checking(li, temp_pt_start)
                if TF is True:
                    TFE = check_edge(temp_pt_start, 1)
                    if TFE is False:
                        li_pts.append([temp_pt_start[0], temp_pt_start[1]])
                        px = img[temp_pt_start[1]][temp_pt_start[0]]
                        li.append(px)
                    elif TFE is True:
                        li_edge.append([temp_pt_start[0], temp_pt_start[1]])
    print(li_edge)
    # Drop near-duplicate edge points (within a 2px box of an earlier point).
    # Iterate over snapshots so removal cannot skip elements mid-iteration.
    for point in list(li_edge):
        if point not in li_edge:
            continue  # already removed as another point's duplicate
        for point2 in list(li_edge):
            if point[0] <= point2[0] and point[0] + 2 >= point2[0]:
                if point[1] <= point2[1] and point[1] + 2 >= point2[1]:
                    if point[0] != point2[0] or point[1] != point2[1]:
                        li_edge.remove(point2)
    # For each edge point, intersect the line (centre -> point) with the circle
    # of radius `rad` and keep the intersection closer to the edge point.
    li_x3y3 = []
    for point in li_edge:
        try:
            m = (point[1] - CpPoint[1]) / (point[0] - CpPoint[0])
            c = ((CpPoint[1] * point[0]) - (point[1] * CpPoint[0])) / (point[0] - CpPoint[0])
            a = 1
            # Quadratic in x from substituting y = m*x + c into the circle equation.
            x3deno = 1 + (m ** 2)
            x3b = (2 * (m * c - CpPoint[1] * m - CpPoint[0])) / x3deno
            x3c = (CpPoint[0] ** 2 + c ** 2 + CpPoint[1] ** 2 - (2 * CpPoint[1] * c) - rad ** 2) / x3deno
            x3disc = ((x3b) ** 2) - 4 * (x3c)
            if x3disc > 0:
                num_roots = 2
                x31 = (((-x3b) + sqrt(x3disc)) / (2 * a))
                x32 = (((-x3b) - sqrt(x3disc)) / (2 * a))
            elif x3disc == 0:
                num_roots = 1
                # NOTE(review): original precedence gives ((-x3b)/2)*a; harmless
                # while a == 1, but the intent reads as (-x3b)/(2*a) — confirm.
                x3 = (-x3b) / 2 * a
            elif x3disc < 0:
                print("Fatal Error")
            if num_roots == 2:
                y31 = m * x31 + c
                y32 = m * x32 + c
                if hypot(x31 - point[0], y31 - point[1]) < hypot(x32 - point[0], y32 - point[1]):
                    x3 = x31
                    y3 = y31
                else:
                    x3 = x32
                    y3 = y32
            elif num_roots == 1:
                y3 = m * x3 + c
        except Exception:
            # Vertical line through the centre (division by zero above): the two
            # intersections are directly above/below the centre.
            x3 = CpPoint[0]
            y31 = CpPoint[1] + rad
            y32 = CpPoint[1] - rad
            if y31 - point[1] > y32 - point[1]:
                y3 = y32
            else:
                y3 = y31
        li_x3y3.append([x3, y3])
    return (li_x3y3, li_edge, li_pts, li)
|
{"/app.py": ["/bargraph_driver.py", "/driverfn_pie.py", "/production.py"], "/driverfn_pie.py": ["/CentreDetect.py", "/RadiusCalc.py", "/totalColors.py", "/pointsCalc.py"]}
|
16,457,297
|
aakashg1999/Smarter-Charts
|
refs/heads/master
|
/dataset/train/hg/downloader.py
|
import urllib.request
import socket
import time
import re
def download_image(filename):
    """Download every URL listed (one per line) in `<filename>.txt`.

    Successful downloads are saved as 1.jpeg, 2.jpeg, ... in the current
    directory; the counter only advances on success, and failed URLs are
    skipped. Each request gets a 5-second socket timeout, and the index plus
    elapsed seconds are printed for every successful fetch.
    """
    manifest = filename + '.txt'
    t = 1
    socket.setdefaulttimeout(5)  # hoisted: applies to every request below
    with open(manifest) as urls:  # fix: `with` guarantees the manifest is closed
        for line in urls:
            j = str(t)
            try:
                start = time.time()
                urllib.request.urlretrieve(line, j + '.jpeg')
                end = time.time()
                print(t, end - start)  # same value as -1*(start-end), just clearer
                t = t + 1
            except Exception:
                # Fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
                # Exception keeps the skip-bad-URL behavior but stays abortable.
                continue
# Module-level side effect: kicks off the download of the 'hg' manifest on import.
download_image('hg')
|
{"/app.py": ["/bargraph_driver.py", "/driverfn_pie.py", "/production.py"], "/driverfn_pie.py": ["/CentreDetect.py", "/RadiusCalc.py", "/totalColors.py", "/pointsCalc.py"]}
|
16,459,059
|
watay147/netdataanalysis
|
refs/heads/master
|
/webpage/views.py
|
#encoding=utf8
from django.shortcuts import render
from django.http import HttpResponse
from .models import company
from .models import events
from .models import news
from .models import statics
from django.shortcuts import get_object_or_404
from django.db.models import Q
import json
# Create your views here.
def index(request):
    """Home page: top-credit companies; the attention list currently mirrors it."""
    topcredit_list = company.objects.all()
    context = {
        'topcredit_list': topcredit_list,
        'attention_list': topcredit_list,  # same queryset for now
    }
    return render(request, 'index.html', context)
def indexcreditorders(request):
    """Companies listed by their credit ranking."""
    ranked = company.objects.order_by('creditorder').all()
    return render(request, 'indexcreditorders.html', {'order_list': ranked})
def indexevents(request):
    """Events listed by their 'hot' score."""
    hot_events = events.objects.order_by('hot').all()
    return render(request, 'indexevents.html', {'event_list': hot_events})
def indexnews(request):
    """News items listed by their 'hot' score."""
    hot_news = news.objects.order_by('hot').all()
    return render(request, 'indexnews.html', {'new_list': hot_news})
def indexattentions(request):
    # Static page: the watch-list view carries no context yet.
    return render(request,'indexattentions.html')
def indexsearch(request):
    """Search companies by name or stock-number substring (GET param 'item')."""
    term = request.GET['item']
    matches = company.objects.filter(Q(name__icontains=term) | Q(stockno__icontains=term))
    return render(request, 'indexsearch.html', {'company_list': matches})
def viewcompany(request, stockno):
    """Company detail page with its events and news; 404 on unknown stockno."""
    acompany = get_object_or_404(company, stockno=stockno)
    context = {
        "company": acompany,
        "event_list": events.objects.order_by('hot').filter(stockno=stockno),
        "new_list": news.objects.order_by('hot').filter(stockno=stockno),
    }
    return render(request, 'company.html', context)
def linedata(stockno):
    """Build the ECharts line-chart payload (sentiment index vs. share price)
    for one stock, returned as a JSON HttpResponse.

    Note: this is a helper called by complotdata, not a URL-mapped view —
    it takes a stockno, not a request.
    """
    plotdata = statics.objects.order_by('stadate').filter(stockno=stockno)
    result = {}
    result['legend'] = [u'舆情指数', u'股价']  # "sentiment index", "share price"
    result['category'] = [str(x.stadate) for x in plotdata]  # must use attribute access (.xx), not ['xx']
    result['series'] = []
    result['series'].append({
        'name': u'舆情指数',  # "sentiment index"
        'type': 'line',
        'data': [x.creditindex for x in plotdata]
    })
    result['series'].append({
        'name': u'股价',  # "share price", plotted on the secondary y-axis
        'type': 'line',
        'yAxisIndex': 1,
        'data': [x.price for x in plotdata]
    })
    return HttpResponse(json.dumps(result), content_type="application/json")
def piedata(stockno, sta, end):
    """Build the ECharts pie-chart payload (sentiment share) for one stock,
    using only the most recent statics row, as a JSON HttpResponse.

    NOTE(review): the sta/end parameters are accepted but never used — confirm
    whether date-range filtering was intended. Also, the divisions below raise
    ZeroDivisionError if all three sentiment counts are zero.
    """
    piedata = statics.objects.order_by('-stadate').filter(stockno=stockno)[0]
    total = piedata.possent + piedata.negsent + piedata.neusent
    result = {}
    result['legend'] = [u'正向', u'负向', u'中性']  # positive / negative / neutral
    result['series'] = []
    result['series'].append({
        'name': u'占比',  # "share"
        'type': 'pie',
        'center': ['30%', '60%'],
        'data': [
            {'value': float(piedata.possent)/total, 'name': u'正向'},
            {'value': float(piedata.negsent)/total, 'name': u'负向'},
            {'value': float(piedata.neusent)/total, 'name': u'中性'}
        ]
    })
    return HttpResponse(json.dumps(result), content_type="application/json")
def complotdata(request):
    """Dispatch chart-data requests to the line or pie builder via GET 'type'."""
    stock = request.GET['stockno']
    chart = request.GET['type']
    if chart == 'line':
        return linedata(stock)
    if chart == 'pie':
        return piedata(stock, request.GET['sta'], request.GET['end'])
    # Any other type falls through and returns None (as in the original).
def viewevent(request, eventsid):
    """Render a single event as an article page."""
    event = events.objects.get(id=eventsid)
    return render(request, 'article.html', {"article": event})
def viewnew(request, newsid):
    """Render a single news item as an article page."""
    item = news.objects.get(id=newsid)
    return render(request, 'article.html', {"article": item})
|
{"/webpage/views.py": ["/webpage/models.py"]}
|
16,513,769
|
AlZak1/django3-pythonapi
|
refs/heads/master
|
/app/serializers.py
|
from rest_framework import serializers
from .models import Human, HumanStatistics
class HumanSerializer(serializers.ModelSerializer):
    """Serializes every field of the Human model."""
    class Meta:
        model = Human
        fields = '__all__'
class HumanStatisticsSerializer(serializers.ModelSerializer):
    """Serializes every field of the HumanStatistics model."""
    class Meta:
        model = HumanStatistics
        fields = '__all__'
|
{"/app/views.py": ["/app/models.py", "/app/serializers.py"], "/app/admin.py": ["/app/models.py"], "/app/serializers.py": ["/app/models.py"]}
|
16,513,770
|
AlZak1/django3-pythonapi
|
refs/heads/master
|
/app/views.py
|
from time import timezone
from rest_framework.permissions import IsAuthenticated
from .services import HumanService
from rest_framework.generics import CreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import Human, HumanStatistics
from .serializers import HumanSerializer, HumanStatisticsSerializer
import datetime
# Create your views here.
class HumanView(CreateAPIView):
    """Game endpoint: POST records a move and returns updated damage totals;
    GET returns current totals without recording a move.

    NOTE(review): player ids 1 and 2 are hard-coded (two-player assumption),
    and human_service is class-level state shared across all requests — this
    only works for a single concurrent game; confirm intent.
    """
    permission_classes = (IsAuthenticated,)
    # NOTE(review): a tuple here is not what DRF expects for serializer_class;
    # post/get instantiate serializers explicitly, so this is unused in practice.
    serializer_class = HumanSerializer, HumanStatisticsSerializer
    queryset = Human.objects.all()
    human_service = HumanService([])  # shared across requests (class attribute)
    def post(self, request, *args, **kwargs):
        """Persist the move as a statistics row, recompute both players' damage
        totals, store them on the Human rows, and return the updated numbers."""
        username = request.user.id
        human = request.data
        human["user"] = username
        human_statistics = request.data  # same dict object as `human` (aliased)
        human_statistics['user'] = username
        serializer = HumanStatisticsSerializer(data=human_statistics)
        if serializer.is_valid():
            serializer.save()
        self.human_service.append_human_list(human)
        total_score = self.human_service.process_human_list()
        print('total_score', total_score)
        response_data = {'user': username, 'total_damage': None, 'enemy_damage': None, 'current_damage': None, 'current_enemy_damage': None}
        human_1 = Human.objects.get(user=1)
        human_2 = Human.objects.get(user=2)
        # Totals are only applied once scores for both players are available.
        if len(total_score) == 2:
            if username == 1:
                response_data['total_damage'] = total_score['damage1'] + human_1.total_damage
                response_data['enemy_damage'] = total_score['damage2'] + human_1.enemy_damage
                response_data['current_damage'] = total_score['damage1']
                response_data['current_enemy_damage'] = total_score['damage2']
                human_1.total_damage = response_data['total_damage']
                human_1.save()
                human_1.enemy_damage = response_data['enemy_damage']
                human_1.save()
            else:
                response_data['total_damage'] = total_score['damage2'] + human_2.total_damage
                response_data['enemy_damage'] = total_score['damage1'] + human_2.enemy_damage
                response_data['current_damage'] = total_score['damage2']
                response_data['current_enemy_damage'] = total_score['damage1']
                human_2.total_damage = response_data['total_damage']
                human_2.save()
                human_2.enemy_damage = response_data['enemy_damage']
                human_2.save()
        print('Данные отсылаемые клиенту по POST:', response_data)  # "data sent to the client via POST"
        serializer = HumanSerializer(data=response_data)
        if serializer.is_valid():
            print('serializer', serializer.data)
            pass
        return Response(serializer.data)
    def get(self, request, *args, **kwargs):
        """Return the requesting player's damage totals; mirrors post() but also
        clears the shared move list once the serializer validates."""
        username = request.user.id
        human = {'user': username, 'total_damage': None, 'enemy_damage': None, 'current_damage': None, 'current_enemy_damage': None}
        total_score = self.human_service.process_human_list()
        human_1 = Human.objects.get(user=1)
        human_2 = Human.objects.get(user=2)
        if len(total_score) == 2:
            if username == 1:
                human['total_damage'] = total_score['damage1'] + human_1.total_damage
                human['enemy_damage'] = total_score['damage2'] + human_1.enemy_damage
                human['current_damage'] = total_score['damage1']
                human['current_enemy_damage'] = total_score['damage2']
                human_1.total_damage = human['total_damage']
                human_1.save()
                human_1.enemy_damage = human['enemy_damage']
                human_1.save()
            else:
                human['total_damage'] = total_score['damage2'] + human_2.total_damage
                human['enemy_damage'] = total_score['damage1'] + human_2.enemy_damage
                human['current_damage'] = total_score['damage2']
                human['current_enemy_damage'] = total_score['damage1']
                human_2.total_damage = human['total_damage']
                human_2.save()
                human_2.enemy_damage = human['enemy_damage']
                human_2.save()
        print('edfsd', total_score)
        print('Данные отсылаемые клиенту по GET:', human)  # "data sent to the client via GET"
        serializer = HumanSerializer(data=human)
        if serializer.is_valid():
            print('serializer', serializer.data)
            # Reset the shared pending-move list for the next round.
            self.human_service.human_list.clear()
            pass
        return Response(human)
class HumanStatisticsView(CreateAPIView):
    """Read/filter the per-move statistics log.

    GET returns all rows; POST is used as a filtered query: the request body's
    isAttack ('all' / 'attack' / 'defense' / None) and fromDate/toDate values
    select which rows are returned.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = HumanStatisticsSerializer
    queryset = HumanStatistics.objects.all()
    def get(self, request, *args, **kwargs):
        """Return every statistics row."""
        queryset = self.get_queryset()
        serializer = HumanStatisticsSerializer(queryset, many=True)
        return Response(serializer.data)
    def post(self, request, *args, **kwargs):
        """Return rows matching the isAttack/date-range filters in the body.

        NOTE(review): combinations not covered below (e.g. only one of the two
        dates provided) silently fall through to the unfiltered default
        queryset — confirm this is intended.
        """
        queryset = self.get_queryset()
        data_to_filter = request.data
        if data_to_filter['isAttack'] == 'all' and data_to_filter['fromDate'] is None and data_to_filter['toDate'] is None:
            queryset = HumanStatistics.objects.all()
        elif data_to_filter['isAttack'] == 'defense' and data_to_filter['fromDate'] is None and data_to_filter['toDate'] is None:
            queryset = HumanStatistics.objects.filter(isAttack=False)
        elif data_to_filter['isAttack'] == 'attack' and data_to_filter['fromDate'] is None and data_to_filter['toDate'] is None:
            queryset = HumanStatistics.objects.filter(isAttack=True)
        elif data_to_filter['isAttack'] is None and data_to_filter['fromDate'] is not None and data_to_filter['toDate'] is not None:
            queryset = HumanStatistics.objects.filter(date_without_time__range=(data_to_filter['fromDate'], data_to_filter['toDate']))
        elif data_to_filter['isAttack'] == 'attack' and data_to_filter['fromDate'] is not None and data_to_filter['toDate'] is not None:
            queryset = HumanStatistics.objects.filter(isAttack=True, date_without_time__range=(data_to_filter['fromDate'], data_to_filter['toDate']))
        elif data_to_filter['isAttack'] == 'defense' and data_to_filter['fromDate'] is not None and data_to_filter['toDate'] is not None:
            queryset = HumanStatistics.objects.filter(isAttack=False, date_without_time__range=(data_to_filter['fromDate'], data_to_filter['toDate']))
        elif data_to_filter['isAttack'] == 'all' and data_to_filter['fromDate'] is not None and data_to_filter['toDate'] is not None:
            queryset = HumanStatistics.objects.filter(date_without_time__range=(data_to_filter['fromDate'], data_to_filter['toDate']))
        serializer = HumanStatisticsSerializer(queryset, many=True)
        return Response(serializer.data)
class LoadPageView(APIView):
    """Returns the stored damage totals for the requesting player on page load."""
    serializer_class = HumanSerializer

    def get(self, request):
        username = request.user.id
        human_1 = Human.objects.get(user=1)
        human_2 = Human.objects.get(user=2)
        # Pick the record belonging to the requesting player (ids hard-coded to 1/2).
        source = human_1 if username == 1 else human_2
        human_object = {
            'user': username,
            'total_damage': source.total_damage,
            'enemy_damage': source.enemy_damage,
        }
        serializer = HumanSerializer(data=human_object)
        if serializer.is_valid():
            pass
        return Response(serializer.data)
|
{"/app/views.py": ["/app/models.py", "/app/serializers.py"], "/app/admin.py": ["/app/models.py"], "/app/serializers.py": ["/app/models.py"]}
|
16,513,771
|
AlZak1/django3-pythonapi
|
refs/heads/master
|
/app/models.py
|
from django.db import models
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Human(models.Model):
    """Per-player fighter state: body-part selection flags plus damage tallies."""
    # body-part selection flags for the current move
    head = models.BooleanField(default=False)
    body = models.BooleanField(default=False)
    leftHand = models.BooleanField(default=False)
    rightHand = models.BooleanField(default=False)
    leftLeg = models.BooleanField(default=False)
    rightLeg = models.BooleanField(default=False)
    # owning account; nullable so a Human row can exist unattached
    user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
    # True while this player is in the attacking role
    isAttack = models.BooleanField(default=False)
    # NOTE(review): IntegerField(default=None) without null=True will raise on
    # save if the default is ever used — confirm these are always assigned first.
    total_damage = models.IntegerField(default=None)
    enemy_damage = models.IntegerField(default=None)
    current_damage = models.IntegerField(default=None)
    current_enemy_damage = models.IntegerField(default=None)
class HumanStatistics(models.Model):
    """Historical record of a single move, timestamped for statistics queries."""
    # body-part flags recorded for this move
    head = models.BooleanField(default=False)
    body = models.BooleanField(default=False)
    leftHand = models.BooleanField(default=False)
    rightHand = models.BooleanField(default=False)
    leftLeg = models.BooleanField(default=False)
    rightLeg = models.BooleanField(default=False)
    # full timestamp of the move
    date = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
    isAttack = models.BooleanField(default=False)
    # date-only copy of `date`; used for date_without_time__range filters in views
    date_without_time = models.DateField(auto_now_add=True)
|
{"/app/views.py": ["/app/models.py", "/app/serializers.py"], "/app/admin.py": ["/app/models.py"], "/app/serializers.py": ["/app/models.py"]}
|
16,534,877
|
StatisticsOfExtremes/CK0211TFinal
|
refs/heads/main
|
/getAnimais.py
|
import time
from globalOpt import limparTela, renderBackground
"""
Cada animal deve conter um registro informando o seu nome; a idade (em
meses), o porte (pequeno, médio e grande); a raça (caso não houver será ‘sem
raça definida’ - SRD); o lar temporário, onde o animal está ou esteve antes de
ser adotado; o nome do responsável pela adoção; e a data de adoção. Para um
animal que ainda não foi adotado os campos de “responsável” e “data de
adoção” devem ficar vazios.
"""
def addAnimal():
    """Collect a new animal's data interactively (persistence still disabled)."""
    nome = getData("Nome do Animal: ", 'str')
    idade = getData("Idade (em meses): ", 'int')
    raca = getData("Raça: ", 'str')
    lar = getData("Lar temporário: ", 'str')
    responsavel = getData("Responsável (Caso não tenha, deixar em branco): ", 'str')
    data_adocao = getData("Data da adoção(Caso não tenha, deixar em branco): ", 'str')
    # inserirAnimal(nome, raca, lar, responsavel, data_adocao)  # not wired up yet
def modAnimal():
    """Ask which animal code to modify (modification itself not implemented)."""
    codigo = int(input("Qual o código do bixinho para ser modificado? "))
def delAnimal():
    """Ask which animal code to delete (deletion itself not implemented)."""
    codigo = int(input("Qual o código do bixinho para ser deletado? "))
def listarAnimais():
    """Placeholder listing of every registered animal."""
    print("pega todos os bixos")
def opcoesMenuAnimais():
    """Show the animal submenu, run the chosen action, then re-display (recursive)."""
    escolha = input("""
    1 - Cadastrar novo animal
    2 - Modificar dados de um animal existente
    3 - Excluir dados de um animal existente
    4 - Voltar para o menu anterior
    """)
    if escolha == "1":
        renderBackground()
        addAnimal()
    elif escolha == "2":
        print("Escolha o bixinho para modificar:")
        limparTela()
        modAnimal()
    elif escolha == "3":
        limparTela()
        print("Escolha o bixinho para deletar:")
        time.sleep(3)
        listarAnimais()
        delAnimal()
    else:
        # any other choice returns to the caller's menu
        renderBackground()
        return
    # every handled option loops back into this menu
    opcoesMenuAnimais()
def getData(Texto, Esperado):
    """Prompt with *Texto* until the reply converts to the expected type.

    Esperado selects the converter: 'int', 'str' or 'flt' (float).
    Raises ValueError for any other Esperado — the original silently entered
    an infinite busy loop, since no branch executed and nothing ever broke out.
    """
    conversores = {'int': int, 'str': str, 'flt': float}
    if Esperado not in conversores:
        # BUG FIX: unknown type used to spin forever without even prompting
        raise ValueError("Tipo esperado desconhecido: " + str(Esperado))
    converter = conversores[Esperado]
    while True:
        try:
            return converter(input(Texto))
        except ValueError:
            print("O tipo do valor inserido não corresponde ao esperado, por favor tente novamente")
        except TypeError:
            print("O tipo de valor inserido está incorreto")
|
{"/getAnimais.py": ["/globalOpt.py"], "/display.py": ["/getAnimais.py", "/getCandidato.py", "/globalOpt.py"]}
|
16,534,878
|
StatisticsOfExtremes/CK0211TFinal
|
refs/heads/main
|
/globalOpt.py
|
import os
welcomeStr = """
_____ _ _____ _____ _ _______
/ ____(_)/ ____| / ____| /\ | | |__ __|
| (___ _| (___ | | __ / \ | | | |
\___ \| |\___ \ | | |_ | / /\ \ | | | |
____) | |____) | | |__| |/ ____ \| |____| |
|_____/|_|_____/ \_____/_/ \_\______|_|
"""
print(welcomeStr)
def limparTela():
    """Clear the terminal: 'cls' on Windows ('nt'/'dos'), 'clear' elsewhere."""
    os.system('cls' if os.name in ('nt', 'dos') else 'clear')
def renderBackground():
    """Clear the screen and redraw the ASCII welcome banner."""
    limparTela()
    print(welcomeStr)
|
{"/getAnimais.py": ["/globalOpt.py"], "/display.py": ["/getAnimais.py", "/getCandidato.py", "/globalOpt.py"]}
|
16,534,879
|
StatisticsOfExtremes/CK0211TFinal
|
refs/heads/main
|
/getCandidato.py
|
# BUG FIX: this file contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>> master) with IDENTICAL content on both
# sides, which made the module a SyntaxError. Markers removed, single copy kept.
from globalOpt import limparTela, renderBackground


def opcoesMenuCandidato():
    """Show the adoption-candidate submenu and dispatch the chosen action."""
    opcao = input("""
    1 - Cadastrar novo candidato
    4 - Voltar para o menu anterior
    """)
    if opcao == "1":
        limparTela()
        addCandidato()
    else:
        renderBackground()
        return


def addCandidato():
    """Interview a prospective adopter (answers are not yet persisted)."""
    condFinan = input("Você possui condições financeiras para adotar um novo animal? (Sim/Não)")
    tempoLivre = input("Avaliando sua rotina, você possui tempo livre para se dedicar ao seu novo pet? (Sim/Não)")
    porteMax = input("Pense agora no espaço que você possui em casa:\n qual o porte máximo que o animal deverá ter para viver confortavelmente com você?(Pequeno/Médio/Grande)")
|
{"/getAnimais.py": ["/globalOpt.py"], "/display.py": ["/getAnimais.py", "/getCandidato.py", "/globalOpt.py"]}
|
16,534,880
|
StatisticsOfExtremes/CK0211TFinal
|
refs/heads/main
|
/display.py
|
import getAnimais
import getAdocao
import getCandidato
import os
import globalOpt
def getOptions():
    """Render the main menu, run one action, and report whether to continue.

    Returns False only when the user picks '4' (Sair). Every other path ends
    in a recursive getOptions() call and then falls off the end, returning
    None implicitly — so main()'s while-loop stops as soon as the recursion
    unwinds; the UI actually stays alive through the recursion, not the loop.
    """
    opcao = input("""
    1 - Cadastrar novos animais
    2 - Consultar animais
    3 - Adotar um animal
    4 - Sair
    """)
    if opcao == "1":
        globalOpt.renderBackground()
        getAnimais.opcoesMenuAnimais()
        getOptions()
    elif opcao == "2":
        globalOpt.renderBackground()
        getAnimais.listarAnimais()
        getOptions()
    elif opcao == "3":
        globalOpt.renderBackground()
        # adoption flow not implemented yet
        #getAdocao
        getOptions()
    elif opcao == '4':
        return False
def main():
    """Loop the menu until getOptions() returns a falsy value.

    getOptions() returns False on 'Sair' and None on every other path, so a
    single falsy result ends the loop.
    """
    # renamed from 'open', which shadowed the builtin
    running = True
    while running:
        running = getOptions()


main()
|
{"/getAnimais.py": ["/globalOpt.py"], "/display.py": ["/getAnimais.py", "/getCandidato.py", "/globalOpt.py"]}
|
16,536,838
|
deepakyadav0223/super-garbanzo
|
refs/heads/master
|
/Ayesha.py
|
import pyttsx3
import speech_recognition as sr
import instaloader
import smtplib
import cv2
import datetime
import pyautogui
import wolframalpha
import json
import webbrowser
import wikipedia
import os
import pyjokes
import time
import requests
import pywhatkit as kit
from requests import get
from PIL import Image
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from PIL import Image
import time
from instabot import Bot
import qrcode
import random
#engine voices
# pyttsx3 with the SAPI5 driver — Windows-only speech API; TODO confirm target OS.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# pick voice index 1 or 2 at random each launch
# NOTE(review): assumes at least three installed voices (indexes 1 and 2) — verify.
test = [1,2]
engine.setProperty('voice',voices[random.choice(test)].id)
#speak function
def speak(audio):
    """Speak *audio* aloud via the module-level pyttsx3 engine (blocks until done)."""
    engine.say(audio)
    engine.runAndWait()
#take command function
def takeCommand():
    """Listen on the default microphone and return the recognized text.

    Returns the literal string "None" — not the None object — on any
    recognition failure, so callers can .lower()/substring-test the result
    without a type check.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        r.pause_threshold = 1
        # NOTE(review): 490 looks hand-tuned for a specific microphone — confirm.
        r.energy_threshold = 490
        audio = r.listen(source)
    try:
        print("Recognizing...")
        # Google Web Speech API, Indian-English model
        query = r.recognize_google(audio, language='en-in')
        print(f"Sir said :{query}")
    except Exception as e:
        speak("unable to recognise your voice . please say it again")
        print("Unable to recognise your voice.please say it again ")
        return "None"
    return query
#mail send function
def sendEmail(to, content):
    """Send *content* to address *to* through Gmail's SMTP relay.

    Credentials are placeholders ('email'/'password') and must be replaced
    before use.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.login('email', 'password')  # placeholder credentials
    # BUG FIX: original passed the literal string 'to' as the FROM address;
    # the sender must be the authenticated account.
    server.sendmail('email', to, content)
    server.close()
#wishme function
def wishMe():
    """Greet according to the current hour, then introduce the assistant."""
    hour = int(datetime.datetime.now().hour)
    if 0 <= hour < 12:
        speak("Good Morning sir Did you eat breakfast?")
        print("Good Morning sir Did you eat breakfast?")
    elif hour < 18:
        speak("Good Afternoon sir did you eat lunch?")
        print("Good Afternoon sir did you eat lunch?")
    else:
        # spoken and printed strings intentionally differ by spacing
        speak("Good evening sir did you eat dinner ?")
        print("Good evening sir did you eat dinner? ")
    assname = "I am ayesha , Yadav Assistant"
    speak(assname)
    print(assname)
    speak("what work do you want from me")
    print("what work do you want from me")
#other people intro
def usrname():
    """Ask for the user's name and greet them accordingly."""
    speak("What is your name")
    print("what is your name")
    uname = takeCommand()
    if "Deepak Yadav" in uname:
        for line in ("you are my Boss", "Welcome Deepak Yadav"):
            speak(line)
            print(line)
    else:
        speak("Welcome Mister")
        print("Welcome Mister")
        speak(uname)
    speak("How can i help you sir")
    print("How can i help you sir")
#Main Function
# Main entry point: greet the user, then loop forever handling one voice
# command per iteration. Each elif branch keyword-matches the recognized query.
# Fixes in this revision: cv2.destroyAllWindow -> destroyAllWindows
# (AttributeError), undefined 'qi' -> 'gi' (NameError) in the duplicate
# instagram branch, pointless `if 1:` removed, bare excepts narrowed.
if __name__ == '__main__':
    wishMe()
    # usrname()
    while True:
        query = takeCommand().lower()
        if 'wikipedia' in query:
            speak("searching wikipedia")
            print("searching wikipedia")
            query = query.replace("wikipedia", "")
            results = wikipedia.summary(query, sentences=3)
            speak("according to wikipedia")
            print(results)
            speak(results)
        elif 'open youtube' in query:
            speak("here you go to youtube ")
            webbrowser.open_new_tab("www.youtube.com")
            time.sleep(10)
        elif 'open google' in query:
            speak("here you go to google ")
            webbrowser.open_new_tab("www.google.com")
            time.sleep(10)
        elif 'the time' in query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f"sir,the time is{strTime}")
            print(f"sir,the time is {strTime}")
        elif 'which day' in query:
            day = datetime.datetime.today().weekday() + 1
            time.sleep(2)
            Day_dict = {1: 'Monday', 2: 'Tuesday',
                        3: 'Wednesday', 4: 'Thursday',
                        5: 'Friday', 6: 'Saturday',
                        7: 'Sunday'}
            if day in Day_dict.keys():
                day_of_the_week = Day_dict[day]
                print(day_of_the_week)
                speak("The day is " + day_of_the_week)
        elif 'email to sameer' in query:
            try:
                speak("what should i say?")
                print("what should i say")
                content = takeCommand()
                to = "sam5@gmail.com"
                sendEmail(to, content)
                speak("Email has been sent")
            except Exception as e:
                print(e)
                speak("i am not able to send email")
        elif 'how are you' in query:
            speak("i am fine, Thank you!")
            print("i am fine thank you!")
        elif 'exit' in query or 'no thanks' in query or 'quit ' in query:
            speak("thank for using me :) ")
            exit()
        elif 'send multimedia mail' in query:
            speak("What is the subject of mail?")
            print("What is the subject of mail?")
            io = takeCommand()
            print("what is the body of mail")
            speak("what is the body of mail")
            h = takeCommand()
            from_address = "dhgh0@gmail.com"
            to_address = "onlijjjn4545@gmail.com"
            msg = MIMEMultipart()
            msg['From'] = from_address
            msg['To'] = to_address
            msg['Subject'] = io
            body = h
            msg.attach(MIMEText(body, 'plain'))
            # filename here
            filename = "elwal/content1"
            attachment = open(
                # enter complete link
                r"C:\Useelwal\content1",
                "rb")
            p = MIMEBase('application', 'octet-stream')
            p.set_payload((attachment).read())
            encoders.encode_base64(p)
            p.add_header('Content-Disposition', "attachment;filename = %s" % filename)
            msg.attach(p)
            s = smtplib.SMTP('smtp.gmail.com', 587)
            s.starttls()
            s.login(from_address, "pasword")
            text = msg.as_string()
            s.sendmail(from_address, to_address, text)
            s.quit()
            print("mail sent suceesfully!")
            speak("mail sent suceesfully!")
        elif 'joke' in query:
            speak(pyjokes.get_joke())
        elif 'calculate' in query:
            speak("running....")
            print("running....")
            # enter your api id
            app_id = "take it for your from "
            client = wolframalpha.Client(app_id)
            indx = query.lower().split().index('calculate')
            query = query.split()[indx + 1:]
            res = client.query(' '.join(query))
            answer = next(res.results).text
            print("The answer is " + answer)
            speak("The answer is " + answer)
        elif 'search for' in query or 'play ' in query:
            query = query.replace("search", " ")
            query = query.replace("play ", " ")
            webbrowser.open(query)
        elif "send whatsapp message" in query:
            kit.sendwhatmsg("+918854585411", " message ", 17, 30)
            time.sleep(10)
            speak("message sent successfully")
            print("messsgae sent succesfully")
        elif "play song on youtube" in query:
            speak("tell me song name")
            print("tell me song name")
            h = takeCommand()
            speak("playing..")
            kit.playonyt(h)
        elif 'is love' in query:
            speak(" probably yes hmmm ")
            print("probably yes hmm")
        elif 'do not listen' in query:
            speak("for how much time ")
            print("for how much ")
            a = int(takeCommand())
            time.sleep(a)
            print(a)
        elif 'where is ' in query:
            query = query.replace("where is ", " ")
            location = query
            speak("You asked to locate")
            print("you asked to locate")
            speak(location)
            time.sleep(10)
            # not loading ERRROR
            webbrowser.open("https://www.google.nl / maps / place/" + location + " ")
        elif 'write a note' in query:
            speak("what should i write")
            note = takeCommand()
            file = open('ayesha.txt', 'w')
            speak("sir,Should i include date and time")
            snfm = takeCommand()
            if 'yes' in snfm or 'sure' in snfm:
                strTime = datetime.datetime.now().strftime("%H:%M:%S")
                file.write(strTime)
                file.write(" :- ")
                file.write(note)
            else:
                file.write(note)
        elif "show note" in query:
            speak("showing notes")
            print("showing notes")
            file = open("ayesha.txt", "r")
            print(file.read())
            speak(file.read(6))
        elif "open notepad" in query:
            npath = "C:\\Windows\\system32\\notepad.exe"
            os.startfile(npath)
        elif "handwritten" in query:
            speak("What i should convert?")
            print(("What is hould convert?"))
            h = takeCommand()
            print(h)
            kit.text_to_handwriting(h, rgb=[0, 0, 0])
        elif "open command promot" in query:
            speak("opening..")
            print("opening..")
            os.system("start cmd")
        # ERROR : camera not taking picture
        elif "open camera" in query or 'launch camera' in query:
            cap = cv2.VideoCapture(0)
            while True:
                ret, img = cap.read()
                cv2.imshow('webcam', img)
                k = cv2.waitKey(50)
                if k == 27:
                    break
            cap.release()
            # BUG FIX: was cv2.destroyAllWindow() — no such function (AttributeError)
            cv2.destroyAllWindows()
        elif "my ip address" in query:
            ip = get('https://api.ipify.org').text
            speak(f"your IP address is{ip}")
            print(f"your IP address is{ip}")
        elif 'download instagram profile photo' in query:
            speak("honey, please give few second to download")
            print("honey, please give few second to download")
            mod = instaloader.Instaloader()
            f = mod.download_profile("userhandle", profile_pic_only=True)
            speak("Downloaded Sucessfully")
            print("downloaded Sucessfully")
        ##have to write something
        elif 'upload picture on instagram' in query:
            bot = Bot()
            speak("what caption i should write on picture")
            print("what caption i should write on picture")
            w = takeCommand()
            bot.login(username="enter your username",
                      password="enter your password")
            bot.upload_photo(
                # link to the file
                r"C:\User.pg",
                caption=w)
            speak("Photo uploaded successfully!")
            print("Photo uploaded successfully!")
            time.sleep(10)
        elif "open movies" in query:
            speak("opening sir")
            print("opening sir")
            webbrowser.open_new_tab("www.movies.com")
        elif "ayesha" in query:
            wishMe()
            speak("I am ayesha , Deepak assistant")
            print("I am ayesha , Deepak assistant")
        elif 'revenge' in query or 'open blaster' in query:
            speak("who trouble you ? ..give me chance i will see them")
            print("who trouble you ? ..give me chance i will see them")
            num = "enter your number"
            print("sir tell me your stress level in low and high terms ")
            speak("sir tell me your stress level in low and high terms")
            gh = takeCommand()
            if 'low' in gh:
                frequency = 10
            elif 'high' in gh:
                frequency = 20
            else:
                frequency = 15
            # flipkart
            browser = webdriver.Chrome(ChromeDriverManager().install())
            mobile_number = num
            for i in range(frequency):
                browser.get('https://www.flipkart.com/account/login?ret =/')
                number = browser.find_element_by_class_name('_2IX_2-')
                number.send_keys(mobile_number)
                forgot = browser.find_element_by_link_text('Forgot?')
                forgot.click()
                time.sleep(10)
            browser.quit()
            print("still workin on...")
            speak("still working on...")
            for i in range(frequency):
                driver = webdriver.Chrome()
                driver.get(
                    "https://www.amazon.in/ap/signin?openid.pape.max_auth_age=0&openid.return_to=https%3A%2F%2Fwww.amazon.in%2Fgp%2Fcss%2Fhomepage.html%3Ffrom%3Dhz%26ref_%3Dnav_signin&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.assoc_handle=inflex&openid.mode=checkid_setup&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&")
                driver.find_element_by_xpath('//*[@id="ap_email"]').send_keys(num)
                driver.find_element_by_xpath('//*[@id="continue"]').click()
                driver.find_element_by_xpath('//*[@id="auth-fpp-link-bottom"]').click()
                driver.find_element_by_xpath('//*[@id="continue"]').click()
                time.sleep(5)
                driver.close()
            print("Now i think revenge taken off. .. please drink water Sir")
            speak("Now i think revenge taken off. .. please drink water Sir")
        elif "weather" in query:
            api_key = "Enter Your api key"
            base_url = "http://api.openweathermap.org/data/2.5/weather?"
            speak("tell me city name")
            city_name = takeCommand()
            complete_url = base_url + "appid=" + api_key + "&q=" + city_name
            response = requests.get(complete_url)
            x = response.json()
            if x["cod"] != "404":
                y = x["main"]
                current_temperature = y["temp"]
                current_pressure = y["pressure"]
                current_humididty = y["humidity"]
                z = x["weather"]
                weather_description = z[0]["description"]
                print("Temperature (in kelvin unit) = " + str(current_temperature) + "\n atmospheric pressure (in hPa unit) = " + str(current_pressure) + "\n humidity (in percentage) = " + str(current_humididty) + "\n description = " + str(weather_description))
                speak("Temperature (in kelvin unit) is " + str(current_temperature) + "\n atmospheric pressure (in hPa unit) is " + str(current_pressure) + "\n humidity (in percentage) is " + str(current_humididty) + "\n description is " + str(weather_description))
            else:
                speak("City Not Found")
                print("City Not Found")
        elif "wikipedia" in query:
            speak("opening")
            webbrowser.open("www.wikipedia.com")
        elif "what is" in query:
            speak("running ...")
            print("running...")
            client = wolframalpha.Client("PAss here app id ")
            res = client.query(query)
            try:
                print(next(res.results).text)
                speak(next(res.results).text)
            except StopIteration:
                print("No results")
                speak("no results")
        elif "volume up" in query:
            pyautogui.press("volumeup")
        elif "volume down" in query:
            pyautogui.press("volumedown")
        elif "volume mute" in query or "mute" in query:
            pyautogui.press("volumemute")
        elif "set alarm" in query:
            speak("Tell me time in this manner set alarm to 5:30 am")
            print("Tell me time in this manner set alarm to 5:30 am")
            q = takeCommand()
            q = q.replace("set alarm to", "")
            time.sleep(10)
            q = q.replace(".", "")
            time.sleep(10)
            q = q.upper()
            time.sleep(2)
            import Alarm
            Alarm.Alaram(q)
        elif "screenshot" in query or "take screenshot" in query:
            speak("capturing...")
            print("capturing...")
            screenshot = pyautogui.screenshot()
            screenshot.save("swh.png")
            try:
                image = Image.open("swh.png")
                image.show()
            except Exception:
                time.sleep(5)
        elif "who is " in query:
            try:
                speak("running ...")
                print("running...")
                client = wolframalpha.Client(" enter your App id")
                res = client.query(query)
                print(next(res.results).text)
                speak(next(res.results).text)
            except Exception:
                query = query.split(' ')
                query = " ".join(query[0:])
                speak("I am searching for " + query)
                print(wikipedia.summary(query, sentences=3))
                speak(wikipedia.summary(query,
                                        sentences=3))
        elif 'send a text message' in query:
            speak("to whom?")
            print("to whom?")
            loe = "enter recvier no"
            print(loe)
            speak("what should i say")
            print("what should i say")
            com = takeCommand()
            print(com)
            url = "https://www.fast2sms.com/dev/bulk"
            my_data = {
                # Your default Sender ID
                'sender_id': 'FSTSMS',
                # Put your message here!
                'message': com,
                'language': 'english',
                'route': 'p',
                # You can send sms to multiple numbers
                # separated by comma.
                'numbers': loe
            }
            headers = {
                'authorization': 'Enter Your own id getting from post registration',
                'Content-Type': "application/x-www-form-urlencoded",
                'Cache-Control': "no-cache"
            }
            response = requests.request("POST",
                                        url,
                                        data=my_data,
                                        headers=headers)
            # load json data from source
            returned_msg = json.loads(response.text)
            # print the send message
            print(returned_msg['message'])
            speak("Sms Sent Sucessfully")
        # NOTE: unreachable — 'upload picture on instagram' is already matched by
        # the earlier branch; kept (with its NameError fixed) pending removal.
        elif 'upload picture on instagram' in query:
            print("Since i had picture path.so, i am uploading this..")
            speak("Since i had picture path.so, i am uploading this..")
            print("What should i write the caption of this picture")
            speak("What should i write the caption of this picture")
            gi = takeCommand()
            # BUG FIX: was print(qi)/caption=qi — 'qi' is undefined (NameError)
            print(gi)
            bot = Bot()
            bot.login(username="ENter your usernmae",
                      password="enter password")
            bot.upload_photo(
                r"C:\Uprofile_pic.jpg",
                caption=gi)
            print("picture uploaded successfully sir")
            speak("picture uploaded successfully sir")
        elif 'open mobile camera' in query:
            import urllib.request
            import cv2
            import numpy as np
            import time
            Url = "here enter your own https link/shot.jpg"
            while True:
                img_arr = np.array(bytearray(urllib.request.urlopen(Url).read()), dtype=np.uint8)
                img = cv2.imdecode(img_arr, -1)
                cv2.imshow("IPWebcam", img)
                q = cv2.waitKey(1)
                if q == ord("q"):
                    break
            cv2.destroyAllWindows()
        elif 'check network speed' in query:
            import speedtest
            st = speedtest.Speedtest()
            d1 = st.download()
            up = st.upload()
            print(f"sir upload speed is {up} bit per second and download speed is{d1} bit per second")
            speak(f"sir upload speed is {up} bit per second and download speed is{d1} bit per second")
        elif 'check battery' in query:
            import psutil
            battery = psutil.sensors_battery()
            per = battery.percent
            print(f"sir our system have {per} percent battery")
            speak(f"sir our system have {per} percent battery")
        elif 'encrypt' in query or 'change to qr' in query:
            print("what message you want to convert in qr..please tell me Sir")
            speak("what message you want to convert in qr..please tell me sir")
            qe = takeCommand()
            data = qe
            qr = qrcode.QRCode(version=1,
                               box_size=10,
                               border=5,
                               )
            qr.add_data(data)
            # Encoding data using make() function
            qr.make(fit=True)
            img = qr.make_image(fill_color="black",
                                back_color="white"
                                )
            # Saving as an image file
            img.save('deep.png')
            im = Image.open("deep.png")
            im.show()
        # ask for the next task after each iteration
        # (original indentation was lost in extraction — placement at loop level
        # assumed; confirm against the upstream file)
        speak("do you have any other work ?")
        print("do you have other work ?")
|
{"/Ayesha.py": ["/Alarm.py"]}
|
16,536,839
|
deepakyadav0223/super-garbanzo
|
refs/heads/master
|
/Alarm.py
|
import datetime
import winsound
def Alaram(timeing):
    """Wait until the wall clock reaches *timeing* ('%I:%M:%p', e.g. '1:26:AM'),
    then loop an alert sound via winsound (Windows-only).

    The wait polls once per second; the loop exits once the target minute has
    passed within the target hour (original break condition preserved).
    """
    import time  # local import keeps the module's import block untouched
    # BUG FIX: strptime is a classmethod; original called it on a now()
    # instance (datetime.datetime.now().strptime(...)), which worked only by
    # accident. Parse the target string directly.
    altt = str(datetime.datetime.strptime(timeing, "%I:%M:%p"))
    altt = altt[11:-3]  # keep 'HH:MM' out of 'YYYY-MM-DD HH:MM:SS'
    h = int(altt[:2])
    m = int(altt[3:5])
    print(f"Alram Set Successfully for {timeing}")
    while True:
        now = datetime.datetime.now()
        if h == now.hour:
            if m == now.minute:
                print("alram is running")
                winsound.PlaySound('abc', winsound.SND_LOOP)
            elif m < now.minute:
                break
        # sleep so the wait doesn't spin a CPU core (original busy-waited)
        time.sleep(1)
# Manual smoke test: blocks until 1:26 AM, then loops the alert sound.
if __name__ == '__main__':
    Alaram('1:26:AM')
|
{"/Ayesha.py": ["/Alarm.py"]}
|
16,539,957
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/models/ts.py
|
# -*- coding: utf-8 -*-
from layers.dynamic_rnn import DynamicLSTM
import torch
import torch.nn as nn
import pdb
import torch.nn.functional as F
import numpy as np
def generate_formal_adj(init_adj):
    '''input: a simple adj with a size of (row, column)
    output: a complete and formal adj with a size of (row+column, row+column)

    Builds the bipartite block matrix [[0, A], [A^T, 0]] so a rectangular
    (row x column) relation can serve as a square graph adjacency.
    NOTE(review): .cuda() is hard-coded on every tensor — requires a GPU.
    '''
    batch, row, column = init_adj.shape
    # up left matrix (batch, row, row) — all-zero block
    lu = torch.tensor(np.zeros((batch, row, row)).astype('float32')).cuda()
    # up right (batch, row, column) — the original adjacency
    ru = init_adj.cuda()
    # down left (batch, column, row) — its transpose
    ld = init_adj.transpose(1, 2).cuda()
    # down right (batch, column, column) — all-zero block
    rd = torch.tensor(np.zeros((batch, column, column)).astype('float32')).cuda()
    # up (batch, row, row+column)
    up = torch.cat([lu.float(), ru.float()], -1).cuda()
    # down (batch, column, row+column)
    down = torch.cat([ld.float(), rd.float()], -1).cuda()
    # final (batch, row+column, row+column)
    final = torch.cat([up,down],1).cuda()
    return final.cuda()
def preprocess_adj(A):
    '''
    for batch data
    Pre-process adjacency matrix
    :param A: adjacency matrix
    :return:

    Computes the symmetric GCN normalization D^-1/2 (A + I) D^-1/2
    (Kipf & Welling, 2017) per batch element; inf/nan entries caused by
    zero-degree nodes are zeroed out.
    NOTE(review): .cuda() is hard-coded throughout — requires a GPU.
    '''
    # prepare — adjacency must be square
    assert A.shape[-1] == A.shape[-2]
    batch = A.shape[0]
    num = A.shape[-1]
    # generate eye — per-batch identity for self-loops
    I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()
    # A with self-loops added
    A_hat = A.cuda() + I
    # node degree vector of A_hat
    D_hat_diag = torch.sum(A_hat.cuda(), axis=-1)
    # element-wise D^-1/2
    D_hat_diag_inv_sqrt = torch.pow(D_hat_diag.cuda(), -0.5)
    # inf — replace inf/nan (zero-degree nodes) with 0
    D_hat_diag_inv_sqrt = torch.where(torch.isinf(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())
    D_hat_diag_inv_sqrt = torch.where(torch.isnan(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())
    # expand the degree vector into a diagonal matrix
    tem_I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()
    D_hat_diag_inv_sqrt_ = D_hat_diag_inv_sqrt.unsqueeze(-1).repeat(1,1,num).cuda()
    D_hat_inv_sqrt = D_hat_diag_inv_sqrt_ * tem_I
    # D^-1/2 @ A_hat @ D^-1/2
    return torch.matmul(torch.matmul(D_hat_inv_sqrt.cuda(), A_hat.cuda()), D_hat_inv_sqrt.cuda())
class SequenceLabelForAO(nn.Module):
    """Two parallel token-level tag heads (subject/object) over one shared projection."""

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForAO, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (sub_logits, obj_logits)."""
        shared = self.dropout(F.relu(self.linear(input_features)))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class CustomizeSequenceLabelForAO(nn.Module):
    """AO tagger with a shared branch plus task-specific (ATE/OTE) branches.

    Each head classifies the concatenation of the shared projection and its
    own task-specific projection.
    """

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(CustomizeSequenceLabelForAO, self).__init__()
        self.tag_size = tag_size
        # shared projection h -> h/2
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        # task-specific projections h -> h/2
        self.hidden2tag_sub = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag_obj = nn.Linear(hidden_size, int(hidden_size / 2))
        # classifiers over the concatenated (h/2 + h/2 = h) features
        self.linear_a = nn.Linear(hidden_size, self.tag_size)
        self.linear_o = nn.Linear(hidden_size, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """
        Args:
            input_features: (bs, seq_len, h)
        Returns:
            (sub_logits, obj_logits), each (bs, seq_len, tag_size)
        """
        # shared branch
        features_tmp = self.dropout(nn.ReLU()(self.linear(input_features)))
        # ATE branch — BUG FIX: original applied ReLU/dropout to the shared
        # features again, discarding hidden2tag_sub's output entirely
        features_tmp_a = self.dropout(nn.ReLU()(self.hidden2tag_sub(input_features)))
        # OTE branch — same copy-paste defect fixed
        features_tmp_o = self.dropout(nn.ReLU()(self.hidden2tag_obj(input_features)))
        # cat shared + task-specific
        features_for_a = torch.cat([features_tmp, features_tmp_a], -1)
        features_for_o = torch.cat([features_tmp, features_tmp_o], -1)
        # classifier
        sub_output = self.linear_a(features_for_a)
        # BUG FIX: original used linear_a for the opinion head too
        obj_output = self.linear_o(features_for_o)
        return sub_output, obj_output
class SequenceLabelForAOS(nn.Module):
    """Shared projection feeding three token heads: subject, object, sentiment.

    The sentiment head emits tag_size + 1 classes.
    """

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForAOS, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size)
        self.hidden2tag_senti = nn.Linear(half, self.tag_size + 1)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (sub, obj, senti) logits."""
        shared = self.dropout(F.relu(self.linear(input_features)))
        return (self.hidden2tag_sub(shared),
                self.hidden2tag_obj(shared),
                self.hidden2tag_senti(shared))
class SequenceLabelForTriple(nn.Module):
    """Two token heads over a shared projection; the object head has tag_size + 1 classes."""

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForTriple, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size + 1)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (sub_logits, obj_logits)."""
        shared = self.dropout(F.relu(self.linear(input_features)))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class MultiNonLinearClassifier(nn.Module):
    """Single classifier head: Linear -> ReLU -> Dropout -> Linear to tag logits."""

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(MultiNonLinearClassifier, self).__init__()
        self.tag_size = tag_size
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag = nn.Linear(int(hidden_size / 2), self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """Project (…, h) features to (…, tag_size) logits."""
        hidden = self.dropout(F.relu(self.linear(input_features)))
        return self.hidden2tag(hidden)
class SequenceLabelForGrid(nn.Module):
    """Grid variant of the AO tagger: two heads over a shared projection."""

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForGrid, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (sub_logits, obj_logits)."""
        shared = self.dropout(F.relu(self.linear(input_features)))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    Computes (adj @ (text @ W)) normalized by per-row degree, plus bias.
    NOTE(review): weight/bias are allocated with torch.FloatTensor and NEVER
    initialized (no reset_parameters) — they hold arbitrary memory unless set
    externally; confirm initialization happens elsewhere. Forward also
    hard-codes .cuda(), so a GPU is required.
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # uninitialized parameter — see class note
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
    def forward(self, text, adj):
        # project node features
        hidden = torch.matmul(text, self.weight)
        # per-row degree (+1 to avoid division by zero)
        denom = torch.sum(adj, dim=2, keepdim=True) + 1
        # import pdb; pdb.set_trace()
        # adj = torch.tensor(adj)
        # force float32 (torch.tensor(tensor) copies — presumably to detach dtype; verify)
        adj = torch.tensor(adj, dtype=torch.float32)
        # hidden = torch.tensor(hidden)
        hidden = torch.tensor(hidden, dtype=torch.float32)
        # aggregate neighbors, normalized by degree
        output = torch.matmul(adj.cuda(), hidden.cuda()) / denom.cuda()
        # print(output.shape)
        # print(self.bias.shape)
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class PairGeneration(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    Produces a (bs, L, L) pairwise score matrix: (text @ W) @ (text @ W)^T.
    NOTE(review): self.weight is allocated with torch.FloatTensor and never
    initialized — outputs are nondeterministic garbage until weights are set
    externally; confirm initialization happens elsewhere.
    """
    def __init__(self, features, bias=False):
        super(PairGeneration, self).__init__() # 32,13,300 32,300,13
        self.features = features
        # self.out_features = out_features
        # uninitialized parameter — see class note
        self.weight = nn.Parameter(torch.FloatTensor(features, features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(features))
        else:
            self.register_parameter('bias', None)
    def forward(self, text):
        # project: (bs, L, h) @ (h, h) -> (bs, L, h)
        hidden = torch.matmul(text.float(), self.weight)
        # print(hidden.shape)
        # denom = torch.sum(adj, dim=2, keepdim=True) + 1
        # adj = torch.tensor(adj, dtype=torch.float32)
        # copy cast to float32 (torch.tensor(tensor) also detaches — verify intent)
        hidden_ = torch.tensor(hidden, dtype=torch.float32)
        # print(hidden_.shape)
        # pairwise inner products: (bs, L, h) @ (bs, h, L) -> (bs, L, L)
        output = torch.matmul(hidden_, hidden.permute(0,2,1))
        # print(output.shape)
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class PairGeneration0(nn.Module):
    """Build all token-pair features by concatenation.

    forward maps (bs, L, h) -> (bs, L, L, 2h) where position (i, j) holds
    [text[:, j], text[:, i]]. The weight1/weight2/bias1 parameters are kept
    for checkpoint compatibility but are unused by forward.
    """

    def __init__(self, in_dim):
        super(PairGeneration0, self).__init__()
        self.weight1 = nn.Parameter(torch.FloatTensor(in_dim, in_dim))
        self.weight2 = nn.Parameter(torch.FloatTensor(in_dim, in_dim))
        self.bias1 = nn.Parameter(torch.FloatTensor(in_dim))

    def forward(self, text):
        """Return the (bs, L, L, 2h) grid of concatenated token pairs."""
        seq_len = text.shape[1]
        # broadcast copies: first varies along axis 2, second along axis 1
        first = text.unsqueeze(1).repeat(1, seq_len, 1, 1)
        second = text.unsqueeze(2).repeat(1, 1, seq_len, 1)
        return torch.cat((first, second), dim=-1)
class GCNLayer(nn.Module):
    """One GCN layer: linear projection followed by an optional ReLU."""
    def __init__(self, in_dim, out_dim, acti=True):
        super(GCNLayer, self).__init__()
        self.linear = nn.Linear(in_dim, out_dim)  # bias = False is also ok.
        self.acti = nn.ReLU(inplace=True) if acti else None

    def forward(self, F):
        projected = self.linear(F)
        return self.acti(projected) if self.acti else projected
class GCN(nn.Module):
    """Two-layer graph convolutional network (Kipf & Welling style) for 2-D inputs."""
    def __init__(self, input_dim, hidden_dim, num_classes, p):
        super(GCN, self).__init__()
        self.gcn_layer1 = GCNLayer(input_dim, hidden_dim)
        self.gcn_layer2 = GCNLayer(hidden_dim, num_classes, acti=False)
        self.dropout = nn.Dropout(p)

    def forward(self, A, X):
        """A: (n, n) adjacency; X: (n, input_dim) features -> (n, num_classes) logits."""
        h = self.dropout(X.float())
        h = self.gcn_layer1(torch.mm(A, h))
        h = self.dropout(h)
        return self.gcn_layer2(torch.mm(A, h))
class TS(nn.Module):
    """Joint model: BiLSTM encoding, BIO aspect/opinion tagging, and pair/triplet
    grid classification over all token pairs."""
    def __init__(self, embedding_matrix, opt):
        super(TS, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.lstm = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type='LSTM')
        self.lstm_ = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type='LSTM')
        self.aspect_opinion_classifier = SequenceLabelForAO(600, 3, 0.5)
        self.pair_sentiment_classifier = MultiNonLinearClassifier(1200, 4, 0.5)
        self.triple_classifier = SequenceLabelForTriple(1200, 3, 0.5)
        self.pair_fc = nn.Linear(1200, 600)
        self.triple_fc = nn.Linear(1200, 600)
        self.pair_cls = nn.Linear(600, 3)
        self.triple_cls = nn.Linear(600, 4)
        self.text_embed_dropout = nn.Dropout(0.5)
        self.pairgeneration = PairGeneration0(600)

    def forward(self, inputs, mask):
        """inputs: 5-tuple whose first two entries are token indices and a mask.

        Returns flattened logits: aspect (-1, 3), opinion (-1, 3),
        pair (-1, 3), triplet (-1, 4).
        """
        text_indices, mask, _, _, _ = inputs
        # encode the sentence with a length-aware BiLSTM
        text_len = torch.sum(text_indices != 0, dim=-1)
        embedded = self.text_embed_dropout(self.embed(text_indices))
        text_out, (_, _) = self.lstm(embedded, text_len.cpu())  # (bs, seq_len, 600)
        # pairwise representation of every (i, j) token combination
        pair_text = self.pairgeneration(text_out)
        # BIO tagging scores for aspect and opinion terms
        aspect_probs, opinion_probs = self.aspect_opinion_classifier(text_out.float())
        aspect_probs = aspect_probs.contiguous().view(-1, 3)
        opinion_probs = opinion_probs.contiguous().view(-1, 3)
        # grid scores over token pairs: pair head (3-way) and triplet head (4-way)
        pair_probs_, triple_probs_ = self.triple_classifier(pair_text.float())
        pair_probs = pair_probs_.contiguous().view(-1, 3)
        triple_probs = triple_probs_.contiguous().view(-1, 4)
        return aspect_probs, opinion_probs, pair_probs, triple_probs
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,958
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/data_utils_gts.py
|
# -*- coding: utf-8 -*-
import os
import pickle
import numpy as np
import pdb
from tqdm import tqdm
def load_word_vec(path, word2idx=None, embed_dim=300):
    """Load GloVe-style word vectors from a text file.

    Args:
        path: vector file; each line is ``word[s] v1 v2 ... v_embed_dim`` (the word
              may itself contain spaces, hence the split from the right).
        word2idx: vocabulary filter; only these words are kept. If None, keep all
              words (the original crashed on the None default).
        embed_dim: number of vector components per line.
    Returns:
        dict mapping word -> np.ndarray of shape (embed_dim,), dtype float32.

    Fix vs. the original: the file handle was never closed; now uses ``with``.
    """
    word_vec = {}
    with open(path, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        for line in fin:
            tokens = line.rstrip().split()
            # everything before the last embed_dim tokens is the (possibly multi-word) key
            word = ' '.join(tokens[:-embed_dim])
            vec = tokens[-embed_dim:]
            if word2idx is None or word in word2idx:
                word_vec[word] = np.asarray(vec, dtype='float32')
    return word_vec
def build_embedding_matrix(word2idx, embed_dim, type):
    """Build (or load a cached) embedding matrix for the given vocabulary.

    Args:
        word2idx: word -> row index mapping; the matrix has len(word2idx) rows.
        embed_dim: embedding dimensionality.
        type: dataset tag used in the cache file name (keeps the original
              parameter name for keyword callers, though it shadows the builtin).
    Returns:
        np.ndarray of shape (len(word2idx), embed_dim); row 0 stays all-zero
        (<pad>), row 1 is random uniform (<unk>), words missing from GloVe stay zero.

    Fix vs. the original: ``pickle.load(open(...))`` / ``pickle.dump(..., open(...))``
    leaked file handles; both now use ``with``.
    """
    embedding_matrix_file_name = '{0}_{1}_embedding_matrix.pkl'.format(str(embed_dim), type)
    if os.path.exists(embedding_matrix_file_name):
        print('loading embedding_matrix:', embedding_matrix_file_name)
        with open(embedding_matrix_file_name, 'rb') as f:
            embedding_matrix = pickle.load(f)
    else:
        print('loading word vectors ...')
        embedding_matrix = np.zeros((len(word2idx), embed_dim))  # idx 0 and 1 are all-zeros
        embedding_matrix[1, :] = np.random.uniform(-1/np.sqrt(embed_dim), 1/np.sqrt(embed_dim), (1, embed_dim))
        fname = './glove/glove.840B.300d.txt'
        word_vec = load_word_vec(fname, word2idx=word2idx, embed_dim=embed_dim)
        print('building embedding_matrix:', embedding_matrix_file_name)
        for word, i in word2idx.items():
            vec = word_vec.get(word)
            if vec is not None:
                # words not found in embedding index will be all-zeros.
                embedding_matrix[i] = vec
        with open(embedding_matrix_file_name, 'wb') as f:
            pickle.dump(embedding_matrix, f)
    return embedding_matrix
class Tokenizer(object):
    """Word-level tokenizer mapping words to integer ids (0 = <pad>, 1 = <unk>)."""
    def __init__(self, word2idx=None):
        if word2idx is None:
            self.word2idx = {}
            self.idx2word = {}
            self.idx = 0
            # reserve the two special tokens at ids 0 and 1
            for special in ('<pad>', '<unk>'):
                self.word2idx[special] = self.idx
                self.idx2word[self.idx] = special
                self.idx += 1
        else:
            self.word2idx = word2idx
            self.idx2word = {v: k for k, v in word2idx.items()}

    def fit_on_text(self, text):
        """Extend the vocabulary with every new lowercased word in ``text``."""
        for word in text.lower().split():
            if word not in self.word2idx:
                self.word2idx[word] = self.idx
                self.idx2word[self.idx] = word
                self.idx += 1

    def text_to_sequence(self, text):
        """Map ``text`` to a list of ids; unknown words become 1, empty text [0]."""
        unknownidx = 1
        sequence = [self.word2idx.get(w, unknownidx) for w in text.lower().split()]
        return sequence if sequence else [0]
class ABSADataset(object):
    """Thin sequence wrapper so a plain list of samples works with data loaders."""
    def __init__(self, data):
        self.data = data

    def __getitem__(self, index):
        """Return the sample stored at ``index``."""
        return self.data[index]

    def __len__(self):
        """Number of samples."""
        return len(self.data)
class ABSADatasetReader:
    """Reads ASTE triplet files ("sentence####[(aspect_idx, opinion_idx, polarity), ...]")
    and converts them into per-sentence features and BIO / span / grid labels.

    Fixes vs. the original: every ``open()`` without a matching ``close()`` now uses
    ``with`` (the original leaked file handles throughout).
    """

    @staticmethod
    def __read_text__(fnames):
        '''a string: sentence1\nsentence2\n...sentencen\n'''
        text = ''
        for fname in fnames:
            with open(fname) as fin:
                lines = fin.readlines()
            for line in lines:
                text += line.split('####')[0].lower().strip() + '\n'
        return text

    @staticmethod
    def __read_triplets__(fnames):
        '''a list: [[([2], [5], 'NEG')], [(),()], [], ..., []]'''
        triplets = []
        for fname in fnames:
            with open(fname) as fin:
                lines = fin.readlines()
            for line in lines:
                # NOTE(review): eval() on file content is unsafe on untrusted data;
                # ast.literal_eval would be safer if the labels are plain literals.
                triple = eval(line.split('####')[1])
                triplets.append(triple)
        return triplets

    @staticmethod
    def __read_all_sentence__(domain):
        '''read all sentence (train/dev/test) to get the representation of relevant sentences'''
        all_sentences = []
        # concatenate in train, dev, test order (indices elsewhere rely on this order)
        for phase in ('train', 'dev', 'test'):
            with open('./ASTE-Data-V2/' + domain + '/' + phase + '_triplets.txt', 'r') as f:
                lines = f.readlines()
            all_sentences.extend(line.split('####')[0] for line in lines)
        return all_sentences

    @staticmethod
    def __triple2bio__(sentences, triplets):
        '''
        convert triplets to BIO labels
        000120000000
        000000012220
        000330000000
        pos1, neg2, neu3
        '''
        sentences = sentences.strip('\n').split('\n')
        sentiment_dic = {'POS': 1, 'NEG': 2, 'NEU': 3}
        aspect_labels, opinion_labels, sentiment_labels = [], [], []
        for sentence, triplet in zip(sentences, triplets):
            tokens = sentence.strip('\n').split()
            a_labels = [0] * len(tokens)
            o_labels = [0] * len(tokens)
            s_labels = [0] * len(tokens)
            for a_index, o_index, polarity in triplet:
                begin, inside = 1, 2
                # aspect span: B on the first token, I on the rest; sentiment on all
                for pos, idx in enumerate(a_index):
                    a_labels[idx] = begin if pos == 0 else inside
                    s_labels[idx] = sentiment_dic[polarity]
                # opinion span: BIO only, no sentiment
                for pos, idx in enumerate(o_index):
                    o_labels[idx] = begin if pos == 0 else inside
            aspect_labels.append(a_labels)
            opinion_labels.append(o_labels)
            sentiment_labels.append(s_labels)
        return aspect_labels, opinion_labels, sentiment_labels

    @staticmethod
    def __triple2span__(sentences, triplets):
        '''
        convert bio labels to span labels
        00000
        01000
        00000
        00000
        the index of 1 denotes the start and end of term
        '''
        sentences = sentences.strip('\n').split('\n')
        aspect_span, opinion_span = [], []
        for sentence, triple in zip(sentences, triplets):
            n = len(sentence.strip('\n').split())
            matrix_span_aspect = np.zeros((n, n)).astype('float32')
            matrix_span_opinion = np.zeros((n, n)).astype('float32')
            for tri in triple:
                # mark matrix[start][end] = 1 for each term span
                a_start, a_end, o_start, o_end = tri[0][0], tri[0][-1], tri[1][0], tri[1][-1]
                matrix_span_aspect[a_start][a_end] = 1
                matrix_span_opinion[o_start][o_end] = 1
            aspect_span.append(matrix_span_aspect)
            opinion_span.append(matrix_span_opinion)
        return aspect_span, opinion_span

    @staticmethod
    def __triple2grid__(sentences, triplets):
        '''
        convert triplets to grid label for pair and triplet
        row aspect, col opinion
        00000 00000
        01220 03330
        02220 03330 pos1 neg2 neu3
        padding
        '''
        sentiment_dic = {'POS': 1, 'NEG': 2, 'NEU': 3}
        sentences = sentences.strip('\n').split('\n')
        pair_grid_labels, triple_grid_labels = {}, {}
        for i in range(len(sentences)):
            sentence, triplet = sentences[i].strip('\n').split(), triplets[i]
            matrix_pair = np.zeros((len(sentence), len(sentence))).astype('float32')
            matrix_triple = np.zeros((len(sentence), len(sentence))).astype('float32')
            for tri in triplet:
                # fill the aspect x opinion rectangle with 2 (pair) / polarity (triplet)
                for j in tri[0]:
                    for k in tri[1]:
                        matrix_pair[j][k] = 2
                        matrix_triple[j][k] = sentiment_dic[tri[2]]
                # the (aspect-start, opinion-start) cell is marked 1
                matrix_pair[tri[0][0]][tri[1][0]] = 1
            pair_grid_labels[i] = matrix_pair
            triple_grid_labels[i] = matrix_triple
        return pair_grid_labels, triple_grid_labels

    @staticmethod
    def __mask__(sentences):
        """One [1]*len(tokens) list per sentence (real-token mask before padding)."""
        sentences = sentences.strip('\n').split('\n')
        mask = []
        for sentence in sentences:
            tokens = sentence.strip('\n').split()
            mask.append([1] * len(tokens))
        return mask

    @staticmethod
    def __read_data__(fname, domain, phase, tokenizer):
        """Assemble the full feature/label dict for every sentence of one split."""
        # read raw data
        sentence = ABSADatasetReader.__read_text__([fname])  # a long string splited by '\n'
        triplets = ABSADatasetReader.__read_triplets__([fname])  # a long list containing multiple lists for sentences
        assert len(sentence.strip('\n').split('\n')) == len(triplets)
        all_sentences = ABSADatasetReader.__read_all_sentence__(domain)
        # generate basic labels
        aspect_sequence_labels, opinion_sequence_labels, sentiment_sequence_labels = ABSADatasetReader.__triple2bio__(sentence, triplets)
        aspect_span_labels, opinion_span_labels = ABSADatasetReader.__triple2span__(sentence, triplets)
        pair_grid_labels, triple_grid_labels = ABSADatasetReader.__triple2grid__(sentence, triplets)
        text_mask = ABSADatasetReader.__mask__(sentence)
        # read relevant sentences (one whitespace-separated index line per sentence)
        with open('./ASTE-Rele-Sentences/' + domain + '/' + phase + '_r_fine_tune_52.txt', 'r') as f:
            relevant_sentences_index = f.read().split('\n')
        # four types of global graphs
        global_graphs = []
        for graph_id in range(4):
            with open('./ASTE-Graph-V2/' + domain + '/global_graph' + str(graph_id) + '/' + phase + '_g_final.graph', 'rb') as f:
                global_graphs.append(pickle.load(f))
        global_graph0, global_graph1, global_graph2, global_graph3 = global_graphs
        # store all data for bucket
        all_data = []
        lines = sentence.strip('\n').split('\n')
        for i in range(len(lines)):
            # raw text, text indices and text mask
            text = lines[i].lower().strip()
            text_indices = tokenizer.text_to_sequence(text)
            mask = text_mask[i]
            # index of relevant sentences for this sentence, plus their token indices
            relevant_sentences = [int(idx) for idx in relevant_sentences_index[i].strip().split()]
            relevant_sentences_presentation = [
                tokenizer.text_to_sequence(all_sentences[mm]) for mm in relevant_sentences
            ]
            data = {
                'text_indices': text_indices,
                'mask': mask,
                'global_graph0': global_graph0[i],
                'global_graph1': global_graph1[i],
                'global_graph2': global_graph2[i],
                'global_graph3': global_graph3[i],
                'relevant_sentences': relevant_sentences,
                'relevant_sentence_presentation': relevant_sentences_presentation,
                'aspect_sequence_label': aspect_sequence_labels[i],
                'opinion_sequence_label': opinion_sequence_labels[i],
                'sentiment_sequence_label': sentiment_sequence_labels[i],
                'aspect_span_labels': aspect_span_labels[i],
                'opinion_span_labels': opinion_span_labels[i],
                'pair_grid_labels': pair_grid_labels[i],
                'triple_grid_labels': triple_grid_labels[i]
            }
            all_data.append(data)
        return all_data

    def __init__(self, dataset='res14', embed_dim=300):
        print("preparing {0} dataset ...".format(dataset))
        fname = {
            'res14': {
                'train': './ASTE-Data-V2/res14/train_triplets.txt',
                'test': './ASTE-Data-V2/res14/test_triplets.txt',
                'dev': './ASTE-Data-V2/res14/dev_triplets.txt'
            },
            'lap14': {
                'train': './ASTE-Data-V2/lap14/train_triplets.txt',
                'test': './ASTE-Data-V2/lap14/test_triplets.txt',
                'dev': './ASTE-Data-V2/lap14/dev_triplets.txt'
            },
            'res15': {
                'train': './ASTE-Data-V2/res15/train_triplets.txt',
                'test': './ASTE-Data-V2/res15/test_triplets.txt',
                'dev': './ASTE-Data-V2/res15/dev_triplets.txt'
            },
            'res16': {
                'train': './ASTE-Data-V2/res16/train_triplets.txt',
                'test': './ASTE-Data-V2/res16/test_triplets.txt',
                'dev': './ASTE-Data-V2/res16/dev_triplets.txt'
            },
            # NOTE(review): 'mams' points at the res16 files in the original —
            # looks like a copy-paste slip; confirm against the real data layout.
            'mams': {
                'train': './ASTE-Data-V2/res16/train_triplets.txt',
                'test': './ASTE-Data-V2/res16/test_triplets.txt',
                'dev': './ASTE-Data-V2/res16/dev_triplets.txt'
            }
        }
        # fit (or reload) the tokenizer on the union of all three splits
        text = ABSADatasetReader.__read_text__([fname[dataset]['train'], fname[dataset]['dev'], fname[dataset]['test']])
        if os.path.exists(dataset + '_word2idx.pkl'):
            print("loading {0} tokenizer...".format(dataset))
            with open(dataset + '_word2idx.pkl', 'rb') as f:
                word2idx = pickle.load(f)
                tokenizer = Tokenizer(word2idx=word2idx)
        else:
            tokenizer = Tokenizer()
            tokenizer.fit_on_text(text)
            with open(dataset + '_word2idx.pkl', 'wb') as f:
                pickle.dump(tokenizer.word2idx, f)
        self.embedding_matrix = build_embedding_matrix(tokenizer.word2idx, embed_dim, dataset)
        self.train_data = ABSADataset(ABSADatasetReader.__read_data__(fname=fname[dataset]['train'], domain=dataset, phase='train', tokenizer=tokenizer))
        self.dev_data = ABSADataset(ABSADatasetReader.__read_data__(fname=fname[dataset]['dev'], domain=dataset, phase='dev', tokenizer=tokenizer))
        self.test_data = ABSADataset(ABSADatasetReader.__read_data__(fname=fname[dataset]['test'], domain=dataset, phase='test', tokenizer=tokenizer))
if __name__ == '__main__':
    # Smoke-test: parse the res14 training split with a fresh tokenizer.
    smoke_tokenizer = Tokenizer()
    ABSADatasetReader.__read_data__(
        fname='./ASTE-Data-V2/res14/train_triplets.txt',
        domain='res14',
        phase='train',
        tokenizer=smoke_tokenizer,
    )
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,959
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/models/ts0.py
|
# -*- coding: utf-8 -*-
from layers.dynamic_rnn import DynamicLSTM
import torch
import torch.nn as nn
import pdb
import torch.nn.functional as F
from torch.nn import init
import numpy as np
import math
from torch import Tensor
def generate_formal_adj(init_adj):
    '''input: a simple adj with a size of (row, column)
    output: a complete and formal adj with a size of (row+column, row+column)

    Builds the bipartite block matrix [[0, A], [A^T, 0]] on the GPU.

    Fixes vs. the original: the zero blocks were allocated on the CPU via numpy
    and then copied to the GPU, and nearly every intermediate carried a redundant
    .cuda() call (no-op transfers); zeros are now created directly on the device.
    '''
    batch, row, column = init_adj.shape
    # off-diagonal blocks carry the bipartite edges; the transpose mirrors them
    ru = init_adj.float().cuda()                              # up right (batch, row, column)
    ld = ru.transpose(1, 2)                                   # down left (batch, column, row)
    # diagonal blocks are zero: no intra-set edges
    lu = torch.zeros(batch, row, row, device=ru.device)       # up left
    rd = torch.zeros(batch, column, column, device=ru.device) # down right
    up = torch.cat([lu, ru], -1)        # (batch, row, row+column)
    down = torch.cat([ld, rd], -1)      # (batch, column, row+column)
    return torch.cat([up, down], 1)     # (batch, row+column, row+column)
def preprocess_adj(A):
    '''
    for batch data
    Pre-process adjacency matrix with symmetric normalization: D^{-1/2} A D^{-1/2}
    :param A: adjacency matrix, (batch, n, n)
    :return: normalized adjacency, (batch, n, n), on the GPU

    Fixes vs. the original: redundant .cuda() calls removed; the diagonal matrix
    was built via an eye/repeat trick and is now torch.diag_embed; the separate
    isinf/isnan clamps are now a single torch.nan_to_num.
    '''
    assert A.shape[-1] == A.shape[-2]
    # self-loops deliberately disabled (the original had "+ I" commented out)
    A_hat = A.cuda()
    deg = torch.sum(A_hat, dim=-1)
    deg_inv_sqrt = torch.pow(deg, -0.5)
    # zero-degree rows produce inf (and negative degrees nan); clamp to 0 so they stay isolated
    deg_inv_sqrt = torch.nan_to_num(deg_inv_sqrt, nan=0.0, posinf=0.0, neginf=0.0)
    D_inv_sqrt = torch.diag_embed(deg_inv_sqrt)
    return torch.matmul(torch.matmul(D_inv_sqrt, A_hat), D_inv_sqrt)
class SequenceLabelForAO(nn.Module):
    """Two-headed sequence tagger: one shared projection, separate aspect/opinion heads."""
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForAO, self).__init__()
        self.tag_size = tag_size
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag_sub = nn.Linear(int(hidden_size / 2), self.tag_size)
        self.hidden2tag_obj = nn.Linear(int(hidden_size / 2), self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (aspect logits, opinion logits)."""
        shared = self.dropout(torch.relu(self.linear(input_features)))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class SequenceLabelForAOS(nn.Module):
    """Three-headed tagger: aspect, opinion, and sentiment logits from one shared projection."""
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForAOS, self).__init__()
        self.tag_size = tag_size
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag_sub = nn.Linear(int(hidden_size / 2), self.tag_size)
        self.hidden2tag_obj = nn.Linear(int(hidden_size / 2), self.tag_size)
        # sentiment head has one extra class (tag_size + 1)
        self.hidden2tag_senti = nn.Linear(int(hidden_size / 2), self.tag_size + 1)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (aspect, opinion, sentiment) logits."""
        shared = self.dropout(torch.relu(self.linear(input_features)))
        return (self.hidden2tag_sub(shared),
                self.hidden2tag_obj(shared),
                self.hidden2tag_senti(shared))
class CustomizeSequenceLabelForAO(nn.Module):
    """Tagger with a shared branch plus task-specific branches for aspect and opinion.

    Fixes vs. the original:
    - the ReLU/dropout in the ATE/OTE branches operated on the *shared* features,
      silently discarding the task-specific projections (features_tmp_a/features_tmp_o
      were computed and then overwritten);
    - the opinion head reused self.linear_a, leaving self.linear_o dead.
    """
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(CustomizeSequenceLabelForAO, self).__init__()
        self.tag_size = tag_size
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag_sub = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag_obj = nn.Linear(hidden_size, int(hidden_size / 2))
        self.linear_a = nn.Linear(hidden_size, self.tag_size)
        self.linear_o = nn.Linear(hidden_size, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (aspect logits, opinion logits)."""
        # shared branch
        shared = self.dropout(torch.relu(self.linear(input_features)))
        # ATE branch — now actually uses its own projection
        branch_a = self.dropout(torch.relu(self.hidden2tag_sub(input_features)))
        # OTE branch
        branch_o = self.dropout(torch.relu(self.hidden2tag_obj(input_features)))
        # concatenate shared + task-specific features, then classify per task
        sub_output = self.linear_a(torch.cat([shared, branch_a], -1))
        obj_output = self.linear_o(torch.cat([shared, branch_o], -1))
        return sub_output, obj_output
class SequenceLabelForTriple(nn.Module):
    """Two-headed grid tagger: pair head (tag_size) and triplet head (tag_size + 1)."""
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForTriple, self).__init__()
        self.tag_size = tag_size
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag_sub = nn.Linear(int(hidden_size / 2), self.tag_size)
        self.hidden2tag_obj = nn.Linear(int(hidden_size / 2), self.tag_size + 1)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (pair logits, triplet logits)."""
        shared = self.dropout(torch.relu(self.linear(input_features)))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class MultiNonLinearClassifier(nn.Module):
    """Two-layer classification head: Linear -> ReLU -> Dropout -> Linear."""
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(MultiNonLinearClassifier, self).__init__()
        self.tag_size = tag_size
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag = nn.Linear(int(hidden_size / 2), self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (..., hidden_size) -> (..., tag_size) logits."""
        hidden = self.dropout(torch.relu(self.linear(input_features)))
        return self.hidden2tag(hidden)
class SequenceLabelForGrid(nn.Module):
    """Two-headed grid tagger with identically sized heads (both tag_size)."""
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForGrid, self).__init__()
        self.tag_size = tag_size
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag_sub = nn.Linear(int(hidden_size / 2), self.tag_size)
        self.hidden2tag_obj = nn.Linear(int(hidden_size / 2), self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (sub logits, obj logits)."""
        shared = self.dropout(torch.relu(self.linear(input_features)))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class PairGeneration(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    Projects the input with two learned linear maps and returns the Gram matrix
    ``hidden1 @ hidden2^T`` — a (bs, seq_len, seq_len) pairwise score map.

    Fix vs. the original: ``F.linear`` already applies the bias, but the bias was
    then added a second time in forward; the redundant addition is removed.
    """
    def __init__(self, features: int, bias: bool = False, device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(PairGeneration, self).__init__()
        self.features = features
        self.weight1 = nn.Parameter(torch.empty((features, features), **factory_kwargs))
        self.weight2 = nn.Parameter(torch.empty((features, features), **factory_kwargs))
        if bias:
            self.bias1 = nn.Parameter(torch.empty(features, **factory_kwargs))
            self.bias2 = nn.Parameter(torch.empty(features, **factory_kwargs))
        else:
            self.register_parameter('bias1', None)
            self.register_parameter('bias2', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """nn.Linear-style init: Kaiming-uniform weights, fan-in bounded biases."""
        init.kaiming_uniform_(self.weight1, a=math.sqrt(5))
        init.kaiming_uniform_(self.weight2, a=math.sqrt(5))
        if self.bias1 is not None and self.bias2 is not None:
            fan_in1, _ = init._calculate_fan_in_and_fan_out(self.weight1)
            fan_in2, _ = init._calculate_fan_in_and_fan_out(self.weight2)
            bound1 = 1 / math.sqrt(fan_in1) if fan_in1 > 0 else 0
            bound2 = 1 / math.sqrt(fan_in2) if fan_in2 > 0 else 0
            init.uniform_(self.bias1, -bound1, bound1)
            init.uniform_(self.bias2, -bound2, bound2)

    def forward(self, input: Tensor) -> Tensor:
        """input: (bs, seq_len, features) -> (bs, seq_len, seq_len) pairwise scores."""
        # F.linear computes input @ weight^T + bias, so the bias is applied exactly once here
        hidden1 = F.linear(input, self.weight1, self.bias1)
        hidden2 = F.linear(input, self.weight2, self.bias2)
        return torch.matmul(hidden1, hidden2.permute(0, 2, 1))
class PairGeneration0(nn.Module):
    """Parameter-free pairing: output[b, i, j] = cat(text[b, j], text[b, i])."""
    def __init__(self):
        super(PairGeneration0, self).__init__()

    def forward(self, text):
        """text: (bs, seq_len, d) -> (bs, seq_len, seq_len, 2 * d)."""
        seq_len = text.shape[1]
        rows = text.unsqueeze(1).repeat(1, seq_len, 1, 1)   # rows[b, i, j] = text[b, j]
        cols = text.unsqueeze(2).repeat(1, 1, seq_len, 1)   # cols[b, i, j] = text[b, i]
        return torch.cat((rows, cols), -1)
class PairGeneration1(nn.Module):
    """Pairs every (i, j) token combination, then linearly projects the concatenation.

    Fix vs. the original: ``reset_parameters`` was defined but never invoked, so
    weight/bias kept the arbitrary contents of ``torch.empty``; it is now called
    from ``__init__``.
    """
    def __init__(self, in_features: int, out_features: int, bias: bool = False, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(PairGeneration1, self).__init__()
        self.in_features, self.out_features = in_features, out_features
        # the weight maps the concatenated pair (2 * in_features) to out_features
        self.weight = nn.Parameter(torch.empty((out_features, in_features * 2), **factory_kwargs))
        if bias:
            self.bias = nn.Parameter(torch.empty(out_features, **factory_kwargs))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """nn.Linear-style init: Kaiming-uniform weight, fan-in bounded bias."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            init.uniform_(self.bias, -bound, bound)

    def forward(self, text):
        """text: (bs, seq_len, in_features) -> (bs, seq_len, seq_len, out_features)."""
        seq_len = text.shape[1]
        rows = text.unsqueeze(1).repeat(1, seq_len, 1, 1)
        cols = text.unsqueeze(2).repeat(1, 1, seq_len, 1)
        pairs = torch.cat((rows, cols), -1)
        return F.linear(pairs, self.weight, self.bias)
class GCNLayer(nn.Module):
    """Bias-free linear projection followed by an optional PReLU."""
    def __init__(self, in_dim, out_dim, acti=True):
        super(GCNLayer, self).__init__()
        self.linear = nn.Linear(in_dim, out_dim, bias=False)  # bias = False is also ok.
        self.acti = nn.PReLU() if acti else None

    def forward(self, F):
        projected = self.linear(F)
        return projected if self.acti is None else self.acti(projected)
class GCNforFeature_1(nn.Module):
    """One-hop GCN feature extractor: dropout -> A @ X -> GCNLayer."""
    def __init__(self, input_dim, hidden_dim, p):
        super(GCNforFeature_1, self).__init__()
        self.gcn_layer1 = GCNLayer(input_dim, hidden_dim)
        self.dropout = nn.Dropout(p)

    def forward(self, A, X):
        """A: adjacency; X: node features -> (.., hidden_dim) features."""
        aggregated = torch.matmul(A, self.dropout(X.float()))
        return self.gcn_layer1(aggregated)
class GCNforFeature_2(nn.Module):
    """Two-hop GCN feature extractor with dropout between hops."""
    def __init__(self, input_dim, hidden_dim, out_dim, p):
        super(GCNforFeature_2, self).__init__()
        self.gcn_layer1 = GCNLayer(input_dim, hidden_dim)
        self.gcn_layer2 = GCNLayer(hidden_dim, out_dim)
        self.dropout = nn.Dropout(p)

    def forward(self, A, X):
        """A: adjacency; X: node features -> (.., out_dim) features."""
        hop1 = self.gcn_layer1(torch.matmul(A, self.dropout(X.float())))
        hop2 = torch.matmul(A, self.dropout(hop1.float()))
        return self.gcn_layer2(hop2)
class GCNforSequence(nn.Module):
    """Shared first GCN hop, then two independent second hops producing two outputs."""
    def __init__(self, input_dim, hidden_dim, out_dim, p):
        super(GCNforSequence, self).__init__()
        self.gcn_layer1 = GCNLayer(input_dim, hidden_dim, True)
        self.gcn_layer2 = GCNLayer(hidden_dim, out_dim, False)
        self.gcn_layer3 = GCNLayer(hidden_dim, out_dim, False)
        self.dropout = nn.Dropout(p)

    def forward(self, A, X):
        """A: adjacency; X: node features -> (output1, output2), each (.., out_dim)."""
        shared = self.gcn_layer1(torch.matmul(A, self.dropout(X.float())))
        # each branch applies its own dropout mask to the shared features
        out1 = self.gcn_layer2(torch.matmul(A, self.dropout(shared.float())))
        out2 = self.gcn_layer3(torch.matmul(A, self.dropout(shared.float())))
        return out1, out2
class GCNforTriple(nn.Module):
    """Shared GCN hop, two branch hops, each expanded into pairwise features."""
    def __init__(self, input_dim, hidden_dim, out_dim, class_num, p):
        super(GCNforTriple, self).__init__()
        self.gcn_layer1 = GCNLayer(input_dim, hidden_dim, True)
        self.gcn_layer2 = GCNLayer(hidden_dim, out_dim, False)
        self.gcn_layer3 = GCNLayer(hidden_dim, out_dim, False)
        self.pair_generation = PairGeneration0()
        self.dropout = nn.Dropout(p)
        # NOTE(review): linear1/linear2 are currently unused in forward (the
        # classification lines are disabled); kept for checkpoint compatibility.
        self.linear1 = nn.Linear(out_dim * 2, class_num, bias=False)
        self.linear2 = nn.Linear(out_dim * 2, class_num + 1, bias=False)

    def forward(self, A, X):
        """A: adjacency; X: node features -> (pair_text, triple_text) pairwise features."""
        shared = self.gcn_layer1(torch.matmul(A, self.dropout(X.float())))
        branch1 = self.gcn_layer2(torch.matmul(A, self.dropout(shared.float())))
        pair_text = self.pair_generation(branch1)
        branch2 = self.gcn_layer3(torch.matmul(A, self.dropout(shared.float())))
        triple_text = self.pair_generation(branch2)
        return pair_text, triple_text
class Atten_adj(nn.Module):
    """Bilinear self-attention producing a row-softmaxed adjacency matrix.

    Fix vs. the original: the weight parameter was allocated with
    ``torch.FloatTensor`` but never initialized anywhere (arbitrary memory
    contents); it is now Xavier-initialized.
    """
    def __init__(self, input_dim):
        super(Atten_adj, self).__init__()
        self.weight = nn.Parameter(torch.FloatTensor(input_dim, input_dim))
        init.xavier_uniform_(self.weight)

    def forward(self, attention_feature):
        """attention_feature: (bs, seq_len, d) -> (bs, seq_len, seq_len) attention."""
        scores = torch.matmul(attention_feature, self.weight)
        scores = torch.matmul(scores, attention_feature.permute(0, 2, 1))
        return F.softmax(scores, -1)
class TS0(nn.Module):
def __init__(self, embedding_matrix, opt):
    """Build the TS0 model: frozen pretrained embeddings, two BiLSTMs, pair
    generators, GCN feature extractors, and the tagging/classification heads.

    Args:
        embedding_matrix: pretrained word vectors, one row per vocabulary id.
        opt: run configuration object (stored as-is; fields used elsewhere).
    """
    super(TS0, self).__init__()
    self.opt = opt
    # pretrained word embeddings (float32)
    self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
    # BiLSTM encoders: 300 -> 2x300 (input sentence, and relevant sentences)
    self.lstm = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type = 'LSTM')
    self.lstm_ = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type = 'LSTM')
    self.text_embed_dropout = nn.Dropout(0.5)
    # pairwise representation builders (concatenation / bilinear / linear projection)
    self.pairgeneration = PairGeneration0()
    self.gridgeneration = PairGeneration(900)
    self.pairgeneration1 = PairGeneration1(900, 900)
    # GCN feature extractors over the (sentence + relevant-sentence) global graph
    self.gcn0 = GCNforFeature_1(600, 300, 0.5)
    self.gcn1 = GCNforFeature_1(600, 300, 0.5)
    self.gcn2 = GCNforFeature_2(600, 300, 300, 0.5)
    # tagging / grid heads
    self.aspect_opinion_classifier = SequenceLabelForAO(900, 3, 0.5)
    self.triple_classifier = SequenceLabelForTriple(1800, 3, 0.5)
    self.aspect_opinion_sequence_classifier = GCNforSequence(600, 300, 3, 0.5)
    self.pair_triple_classifier = GCNforTriple(600, 300, 150, 3, 0.5)
    self.pair_classifier = nn.Linear(300, 3)
    self.triplet_classifier = nn.Linear(300, 4)
    # learned attention-based adjacency over 600-d features
    self.atten_adj = Atten_adj(600)
def forward(self, inputs, mask):
# input
text_indices, mask, global_adj, relevant_sentences, relevant_sentences_presentation,_, _, _, _, _ = inputs
# prepare
batch_size = text_indices.shape[0]
sentence_len = text_indices.shape[1]
rele_sen_num = relevant_sentences.shape[1]
rele_sen_len = relevant_sentences_presentation.shape[-1]
# process global adj to get formal adj and norm
formal_global_adj = generate_formal_adj(global_adj)
norm_global_adj = preprocess_adj(formal_global_adj)
# get sentence mask
mask_ = mask.view(-1,1)
# input sentnece s_0
text_len = torch.sum(text_indices != 0, dim=-1)
word_embeddings = self.embed(text_indices)
text = self.text_embed_dropout(word_embeddings)
text_out, (_, _) = self.lstm(text, text_len.cpu()) # 32, 13, 600
# relevant sentences, for every sentence s_0, there are T relevant sentences s_1, s_2, ..., s_T
relevant_sentences_presentation_ = torch.reshape(relevant_sentences_presentation, (-1, relevant_sentences_presentation.shape[-1]))
sentence_text_len = torch.sum(relevant_sentences_presentation_!= 0, dim=-1)
sentence_embedding = self.embed(relevant_sentences_presentation)
sentence_text_ = self.text_embed_dropout(sentence_embedding)
sentence_text = torch.reshape(sentence_text_, (-1, sentence_text_.shape[-2], sentence_text_.shape[-1]))
ones = torch.ones_like(sentence_text_len)
sentence_text_out, (sentence_text_out1, b_) = self.lstm_(sentence_text, torch.where(sentence_text_len <= 0, ones, sentence_text_len).cpu())
sentence_text_out = torch.reshape(sentence_text_out, (relevant_sentences.shape[0], relevant_sentences.shape[1], sentence_text_out.shape[-2], sentence_text_out.shape[-1]))
sentence_text_out1 = torch.reshape(sentence_text_out1, (relevant_sentences.shape[0], relevant_sentences.shape[1], -1))
# attention = F.softmax(torch.matmul(sentence_text_out, sentence_text_out.permute(0,1,3,2)), dim=-1)
# sentence_text_out2 = torch.matmul(attention, sentence_text_out).sum(2)
# process formal features to match the formal adj
formal_global_features = torch.cat([text_out, sentence_text_out1], 1)
# use attention to construct graph
# attention = self.atten_adj(formal_global_features)
# norm_global_adj = preprocess_adj(attention)
# GCN with local global graph
if self.opt.gcn_layers_in_graph0 == 1:
global_text_out = self.gcn1(norm_global_adj, formal_global_features)[:, :sentence_len, :]
elif self.opt.gcn_layers_in_graph0 == 2:
global_text_out = self.gcn2(norm_global_adj, formal_global_features)[:, :sentence_len, :]
# global_text_out_tem = torch.matmul(norm_global_adj, formal_global_features)[:, :sentence_len, :]
'''aspect_probs, opinion_probs = self.aspect_opinion_sequence_classifier(norm_global_adj, formal_global_features)
pair_text, triple_text = self.pair_triple_classifier(norm_global_adj, formal_global_features)
aspect_probs, opinion_probs = aspect_probs[:, :sentence_len, :], opinion_probs[:, :sentence_len, :]
pair_text, triple_text = pair_text[:, :sentence_len, :sentence_len, :], triple_text[:, :sentence_len, :sentence_len, :]
pair_probs, triple_probs = self.pair_classifier(pair_text), self.triplet_classifier(triple_text)'''
# unified features
unified_text = torch.cat([text_out.float(), global_text_out.float()], -1)
# pair generation
pair_text = self.pairgeneration(unified_text)
# AE and OE scores (BIO tagging)
aspect_probs, opinion_probs = self.aspect_opinion_classifier(unified_text.float())
aspect_probs, opinion_probs = aspect_probs.contiguous().view(-1, 3), opinion_probs.contiguous().view(-1, 3)
# pair mask for pair prediction (according to aspect and opinion probs)
# pair_mask = torch.unsqueeze((aspect_probs[:,-1]+aspect_probs[:,-2]).view(text_out.shape[0],-1),1).repeat(1,text_out.shape[1],1)\
# + torch.unsqueeze((opinion_probs[:,-1]+opinion_probs[:,-2]).view(text_out.shape[0],-1), 2).repeat(1,1,text_out.shape[1])
# pair_mask_ = pair_mask.view(-1,1)
# pair_mask_grid = torch.unsqueeze(pair_mask,-1).repeat(1,1,1,pair_text.shape[-1])
# pair scores
pair_probs_, triple_probs_ = self.triple_classifier(pair_text.float())
pair_probs = pair_probs_.contiguous().view(-1, 3)
triple_probs = triple_probs_.contiguous().view(-1, 4)
return aspect_probs, opinion_probs, pair_probs, triple_probs
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,960
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/models/ts1.py
|
# # # -*- coding: utf-8 -*-
# # from layers.dynamic_rnn import DynamicLSTM
# # import torch
# # import torch.nn as nn
# # import pdb
# # import torch.nn.functional as F
# # import numpy as np
# # def generate_formal_adj(init_adj):
# # '''input: a simple adj with a size of (row, column)
# # output: a complete and formal adj with a size of (row+column, row+column)'''
# # batch, row, column = init_adj.shape
# # # up left matrix (batch, row, row)
# # lu = torch.tensor(np.zeros((batch, row, row)).astype('float32')).cuda()
# # # up right (batch, row, column)
# # ru = init_adj.cuda()
# # # down left (batch, column, row)
# # ld = init_adj.transpose(1, 2).cuda()
# # # down right (batch, column, column)
# # rd = torch.tensor(np.zeros((batch, column, column)).astype('float32')).cuda()
# # # up (batch, row, row+column)
# # up = torch.cat([lu.float(), ru.float()], -1).cuda()
# # # down (batch, column, row+column)
# # down = torch.cat([ld.float(), rd.float()], -1).cuda()
# # # final (batch, row+column, row+column)
# # final = torch.cat([up,down],1).cuda()
# # return final.cuda()
# # def preprocess_adj(A):
# # '''
# # for batch data
# # Pre-process adjacency matrix
# # :param A: adjacency matrix
# # :return:
# # '''
# # # prepare
# # assert A.shape[-1] == A.shape[-2]
# # batch = A.shape[0]
# # num = A.shape[-1]
# # # generate eye
# # I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()
# # #
# # A_hat = A.cuda() + I
# # #
# # D_hat_diag = torch.sum(A_hat.cuda(), axis=-1)
# # #
# # D_hat_diag_inv_sqrt = torch.pow(D_hat_diag.cuda(), -0.5)
# # # inf
# # D_hat_diag_inv_sqrt = torch.where(torch.isinf(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())
# # D_hat_diag_inv_sqrt = torch.where(torch.isnan(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())
# # #
# # tem_I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()
# # D_hat_diag_inv_sqrt_ = D_hat_diag_inv_sqrt.unsqueeze(-1).repeat(1,1,num).cuda()
# # D_hat_inv_sqrt = D_hat_diag_inv_sqrt_ * tem_I
# # #
# # return torch.matmul(torch.matmul(D_hat_inv_sqrt.cuda(), A_hat.cuda()), D_hat_inv_sqrt.cuda())
# # class GraphConvolution(nn.Module):
# # """
# # Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
# # """
# # def __init__(self, in_features, out_features, bias=True):
# # super(GraphConvolution, self).__init__()
# # self.in_features = in_features
# # self.out_features = out_features
# # self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
# # if bias:
# # self.bias = nn.Parameter(torch.FloatTensor(out_features))
# # else:
# # self.register_parameter('bias', None)
# # def forward(self, text, adj):
# # hidden = torch.matmul(text, self.weight)
# # denom = torch.sum(adj, dim=2, keepdim=True) + 1
# # # adj = torch.tensor(adj)
# # adj = torch.tensor(adj, dtype=torch.float32)
# # # hidden = torch.tensor(hidden)
# # hidden = torch.tensor(hidden, dtype=torch.float32)
# # output = torch.matmul(adj.cuda(), hidden.cuda()) / denom.cuda()
# # # print(output.shape)
# # # print(self.bias.shape)
# # if self.bias is not None:
# # return output + self.bias
# # else:
# # return output
# # class PairGeneration(nn.Module):
# # """
# # Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
# # """
# # def __init__(self, features, bias=False):
# # super(PairGeneration, self).__init__() # 32,13,300 32,300,13
# # self.features = features
# # # self.out_features = out_features
# # self.weight = nn.Parameter(torch.FloatTensor(features, features))
# # if bias:
# # self.bias = nn.Parameter(torch.FloatTensor(features))
# # else:
# # self.register_parameter('bias', None)
# # def forward(self, text):
# # hidden = torch.matmul(text.float(), self.weight)
# # # print(hidden.shape)
# # # denom = torch.sum(adj, dim=2, keepdim=True) + 1
# # # adj = torch.tensor(adj, dtype=torch.float32)
# # hidden_ = torch.tensor(hidden, dtype=torch.float32)
# # # print(hidden_.shape)
# # output = torch.matmul(hidden_, hidden.permute(0,2,1))
# # # print(output.shape)
# # if self.bias is not None:
# # return output + self.bias
# # else:
# # return output
# # class PairGeneration0(nn.Module):
# # def __init__(self, features, bias=False):
# # super(PairGeneration0, self).__init__() # 32,13,300 32,300,13
# # self.features = features
# # # self.out_features = out_features
# # self.weight = nn.Parameter(torch.FloatTensor(features, features))
# # if bias:
# # self.bias = nn.Parameter(torch.FloatTensor(features))
# # else:
# # self.register_parameter('bias', None)
# # def forward(self, text):
# # hidden_1 = torch.unsqueeze(text,1).repeat(1,text.shape[1],1,1)
# # hidden_2 = torch.unsqueeze(text,2).repeat(1,1,text.shape[1],1)
# # output = torch.cat((hidden_1, hidden_2),-1)
# # return output
# # class GCNLayer(nn.Module):
# # def __init__(self, in_dim, out_dim, acti=False):
# # super(GCNLayer, self).__init__()
# # self.linear = nn.Linear(in_dim, out_dim) # bias = False is also ok.
# # if acti:
# # self.acti = nn.ReLU(inplace=True)
# # else:
# # self.acti = None
# # def forward(self, F):
# # output = self.linear(F)
# # if not self.acti:
# # return output
# # return self.acti(output)
# # class GCN(nn.Module):
# # def __init__(self, input_dim, hidden_dim, num_classes, p):
# # super(GCN, self).__init__()
# # self.gcn_layer1 = GCNLayer(input_dim, hidden_dim)
# # self.gcn_layer2 = GCNLayer(hidden_dim, num_classes, acti=False)
# # self.dropout = nn.Dropout(p)
# # def forward(self, A, X):
# # X = self.dropout(X.float().cuda())
# # F = torch.matmul(A.cuda(), X.cuda())
# # F = self.gcn_layer1(F.cuda())
# # output = F
# # # F = self.dropout(F.cuda())
# # # F = torch.matmul(A, F.cuda())
# # # output = self.gcn_layer2(F.cuda())
# # return output
# # class TS1(nn.Module):
# # def __init__(self, embedding_matrix, opt):
# # super(TS1, self).__init__()
# # self.opt = opt
# # self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
# # self.lstm = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type = 'LSTM')
# # self.lstm_ = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type = 'LSTM')
# # self.gcn0 = GCN(600, 300, 3, 0.3)
# # self.gcn1 = GCN(600, 600, 3, 0.3)
# # self.sigmoid = nn.Sigmoid()
# # self.text_embed_dropout = nn.Dropout(0.3)
# # self.soft = nn.Softmax(dim=-1)
# # self.pairgeneration = PairGeneration0(600)
# # self.fc_aspect_h = nn.Linear(900, 300)
# # self.fc_opinion_h = nn.Linear(900, 300)
# # self.fc_pair_h = nn.Linear(1800, 600)
# # self.fc_aspect = nn.Linear(300, 3)
# # self.fc_opinion = nn.Linear(300, 3)
# # self.fc_pair = nn.Linear(600, 3)
# # def forward(self, inputs, mask):
# # # input
# # text_indices, mask, local_adj, global_adj, global_adj1, relevant_sentences, relevant_sentences_presentation, m_, n_, local_adj_pmi,_,_,_ = inputs
# # # prepare
# # batch_size = text_indices.shape[0]
# # sentence_len = text_indices.shape[1]
# # rele_sen_num = relevant_sentences.shape[1]
# # rele_sen_len = relevant_sentences_presentation.shape[-1]
# # # process input
# # # global_adj1 = torch.reshape(global_adj1, (batch_size, rele_sen_num, rele_sen_num*rele_sen_len))
# # global_adj1 = torch.reshape(global_adj1.permute(0,2,1,3), (batch_size, rele_sen_num, rele_sen_num*rele_sen_len))
# # # global_adj1_ = global_adj1.permute(0,2,1,3)
# # # process global adj to get formal adj and norm
# # formal_global_adj = generate_formal_adj(global_adj)
# # norm_global_adj = preprocess_adj(formal_global_adj)
# # formal_global_adj1 = generate_formal_adj(global_adj1)
# # norm_global_adj1 = preprocess_adj(formal_global_adj1)
# # # get sentence mask
# # mask_ = mask.view(-1,1)
# # # input sentnece s_0
# # text_len = torch.sum(text_indices != 0, dim=-1)
# # word_embeddings = self.embed(text_indices)
# # text = self.text_embed_dropout(word_embeddings)
# # text_out, (_, _) = self.lstm(text, text_len) # 32, 13, 600
# # # relevant sentences, for every sentence s_0, there are T relevant sentences s_1, s_2, ..., s_T
# # relevant_sentences_presentation_ = torch.reshape(relevant_sentences_presentation, (-1, relevant_sentences_presentation.shape[-1]))
# # sentence_text_len = torch.sum(relevant_sentences_presentation_!= 0, dim=-1)
# # sentence_embedding = self.embed(relevant_sentences_presentation)
# # sentence_text_ = self.text_embed_dropout(sentence_embedding)
# # sentence_text = torch.reshape(sentence_text_, (-1, sentence_text_.shape[-2], sentence_text_.shape[-1]))
# # ones = torch.ones_like(sentence_text_len)
# # sentence_text_out, (sentence_text_out1, b_) = self.lstm_(sentence_text, torch.where(sentence_text_len <= 0, ones, sentence_text_len))
# # sentence_text_out = torch.reshape(sentence_text_out, (relevant_sentences.shape[0], relevant_sentences.shape[1], sentence_text_out.shape[-2], sentence_text_out.shape[-1]))
# # sentence_text_out1 = torch.reshape(sentence_text_out1, (relevant_sentences.shape[0], relevant_sentences.shape[1], -1))
# # # process formal features to match the formal adj; first row then column
# # # global graph: row -> relevant sentence feature, column -> relevant sentence word feature
# # relevant_sentence_features = sentence_text_out1
# # relevant_sentence_word_features = torch.reshape(sentence_text_out, (batch_size, rele_sen_num*rele_sen_len, -1))
# # formal_global_features1 = torch.cat([relevant_sentence_features, relevant_sentence_word_features], 1)
# # # GCN with local global graph1 to get relevant sentences features
# # global_text_out1 = self.gcn1(norm_global_adj1, formal_global_features1)[:, :rele_sen_num, :]
# # # process formal features to match the formal adj
# # # pdb.set_trace()
# # formal_global_features = torch.cat([text_out, global_text_out1], 1)
# # # GCN with local global graph
# # global_text_out = self.gcn0(norm_global_adj, formal_global_features)[:, :sentence_len, :]
# # # unified features
# # unified_text = torch.cat([text_out.float(), global_text_out.float()], -1)
# # # pair generation
# # pair_text = self.pairgeneration(unified_text)
# # # AE and OE scores
# # aspect_probs = self.fc_aspect(self.fc_aspect_h(unified_text.float())).contiguous().view(-1, 3)
# # opinion_probs = self.fc_opinion(self.fc_opinion_h(unified_text.float())).contiguous().view(-1, 3)
# # # pair mask
# # pair_mask = torch.unsqueeze((aspect_probs[:,-1]+aspect_probs[:,-2]).view(text_out.shape[0],-1),1).repeat(1,text_out.shape[1],1)\
# # + torch.unsqueeze((opinion_probs[:,-1]+opinion_probs[:,-2]).view(text_out.shape[0],-1),2).repeat(1,1,text_out.shape[1])
# # pair_mask_ = pair_mask.view(-1,1)
# # pair_mask_grid = torch.unsqueeze(pair_mask,-1).repeat(1,1,1,pair_text.shape[-1])
# # # pair scores
# # pair_probs = self.fc_pair(self.fc_pair_h(pair_text.float()*pair_mask_grid)).contiguous().view(-1, 3)
# # return aspect_probs, opinion_probs, pair_probs
# # -*- coding: utf-8 -*-
# from layers.dynamic_rnn import DynamicLSTM
# import torch
# import torch.nn as nn
# import pdb
# import torch.nn.functional as F
# import numpy as np
# def generate_formal_adj(init_adj):
# '''input: a simple adj with a size of (row, column)
# output: a complete and formal adj with a size of (row+column, row+column)'''
# batch, row, column = init_adj.shape
# # up left matrix (batch, row, row)
# lu = torch.tensor(np.zeros((batch, row, row)).astype('float32')).cuda()
# # up right (batch, row, column)
# ru = init_adj.cuda()
# # down left (batch, column, row)
# ld = init_adj.transpose(1, 2).cuda()
# # down right (batch, column, column)
# rd = torch.tensor(np.zeros((batch, column, column)).astype('float32')).cuda()
# # up (batch, row, row+column)
# up = torch.cat([lu.float(), ru.float()], -1).cuda()
# # down (batch, column, row+column)
# down = torch.cat([ld.float(), rd.float()], -1).cuda()
# # final (batch, row+column, row+column)
# final = torch.cat([up,down],1).cuda()
# return final.cuda()
# def preprocess_adj(A):
# '''
# for batch data
# Pre-process adjacency matrix
# :param A: adjacency matrix
# :return:
# '''
# # prepare
# assert A.shape[-1] == A.shape[-2]
# batch = A.shape[0]
# num = A.shape[-1]
# # generate eye
# I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()
# #
# A_hat = A.cuda() + I
# #
# D_hat_diag = torch.sum(A_hat.cuda(), axis=-1)
# #
# D_hat_diag_inv_sqrt = torch.pow(D_hat_diag.cuda(), -0.5)
# # inf
# D_hat_diag_inv_sqrt = torch.where(torch.isinf(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())
# D_hat_diag_inv_sqrt = torch.where(torch.isnan(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())
# #
# tem_I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()
# D_hat_diag_inv_sqrt_ = D_hat_diag_inv_sqrt.unsqueeze(-1).repeat(1,1,num).cuda()
# D_hat_inv_sqrt = D_hat_diag_inv_sqrt_ * tem_I
# #
# return torch.matmul(torch.matmul(D_hat_inv_sqrt.cuda(), A_hat.cuda()), D_hat_inv_sqrt.cuda())
# class GraphConvolution(nn.Module):
# """
# Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
# """
# def __init__(self, in_features, out_features, bias=True):
# super(GraphConvolution, self).__init__()
# self.in_features = in_features
# self.out_features = out_features
# self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
# if bias:
# self.bias = nn.Parameter(torch.FloatTensor(out_features))
# else:
# self.register_parameter('bias', None)
# def forward(self, text, adj):
# hidden = torch.matmul(text, self.weight)
# denom = torch.sum(adj, dim=2, keepdim=True) + 1
# # adj = torch.tensor(adj)
# adj = torch.tensor(adj, dtype=torch.float32)
# # hidden = torch.tensor(hidden)
# hidden = torch.tensor(hidden, dtype=torch.float32)
# output = torch.matmul(adj.cuda(), hidden.cuda()) / denom.cuda()
# # print(output.shape)
# # print(self.bias.shape)
# if self.bias is not None:
# return output + self.bias
# else:
# return output
# class PairGeneration(nn.Module):
# """
# Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
# """
# def __init__(self, features, bias=False):
# super(PairGeneration, self).__init__() # 32,13,300 32,300,13
# self.features = features
# # self.out_features = out_features
# self.weight = nn.Parameter(torch.FloatTensor(features, features))
# if bias:
# self.bias = nn.Parameter(torch.FloatTensor(features))
# else:
# self.register_parameter('bias', None)
# def forward(self, text):
# hidden = torch.matmul(text.float(), self.weight)
# # print(hidden.shape)
# # denom = torch.sum(adj, dim=2, keepdim=True) + 1
# # adj = torch.tensor(adj, dtype=torch.float32)
# hidden_ = torch.tensor(hidden, dtype=torch.float32)
# # print(hidden_.shape)
# output = torch.matmul(hidden_, hidden.permute(0,2,1))
# # print(output.shape)
# if self.bias is not None:
# return output + self.bias
# else:
# return output
# class PairGeneration0(nn.Module):
# def __init__(self, features, bias=False):
# super(PairGeneration0, self).__init__() # 32,13,300 32,300,13
# self.features = features
# # self.out_features = out_features
# self.weight = nn.Parameter(torch.FloatTensor(features, features))
# if bias:
# self.bias = nn.Parameter(torch.FloatTensor(features))
# else:
# self.register_parameter('bias', None)
# def forward(self, text):
# hidden_1 = torch.unsqueeze(text,1).repeat(1,text.shape[1],1,1)
# hidden_2 = torch.unsqueeze(text,2).repeat(1,1,text.shape[1],1)
# output = torch.cat((hidden_1, hidden_2),-1)
# return output
# class GCNLayer(nn.Module):
# def __init__(self, in_dim, out_dim, acti=False):
# super(GCNLayer, self).__init__()
# self.linear = nn.Linear(in_dim, out_dim) # bias = False is also ok.
# if acti:
# self.acti = nn.ReLU(inplace=True)
# else:
# self.acti = None
# def forward(self, F):
# output = self.linear(F)
# if not self.acti:
# return output
# return self.acti(output)
# class GCN(nn.Module):
# def __init__(self, input_dim, hidden_dim, num_classes, p):
# super(GCN, self).__init__()
# self.gcn_layer1 = GCNLayer(input_dim, hidden_dim)
# self.gcn_layer2 = GCNLayer(hidden_dim, num_classes, acti=False)
# self.dropout = nn.Dropout(p)
# def forward(self, A, X):
# X = self.dropout(X.float().cuda())
# F = torch.matmul(A.cuda(), X.cuda())
# F = self.gcn_layer1(F.cuda())
# output = F
# # F = self.dropout(F.cuda())
# # F = torch.matmul(A, F.cuda())
# # output = self.gcn_layer2(F.cuda())
# return output
# class TS1(nn.Module):
# def __init__(self, embedding_matrix, opt):
# super(TS1, self).__init__()
# self.opt = opt
# self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
# self.lstm = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type = 'LSTM')
# self.lstm_ = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type = 'LSTM')
# self.gcn0 = GCN(600, 300, 3, 0.3)
# self.gcn2 = GCN(600, 300, 3, 0.3)
# self.gcn1 = GCN(600, 600, 3, 0.3)
# self.sigmoid = nn.Sigmoid()
# self.text_embed_dropout = nn.Dropout(0.3)
# self.soft = nn.Softmax(dim=-1)
# self.pairgeneration = PairGeneration0(1200)
# self.fc_aspect_h = nn.Linear(1200, 600)
# self.fc_opinion_h = nn.Linear(1200, 600)
# self.fc_sentiment_h = nn.Linear(1200, 600)
# self.fc_pair_h = nn.Linear(2400, 1200)
# self.fc_pair_sentiment_h = nn.Linear(2400, 1200)
# self.fc_aspect = nn.Linear(600, 3)
# self.fc_opinion = nn.Linear(600, 3)
# self.fc_sentiment = nn.Linear(600, 4)
# self.fc_pair = nn.Linear(1200, 3)
# self.fc_pair_sentiment = nn.Linear(1200, 4)
# def forward(self, inputs, mask):
# # input
# text_indices, mask, local_adj, global_adj, global_adj1, relevant_sentences, relevant_sentences_presentation, m_, n_, local_adj_pmi,_,_,_ = inputs
# # prepare
# batch_size = text_indices.shape[0]
# sentence_len = text_indices.shape[1]
# rele_sen_num = relevant_sentences.shape[1]
# rele_sen_len = relevant_sentences_presentation.shape[-1]
# # process global adj to get formal adj and norm
# formal_global_adj = generate_formal_adj(global_adj)
# norm_global_adj = preprocess_adj(formal_global_adj)
# global_adj1 = torch.reshape(global_adj1.permute(0,2,1,3), (batch_size, rele_sen_num, rele_sen_num*rele_sen_len))
# formal_global_adj1 = generate_formal_adj(global_adj1)
# norm_global_adj1 = preprocess_adj(formal_global_adj1)
# # get sentence mask
# mask_ = mask.view(-1,1)
# '''get initial features'''
# # input sentnece s_0
# text_len = torch.sum(text_indices != 0, dim=-1)
# word_embeddings = self.embed(text_indices)
# text = self.text_embed_dropout(word_embeddings)
# text_out, (_, _) = self.lstm(text, text_len.cpu()) # 32, 13, 600
# # relevant sentences, for every sentence s_0, there are T relevant sentences s_1, s_2, ..., s_T
# relevant_sentences_presentation_ = torch.reshape(relevant_sentences_presentation, (-1, relevant_sentences_presentation.shape[-1]))
# sentence_text_len = torch.sum(relevant_sentences_presentation_!= 0, dim=-1)
# sentence_embedding = self.embed(relevant_sentences_presentation)
# sentence_text_ = self.text_embed_dropout(sentence_embedding)
# sentence_text = torch.reshape(sentence_text_, (-1, sentence_text_.shape[-2], sentence_text_.shape[-1]))
# ones = torch.ones_like(sentence_text_len)
# sentence_text_out, (sentence_text_out1, b_) = self.lstm_(sentence_text, torch.where(sentence_text_len <= 0, ones, sentence_text_len).cpu())
# sentence_text_out = torch.reshape(sentence_text_out, (relevant_sentences.shape[0], relevant_sentences.shape[1], sentence_text_out.shape[-2], sentence_text_out.shape[-1]))
# sentence_text_out1 = torch.reshape(sentence_text_out1, (relevant_sentences.shape[0], relevant_sentences.shape[1], -1))
# '''global graph0 to get features of words in sentence0, using the initial rele sentences features'''
# # process formal features to match the formal adj
# formal_global_features = torch.cat([text_out, sentence_text_out1], 1)
# # GCN with local global graph
# global_text_out = self.gcn0(norm_global_adj, formal_global_features)[:, :sentence_len, :]
# rele_sentences_features = global_text_out
# '''global graph1 to get relevant sentneces features using the initial rele sentence words feature,
# then get the features of words in sentence0'''
# # step 1 get the sentence fetures using words feature of rele sentences, global graph 1
# relevant_sentence_features = sentence_text_out1
# relevant_sentence_word_features = torch.reshape(sentence_text_out, (batch_size, rele_sen_num*rele_sen_len, -1))
# formal_global_features1 = torch.cat([relevant_sentence_features, relevant_sentence_word_features], 1)
# global_text_out1 = self.gcn1(norm_global_adj1, formal_global_features1)[:, :rele_sen_num, :]
# rele_sentences_features1 = self.gcn2(norm_global_adj, torch.cat([text_out, global_text_out1], 1))[:, :sentence_len, :]
# # unified features
# unified_text = torch.cat([text_out.float(), rele_sentences_features.float(), rele_sentences_features1.float()], -1)
# # pair generation
# pair_text = self.pairgeneration(unified_text)
# # AE and OE scores
# aspect_probs = self.fc_aspect(self.fc_aspect_h(unified_text.float())).contiguous().view(-1, 3)
# opinion_probs = self.fc_opinion(self.fc_opinion_h(unified_text.float())).contiguous().view(-1, 3)
# sentiment_probs = self.fc_sentiment(self.fc_sentiment_h(unified_text.float())).contiguous().view(-1, 4)
# # pair mask
# pair_mask = torch.unsqueeze((aspect_probs[:,-1]+aspect_probs[:,-2]).view(text_out.shape[0],-1),1).repeat(1,text_out.shape[1],1)\
# + torch.unsqueeze((opinion_probs[:,-1]+opinion_probs[:,-2]).view(text_out.shape[0],-1),2).repeat(1,1,text_out.shape[1])
# pair_mask_ = pair_mask.view(-1,1)
# pair_mask_grid = torch.unsqueeze(pair_mask,-1).repeat(1,1,1,pair_text.shape[-1])
# # pair scores
# pair_probs = self.fc_pair(self.fc_pair_h(pair_text.float()*pair_mask_grid)).contiguous().view(-1, 3)
# # pair_probs = self.fc_pair(self.fc_pair_h(pair_text.float())).contiguous().view(-1, 3)
# # pair sentiment scores
# pair_sentiment_probs = self.fc_pair_sentiment(self.fc_pair_sentiment_h(pair_text.float()*pair_mask_grid)).contiguous().view(-1, 4)
# return aspect_probs, opinion_probs, sentiment_probs, pair_probs, pair_sentiment_probs
# -*- coding: utf-8 -*-
from layers.dynamic_rnn import DynamicLSTM
import torch
import torch.nn as nn
import pdb
import torch.nn.functional as F
import numpy as np
def generate_formal_adj(init_adj):
    """Expand a bipartite adjacency into a full symmetric adjacency matrix.

    Input: a simple adjacency of shape (batch, row, column) linking two node
    sets. Output: the formal adjacency of shape (batch, row+column,
    row+column) with zero diagonal blocks:

        [ 0    A  ]
        [ A^T  0  ]

    Fix: the original hard-coded ``.cuda()`` on every tensor, which breaks on
    CPU-only machines and forces device transfers. The result now lives on
    the same device as ``init_adj`` (identical values; callers that pass CUDA
    tensors still get CUDA output).
    """
    batch, row, column = init_adj.shape
    device = init_adj.device
    adj = init_adj.float()
    # zero diagonal blocks: no row-row or column-column edges
    lu = torch.zeros(batch, row, row, device=device)
    rd = torch.zeros(batch, column, column, device=device)
    # up (batch, row, row+column); down (batch, column, row+column)
    up = torch.cat([lu, adj], -1)
    down = torch.cat([adj.transpose(1, 2), rd], -1)
    # final (batch, row+column, row+column)
    return torch.cat([up, down], 1)
def preprocess_adj(A):
    """Symmetrically normalize a batched adjacency matrix (GCN pre-processing).

    Computes D^-1/2 (A + I) D^-1/2 where D is the degree matrix of A + I
    (self-loops added), as in Kipf & Welling GCNs.

    :param A: (batch, n, n) adjacency matrix.
    :return: (batch, n, n) normalized adjacency, on A's device.

    Fix: the original hard-coded ``.cuda()`` on every intermediate, breaking
    CPU-only execution; computation now follows ``A.device`` with identical
    values. The diagonal build (eye-repeat * broadcast) is replaced by the
    equivalent ``torch.diag_embed``.
    """
    # prepare
    assert A.shape[-1] == A.shape[-2]
    num = A.shape[-1]
    # add self-loops; the float32 eye broadcasts over the batch dimension
    # and promotes integer inputs exactly like the original eye-repeat did
    A_hat = A + torch.eye(num, device=A.device)
    # node degrees of A_hat
    D_hat_diag = torch.sum(A_hat, dim=-1)
    D_hat_diag_inv_sqrt = torch.pow(D_hat_diag, -0.5)
    # guard against inf/nan from zero (or negative) degrees, as the original did
    bad = torch.isinf(D_hat_diag_inv_sqrt) | torch.isnan(D_hat_diag_inv_sqrt)
    D_hat_diag_inv_sqrt = torch.where(bad, torch.zeros_like(D_hat_diag_inv_sqrt), D_hat_diag_inv_sqrt)
    # D^-1/2 as a batched diagonal matrix
    D_hat_inv_sqrt = torch.diag_embed(D_hat_diag_inv_sqrt)
    return torch.matmul(torch.matmul(D_hat_inv_sqrt, A_hat), D_hat_inv_sqrt)
class SequenceLabelForAO(nn.Module):
    """Two-headed sequence tagger producing aspect and opinion tag scores.

    A shared down-projection (hidden_size -> hidden_size // 2) is followed
    by dropout, then two independent linear heads, one per tag sequence.
    """
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForAO, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, input_features):
        """Return (aspect_scores, opinion_scores) for (bs, seq_len, h) input,
        each of shape (bs, seq_len, tag_size)."""
        shared = self.dropout(self.linear(input_features))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class SequenceLabelForAOS(nn.Module):
    """Three-headed sequence tagger: aspect, opinion, and sentiment scores.

    All heads read the same halved projection of the input; the sentiment
    head emits one extra class (tag_size + 1).
    """
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForAOS, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size)
        self.hidden2tag_senti = nn.Linear(half, self.tag_size+1)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, input_features):
        """Return (aspect, opinion, sentiment) score tensors for
        (bs, seq_len, h) input."""
        shared = self.dropout(self.linear(input_features))
        return (
            self.hidden2tag_sub(shared),
            self.hidden2tag_obj(shared),
            self.hidden2tag_senti(shared),
        )
class CustomizeSequenceLabelForAO(nn.Module):
    """Aspect/opinion tagger with a shared projection plus per-task projections.

    Each head classifies the concatenation of a shared down-projection and a
    task-specific down-projection of the input features.

    Fixes two bugs in the original:
    - the task-specific projections were computed and then immediately
      overwritten by ``self.dropout(features_tmp)`` (shared features), so
      ``hidden2tag_sub``/``hidden2tag_obj`` had no effect on the output;
    - ``obj_output`` used ``self.linear_a``, leaving ``self.linear_o`` dead.
    """
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(CustomizeSequenceLabelForAO, self).__init__()
        self.tag_size = tag_size
        # shared projection: hidden_size -> hidden_size // 2
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        # task-specific projections for aspect (sub) and opinion (obj)
        self.hidden2tag_sub = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag_obj = nn.Linear(hidden_size, int(hidden_size / 2))
        # classifier heads over [shared ; task-specific] features
        self.linear_a = nn.Linear(hidden_size, self.tag_size)
        self.linear_o = nn.Linear(hidden_size, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, input_features):
        """Return (aspect_scores, opinion_scores) for input of shape
        (bs, seq_len, hidden_size), each (bs, seq_len, tag_size)."""
        # shared
        features_tmp = self.dropout(self.linear(input_features))
        # ATE (bug fix: dropout now applied to the aspect projection itself)
        features_tmp_a = self.dropout(self.hidden2tag_sub(input_features))
        # OTE (bug fix: likewise for the opinion projection)
        features_tmp_o = self.dropout(self.hidden2tag_obj(input_features))
        # cat shared + task-specific back to hidden_size
        features_for_a = torch.cat([features_tmp, features_tmp_a], -1)
        features_for_o = torch.cat([features_tmp, features_tmp_o], -1)
        # classifier (bug fix: obj head now uses linear_o, not linear_a)
        sub_output = self.linear_a(features_for_a)
        obj_output = self.linear_o(features_for_o)
        return sub_output, obj_output
class SequenceLabelForTriple(nn.Module):
    """Grid scorer with two heads of different arity.

    A shared halved projection feeds a `sub` head with `tag_size` classes
    and an `obj` head with `tag_size + 1` classes (one extra class).
    """
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForTriple, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size+1)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, input_features):
        """Return (sub_scores, obj_scores) for (..., hidden_size) input;
        obj_scores has one more class than sub_scores."""
        shared = self.dropout(self.linear(input_features))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class MultiNonLinearClassifier(nn.Module):
    """Down-project, dropout, then classify each position into `tag_size` tags."""
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(MultiNonLinearClassifier, self).__init__()
        self.tag_size = tag_size
        self.linear = nn.Linear(hidden_size, hidden_size // 2)
        self.hidden2tag = nn.Linear(hidden_size // 2, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, input_features):
        """Map (..., hidden_size) features to (..., tag_size) logits."""
        projected = self.dropout(self.linear(input_features))
        return self.hidden2tag(projected)
class SequenceLabelForGrid(nn.Module):
    """Two token-tagging heads over a shared ReLU-activated down-projection."""
    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForGrid, self).__init__()
        self.tag_size = tag_size
        half = hidden_size // 2
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, input_features):
        """
        Args:
            input_features: (bs, seq_len, h)
        Returns:
            (sub_logits, obj_logits), each (bs, seq_len, tag_size).
        """
        shared = self.dropout(torch.relu(self.linear(input_features)))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class PairGeneration(nn.Module):
    """
    Bilinear pairwise scorer, similar in spirit to a GCN weight transform
    (https://arxiv.org/abs/1609.02907): score[b, i, j] = (x_i W) . x_j.
    """
    def __init__(self, features, bias=False):
        super(PairGeneration, self).__init__()
        self.features = features
        self.weight = nn.Parameter(torch.FloatTensor(features, features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(features))
        else:
            self.register_parameter('bias', None)
        # BUG FIX: torch.FloatTensor(...) allocates *uninitialized* memory, so
        # without explicit init the parameters start from arbitrary (possibly
        # non-finite) values.
        nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            nn.init.zeros_(self.bias)
    def forward(self, text):
        """
        Args:
            text: (bs, seq_len, features)
        Returns:
            (bs, seq_len, seq_len) pairwise scores (plus bias if enabled).
        """
        hidden = torch.matmul(text.float(), self.weight)  # (bs, seq_len, features)
        # BUG FIX: the original re-wrapped `hidden` with torch.tensor(...),
        # which copies the data and detaches it from the autograd graph,
        # silently blocking gradients through the first matmul operand.
        # `hidden` is already float32 here, so use it directly.
        output = torch.matmul(hidden, hidden.permute(0, 2, 1))
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class PairGeneration0(nn.Module):
    """Builds all ordered token pairs: output[b, i, j] = [text[b, j] ; text[b, i]].

    Note: `weight`/`bias` are kept for state_dict compatibility but are not
    used by forward().
    """
    def __init__(self, features, bias=False):
        super(PairGeneration0, self).__init__()
        self.features = features
        self.weight = nn.Parameter(torch.FloatTensor(features, features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(features))
        else:
            self.register_parameter('bias', None)
    def forward(self, text):
        """Map (bs, seq_len, h) to the pair grid (bs, seq_len, seq_len, 2h)."""
        seq_len = text.shape[1]
        # Broadcast along two new axes (expand is a zero-copy view; cat then
        # materializes the same grid the original built with repeat()).
        rows = text.unsqueeze(1).expand(-1, seq_len, -1, -1)
        cols = text.unsqueeze(2).expand(-1, -1, seq_len, -1)
        return torch.cat((rows, cols), -1)
class GCNLayer(nn.Module):
    """Linear projection with an optional in-place ReLU on top."""
    def __init__(self, in_dim, out_dim, acti=True):
        super(GCNLayer, self).__init__()
        self.linear = nn.Linear(in_dim, out_dim)  # bias = False is also ok.
        self.acti = nn.ReLU(inplace=True) if acti else None
    def forward(self, F):
        projected = self.linear(F)
        if self.acti is None:
            return projected
        return self.acti(projected)
class GCNforFeature_1(nn.Module):
    """One-layer GCN: dropout -> aggregate over adjacency -> linear + ReLU."""
    def __init__(self, input_dim, hidden_dim, p):
        super(GCNforFeature_1, self).__init__()
        self.gcn_layer1 = GCNLayer(input_dim, hidden_dim)
        self.dropout = nn.Dropout(p)
    def forward(self, A, X):
        """A: (bs, n, n) normalized adjacency; X: (bs, n, input_dim)."""
        aggregated = torch.matmul(A, self.dropout(X.float()))
        return self.gcn_layer1(aggregated)
class GCNforFeature_2(nn.Module):
    """Two-layer GCN: per layer, dropout -> aggregate over A -> project.

    Feature dims: input_dim -> hidden_dim -> out_dim.
    """
    def __init__(self, input_dim, hidden_dim, out_dim, p):
        super(GCNforFeature_2, self).__init__()
        self.gcn_layer1 = GCNLayer(input_dim, hidden_dim)
        self.gcn_layer2 = GCNLayer(hidden_dim, out_dim)
        self.dropout = nn.Dropout(p)
    def forward(self, A, X):
        """
        Args:
            A: (bs, n, n) normalized adjacency.
            X: (bs, n, input_dim) node features.
        Returns:
            (bs, n, out_dim) node features.
        """
        X = self.dropout(X.float())
        F = torch.matmul(A, X)
        F = self.gcn_layer1(F)
        F = self.dropout(F.float())
        F = torch.matmul(A, F)
        # BUG FIX: the second hop must use gcn_layer2; the original reapplied
        # gcn_layer1, which left gcn_layer2 dead and raised a shape mismatch
        # whenever hidden_dim != input_dim.
        output = self.gcn_layer2(F)
        return output
class TS1(nn.Module):
    """GloVe-based aspect-sentiment-triple model.

    Encodes the target sentence and its retrieved relevant sentences with
    BiLSTMs, fuses them through two global GCN passes, then scores tokens
    (aspect/opinion) and token pairs (pair / pair+sentiment).
    """
    def __init__(self, embedding_matrix, opt):
        super(TS1, self).__init__()
        self.opt = opt
        # Frozen-by-default pretrained GloVe embedding table.
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        # Separate BiLSTMs for the target sentence and the relevant sentences.
        self.lstm = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type = 'LSTM')
        self.lstm_ = DynamicLSTM(300, 300, num_layers=1, batch_first=True, bidirectional=True, rnn_type = 'LSTM')
        self.text_embed_dropout = nn.Dropout(0.5)
        self.pairgeneration = PairGeneration0(600)
        self.gcn0 = GCNforFeature_1(600, 300, 0.5)
        self.gcn1 = GCNforFeature_1(600, 600, 0.5)
        self.gcn2 = GCNforFeature_1(600, 300, 0.5)
        # NOTE(review): uses SequenceLabelForAO (defined elsewhere in this
        # module), not the CustomizeSequenceLabelForAO variant above.
        self.aspect_opinion_classifier = SequenceLabelForAO(900, 3, 0.5)
        self.pair_classifier = MultiNonLinearClassifier(1800, 3, 0.5)
        self.triple_classifier = SequenceLabelForTriple(1800, 3, 0.5)
    def forward(self, inputs, mask):
        """Score tokens and token pairs for one batch.

        Args:
            inputs: tuple of batch tensors (see unpacking below).
            mask: token mask; NOTE(review) — this argument is immediately
                shadowed by the `mask` unpacked from `inputs` and is never
                used itself.
        Returns:
            aspect_probs, opinion_probs: (bs*seq_len, 3) logits.
            pair_probs: (bs*seq_len*seq_len, 3) logits.
            pair_sentiment_probs: (bs*seq_len*seq_len, 4) logits.
        """
        # input
        text_indices, mask, global_adj, global_adj1, relevant_sentences, relevant_sentences_presentation, m_, n_, _,_,_ = inputs
        # prepare
        batch_size = text_indices.shape[0]
        sentence_len = text_indices.shape[1]
        rele_sen_num = relevant_sentences.shape[1]
        rele_sen_len = relevant_sentences_presentation.shape[-1]
        # process global adj to get formal adj and norm
        formal_global_adj = generate_formal_adj(global_adj)
        norm_global_adj = preprocess_adj(formal_global_adj)
        # flatten the per-sentence word axis into one node axis for graph 1
        global_adj1 = torch.reshape(global_adj1.permute(0,2,1,3), (batch_size, rele_sen_num, rele_sen_num*rele_sen_len))
        formal_global_adj1 = generate_formal_adj(global_adj1)
        norm_global_adj1 = preprocess_adj(formal_global_adj1)
        # get sentence mask (flattened; currently unused below)
        mask_ = mask.view(-1,1)
        # input sentnece s_0: embed + dropout + BiLSTM over true lengths
        text_len = torch.sum(text_indices != 0, dim=-1)
        word_embeddings = self.embed(text_indices)
        text = self.text_embed_dropout(word_embeddings)
        text_out, (_, _) = self.lstm(text, text_len.cpu()) # 32, 13, 600
        # relevant sentences, for every sentence s_0, there are T relevant sentences s_1, s_2, ..., s_T
        relevant_sentences_presentation_ = torch.reshape(relevant_sentences_presentation, (-1, relevant_sentences_presentation.shape[-1]))
        sentence_text_len = torch.sum(relevant_sentences_presentation_!= 0, dim=-1)
        sentence_embedding = self.embed(relevant_sentences_presentation)
        sentence_text_ = self.text_embed_dropout(sentence_embedding)
        sentence_text = torch.reshape(sentence_text_, (-1, sentence_text_.shape[-2], sentence_text_.shape[-1]))
        # clamp zero lengths to 1 so the packed LSTM does not crash on padding-only rows
        ones = torch.ones_like(sentence_text_len)
        sentence_text_out, (sentence_text_out1, b_) = self.lstm_(sentence_text, torch.where(sentence_text_len <= 0, ones, sentence_text_len).cpu())
        sentence_text_out = torch.reshape(sentence_text_out, (relevant_sentences.shape[0], relevant_sentences.shape[1], sentence_text_out.shape[-2], sentence_text_out.shape[-1]))
        # final hidden state per relevant sentence -> one vector per sentence
        sentence_text_out1 = torch.reshape(sentence_text_out1, (relevant_sentences.shape[0], relevant_sentences.shape[1], -1))
        # self-attention pooling over relevant-sentence tokens (computed but
        # NOTE(review): sentence_text_out2 is not used below)
        attention = F.softmax(torch.matmul(sentence_text_out, sentence_text_out.permute(0,1,3,2)), dim=-1)
        sentence_text_out2 = torch.matmul(attention, sentence_text_out).sum(2)
        '''global graph0 to get features of words in sentence0, using the initial rele sentences features'''
        # process formal features to match the formal adj
        formal_global_features = torch.cat([text_out, sentence_text_out1], 1)
        # GCN with local global graph; keep only the target-sentence token rows
        global_text_out = self.gcn0(norm_global_adj, formal_global_features)[:, :sentence_len, :]
        rele_sentences_features = global_text_out
        '''global graph1 to get relevant sentneces features using the initial rele sentence words feature,
        then get the features of words in sentence0'''
        # step 1 get the sentence fetures using words feature of rele sentences, global graph 1
        relevant_sentence_features = sentence_text_out1
        relevant_sentence_word_features = torch.reshape(sentence_text_out, (batch_size, rele_sen_num*rele_sen_len, -1))
        formal_global_features1 = torch.cat([relevant_sentence_features, relevant_sentence_word_features], 1)
        global_text_out1 = self.gcn1(norm_global_adj1, formal_global_features1)[:, :rele_sen_num, :]
        # step 2: propagate the refined sentence features back to s_0's tokens
        rele_sentences_features1 = self.gcn2(norm_global_adj, torch.cat([text_out, global_text_out1], 1))[:, :sentence_len, :]
        # unified features
        # unified_text = torch.cat([text_out.float(), rele_sentences_features.float(), rele_sentences_features1.float()], -1)
        unified_text = torch.cat([text_out.float(), rele_sentences_features1.float()], -1)
        # pair generation
        pair_text = self.pairgeneration(unified_text)
        # AE and OE scores
        aspect_probs, opinion_probs = self.aspect_opinion_classifier(unified_text.float())
        aspect_probs, opinion_probs = aspect_probs.contiguous().view(-1, 3), opinion_probs.contiguous().view(-1, 3)
        # pair scores
        pair_probs_, pair_sentiment_probs_ = self.triple_classifier(pair_text.float())
        pair_probs = pair_probs_.contiguous().view(-1, 3)
        pair_sentiment_probs = pair_sentiment_probs_.contiguous().view(-1, 4)
        return aspect_probs, opinion_probs, pair_probs, pair_sentiment_probs
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,961
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/train_with_glove.py
|
# -*- coding: utf-8 -*-
import os
import math
import argparse
import random
import numpy
import torch
import torch.nn as nn
import pdb
from bucket_iterator import BucketIterator
from sklearn import metrics
from data_utils import ABSADatasetReader
from models import TS, TS0, TS1, TS2, TS3, TS1_3
from evaluation_glove import get_metric, find_pair, find_term, compute_sentiment, find_pair_sentiment, find_grid_term
from utils import *
from sklearn.metrics import f1_score, precision_score, accuracy_score
from tqdm import tqdm
class Instructor:
    def __init__(self, opt):
        """Set up one experiment: dataset reader, batch iterators, model, log file.

        Args:
            opt: argparse.Namespace with dataset, embed_dim, batch_size,
                model_class, model_name, device, etc.
        """
        self.opt = opt
        import datetime as dt
        now_time = dt.datetime.now().strftime('%F %T')
        absa_dataset = ABSADatasetReader(dataset=opt.dataset, embed_dim=opt.embed_dim)
        # adj, features = load_corpus(dataset_str=opt.dataset)
        # Train batches are shuffled; test keeps original order (sort=False)
        # so the saved predictions line up with the raw data.
        self.train_data_loader = BucketIterator(data=absa_dataset.train_data, batch_size=opt.batch_size, shuffle=True)
        self.dev_data_loader = BucketIterator(data=absa_dataset.dev_data, batch_size=opt.batch_size, shuffle=False)
        self.test_data_loader = BucketIterator(data=absa_dataset.test_data, batch_size=opt.batch_size, shuffle=False, sort=False)
        self.model = opt.model_class(absa_dataset.embedding_matrix, opt).to(opt.device)
        # NOTE(review): assumes log/<dataset>/ already exists; run() only
        # creates log/ itself — confirm the subdirectory is created elsewhere.
        self.f_out = open('log/'+ self.opt.dataset + '/' + self.opt.model_name+'_'+self.opt.dataset+'_val'+str(now_time)+'.txt', 'w', encoding='utf-8')
        self._print_args()
        self.global_f1 = 0.
        if torch.cuda.is_available():
            print('cuda memory allocated:', torch.cuda.memory_allocated(device=opt.device.index))
def _print_args(self):
n_trainable_params, n_nontrainable_params = 0, 0
for p in self.model.parameters():
n_params = torch.prod(torch.tensor(p.shape)).item()
if p.requires_grad:
n_trainable_params += n_params
else:
n_nontrainable_params += n_params
print('n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
print('> training arguments:')
for arg in vars(self.opt):
print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))
def _reset_params(self):
for p in self.model.parameters():
if p.requires_grad:
if len(p.shape) > 1:
self.opt.initializer(p)
else:
stdv = 1. / math.sqrt(p.shape[0])
torch.nn.init.uniform_(p, a=-stdv, b=stdv)
    def _train(self, criterion, optimizer):
        """Full training loop: per epoch, train on all batches then evaluate
        on dev and test; track the best epoch by dev pair-sentiment F1.

        Args:
            criterion: per-task loss (nn.CrossEntropyLoss in run()).
            optimizer: optimizer over the model's trainable parameters.

        Returns:
            Best dev F1s (aspect, opinion, pair, pair-sentiment), the test F1s
            from that same epoch, test precision/recall, the saved test
            predictions/labels, and the best model.
        """
        # "best so far" trackers, keyed on dev pair-sentiment F1
        max_aspect_dev_f1, max_opinion_dev_f1 = 0, 0
        max_pair_dev_f1, max_pair_sentiment_dev_f1 = 0, 0
        max_pair_sentiment_dev_f1_macro = 0
        max_aspect_test_f1, max_opinion_test_f1 = 0, 0
        max_pair_test_f1, max_pair_sentiment_test_f1 = 0, 0
        max_pair_sentiment_test_f1_macro = 0
        max_precision, max_recall = 0, 0
        global_step = 0
        continue_not_increase = 0  # NOTE(review): never updated/used (dead early-stop counter?)
        best_results, best_labels = [], []
        for epoch in (range(self.opt.num_epoch)):
            print('>' * 100)
            print('epoch: ', epoch)
            self.f_out.write('>' * 100+'\n')
            self.f_out.write('epoch: {:.4f}\n'.format(epoch))
            # per-epoch loss accumulators (loss_g_s / loss_g_ag / loss_g_og are unused)
            loss_g_a, loss_g_o, loss_g_s, loss_g_ag, loss_g_og, loss_g_p, loss_g_ps = 0, 0, 0, 0, 0, 0, 0
            correct_g, predicted_g, relevant_g = 0, 0, 0
            for i_batch, sample_batched in enumerate(self.train_data_loader):
                global_step += 1
                # switch model to training mode, clear gradient accumulators
                self.model.train()
                optimizer.zero_grad()
                inputs = [sample_batched[col].to(self.opt.device) for col in self.opt.inputs_cols]
                targets_aspect_sequence = sample_batched['aspect_sequence_labels'].to(self.opt.device)
                targets_opinion_sequence = sample_batched['opinion_sequence_labels'].to(self.opt.device)
                targets_pair = sample_batched['pair_grid_labels'].to(self.opt.device)
                targets_pair_sentiment = sample_batched['triple_grid_labels'].to(self.opt.device)
                mask = sample_batched['mask'].to(self.opt.device)
                aspect_mask = sample_batched['aspect_mask'].to(self.opt.device)
                aspect_mask_ = aspect_mask.reshape(-1).long()
                bs, sen_len = mask.size()
                # grid mask: 1 where both token positions carry the same mask value
                mask_grid = torch.where(mask.unsqueeze(1).repeat(1,sen_len,1) == mask.unsqueeze(-1).repeat(1,1,sen_len),\
                    torch.ones([bs, sen_len, sen_len]).to(self.opt.device), \
                    torch.zeros([bs, sen_len, sen_len]).to(self.opt.device))
                outputs_aspect, outputs_opinion, outputs_pair, outputs_pair_sentiment = self.model(inputs, mask)
                # *_env: argmax predictions reshaped back to label shape
                # (NOTE(review): computed but unused here; kept for parity with eval)
                outputs_aspect_env = outputs_aspect.argmax(dim=-1)
                outputs_aspect_env = outputs_aspect_env.view(targets_aspect_sequence.shape[0],targets_aspect_sequence.shape[1])
                outputs_aspect_, targets_aspect_ = outputs_aspect.reshape(-1,3), targets_aspect_sequence.reshape(-1).long()
                outputs_opinion_env = outputs_opinion.argmax(dim=-1)
                outputs_opinion_env = outputs_opinion_env.view(targets_opinion_sequence.shape[0],targets_opinion_sequence.shape[1])
                outputs_opinion_, targets_opinion_ = outputs_opinion.reshape(-1,3), targets_opinion_sequence.reshape(-1).long()
                outputs_pair_env = outputs_pair.argmax(dim=-1)
                outputs_pair_env = outputs_pair_env.view(targets_pair.shape[0],targets_pair.shape[1],targets_pair.shape[2])
                outputs_pair_, targets_pair_ = outputs_pair.reshape(-1,3), targets_pair.reshape(-1).long()
                outputs_pair_sentiment_env = outputs_pair_sentiment.argmax(dim=-1)
                outputs_pair_sentiment_env = outputs_pair_sentiment_env.view(targets_pair_sentiment.shape[0],targets_pair_sentiment.shape[1],targets_pair_sentiment.shape[2])
                outputs_pair_sentiment_, targets_pair_sentiment_ = outputs_pair_sentiment.reshape(-1,4), targets_pair_sentiment.reshape(-1).long()
                # NOTE(review): criterion uses the default 'mean' reduction, so it
                # returns a scalar and (scalar*mask).sum()/mask.sum() == scalar —
                # the masking here is a numerical no-op; padded tokens still
                # contribute to the loss. Confirm whether reduction='none' was intended.
                loss_aspect = (criterion(outputs_aspect_, targets_aspect_) * mask).sum() / mask.sum()
                loss_opinion = (criterion(outputs_opinion_, targets_opinion_) * mask).sum() / mask.sum()
                loss_pair = (criterion(outputs_pair_, targets_pair_) * mask_grid).sum() / mask_grid.sum()
                # mask
                loss_pair_sentiment = (criterion(outputs_pair_sentiment_, targets_pair_sentiment_) * mask_grid).sum() / mask_grid.sum()
                # loss_mask = torch.where(targets_pair_>0, True, False)
                # masked_targets_pair_sentiment_ = torch.masked_select(targets_pair_sentiment_, loss_mask, out=None)
                # masked_outputs_pair_sentiment_ = torch.masked_select(outputs_pair_sentiment_, loss_mask.unsqueeze(-1).repeat(1,4), out=None).view(-1, 4)
                # loss_pair_sentiment = criterion(masked_outputs_pair_sentiment_, masked_targets_pair_sentiment_)
                # compute the uncertainty
                loss_g_a, loss_g_o, loss_g_p, loss_g_ps = \
                    loss_g_a + loss_aspect, loss_g_o + loss_opinion, loss_g_p + loss_pair, loss_g_ps + loss_pair_sentiment
                # joint loss over the four tasks
                loss = loss_aspect + loss_opinion + loss_pair + loss_pair_sentiment
                # loss = loss_pair + loss_pair_sentiment
                loss.backward()
                optimizer.step()
            # if epoch == 7:
            #     pdb.set_trace()
            # end of epoch: evaluate on dev and test
            dev_f_aspect, dev_f_opinion, dev_f_pair, dev_f_pair_sentiment, dev_f_pair_sentiment_macro, dev_loss = self._evaluate_acc_f1()
            test_f_aspect, test_f_opinion, test_f_pair, [test_f_pair_sentiment, test_p_pair_sentiment, test_r_pair_sentiment], test_f_pair_sentiment_macro, results, labels, test_loss = self._test_acc_f1()
            print('train loss: aspect {:.4f}, opinion {:.4f}, pair {:.4f}, pair_sentiment {:.4f}'\
                .format(loss_g_a.item(), loss_g_o.item(), loss_g_p.item(), loss_g_ps.item()))
            print('dev loss: aspect {:.4f}, opinion {:.4f}, pair {:.4f}, pair_sentiment {:.4f}'\
                .format(dev_loss[0].item(), dev_loss[1].item(), dev_loss[2].item(), dev_loss[3].item()))
            print('dev: f1-aspect: {:.4f}, f1-opinion: {:.4f}, f1-pair: {:.4f}, f1-pair-sentiment: {:.4f}, f1-pair-sentiment-macro: {:.4f}'.format(dev_f_aspect, dev_f_opinion, dev_f_pair, dev_f_pair_sentiment, dev_f_pair_sentiment_macro))
            print('test loss: aspect {:.4f}, opinion {:.4f}, pair {:.4f}, pair_sentiment {:.4f}'\
                .format(test_loss[0].item(), test_loss[1].item(), test_loss[2].item(), test_loss[3].item()))
            print('test: f1-aspect: {:.4f}, f1-opinion: {:.4f}, f1-pair: {:.4f}, f1-pair-sentiment: {:.4f}, f1-pair-sentiment-macro: {:.4f}'.format(test_f_aspect, test_f_opinion, test_f_pair, test_f_pair_sentiment, test_f_pair_sentiment_macro))
            self.f_out.write('train loss: aspect {:.4f}, opinion {:.4f}, pair {:.4f}, pair_sentiment {:.4f}\n'\
                .format(loss_g_a.item(), loss_g_o.item(), loss_g_p.item(), loss_g_ps.item()))
            self.f_out.write('dev loss: aspect {:.4f}, opinion {:.4f}, pair {:.4f}, pair_sentiment {:.4f}\n'\
                .format(dev_loss[0].item(), dev_loss[1].item(), dev_loss[2].item(), dev_loss[3].item()))
            self.f_out.write('dev: f1-aspect: {:.4f}, f1-opinion: {:.4f}, f1-pair: {:.4f}, f1-pair-sentiment: {:.4f}, f1-pair-sentiment-macro: {:.4f}\n'.format(dev_f_aspect, dev_f_opinion, dev_f_pair, dev_f_pair_sentiment, dev_f_pair_sentiment_macro))
            self.f_out.write('test loss: aspect {:.4f}, opinion {:.4f}, pair {:.4f}, pair_sentiment {:.4f}\n'\
                .format(test_loss[0].item(), test_loss[1].item(), test_loss[2].item(), test_loss[3].item()))
            self.f_out.write('test: f1-aspect: {:.4f}, f1-opinion: {:.4f}, f1-pair: {:.4f}, f1-pair-sentiment: {:.4f}, f1-pair-sentiment-macro: {:.4f}\n'.format(test_f_aspect, test_f_opinion, test_f_pair, test_f_pair_sentiment, test_f_pair_sentiment_macro))
            self.f_out.write('test: p-pair-sentiment: {:.4f}, r-pair-sentiment: {:.4f}\n'\
                .format(test_p_pair_sentiment, test_r_pair_sentiment))
            # model selection: dev pair-sentiment (micro) F1 decides the best epoch.
            # NOTE(review): if dev F1 never exceeds 0, best_model is never bound and
            # the return below raises NameError — confirm this cannot happen in practice.
            if dev_f_pair_sentiment > max_pair_sentiment_dev_f1:
                max_pair_dev_f1 = dev_f_pair
                max_aspect_dev_f1 = dev_f_aspect
                max_opinion_dev_f1 = dev_f_opinion
                max_pair_sentiment_dev_f1 = dev_f_pair_sentiment
                max_pair_sentiment_dev_f1_macro = dev_f_pair_sentiment_macro
                best_model = self.model
                max_pair_test_f1 = test_f_pair
                max_aspect_test_f1 = test_f_aspect
                max_opinion_test_f1 = test_f_opinion
                max_pair_sentiment_test_f1 = test_f_pair_sentiment
                max_pair_sentiment_test_f1_macro = test_f_pair_sentiment_macro
                best_results = results
                best_labels = labels
                max_precision, max_recall = test_p_pair_sentiment, test_r_pair_sentiment
        self.f_out.write('dev: {:.4f}, test: {:.4f}'.format(max_pair_sentiment_dev_f1, max_pair_sentiment_test_f1))
        return max_aspect_dev_f1, max_opinion_dev_f1, max_pair_dev_f1, max_pair_sentiment_dev_f1,\
            max_aspect_test_f1, max_opinion_test_f1, max_pair_test_f1, max_pair_sentiment_test_f1,\
            max_precision, max_recall,\
            best_results, best_labels, best_model
def _evaluate_acc_f1(self):
# switch model to evaluation mode
self.model.eval()
criterion = nn.CrossEntropyLoss()
predicted_p, relevant_p, correct_p = 0, 0, 0
predicted_ps, relevant_ps, correct_ps = 0, 0, 0
predicted_a, relevant_a, correct_a = 0, 0, 0
predicted_o, relevant_o, correct_o = 0, 0, 0
predicted_ps_macro, relevant_ps_macro, correct_ps_macro = {'pos':0, 'neg':0, 'neu':0}, {'pos':0, 'neg':0, 'neu':0}, {'pos':0, 'neg':0, 'neu':0}
dic = {1:'pos', 2:'neg', 3:'neu'}
loss_g_a, loss_g_o, loss_g_s, loss_g_p, loss_g_ps, loss_g_ag, loss_g_og = 0, 0, 0, 0, 0, 0, 0
with torch.no_grad():
for t_batch, t_sample_batched in enumerate(self.dev_data_loader):
t_inputs = [t_sample_batched[col].to(opt.device) for col in self.opt.inputs_cols]
t_targets_aspect = t_sample_batched['aspect_sequence_labels'].to(self.opt.device)
t_targets_opinion = t_sample_batched['opinion_sequence_labels'].to(self.opt.device)
t_targets_sentiment = t_sample_batched['sentiment_sequence_labels'].to(self.opt.device)
t_targets_pair = t_sample_batched['pair_grid_labels'].to(self.opt.device)
t_targets_pair_sentiment = t_sample_batched['triple_grid_labels'].to(self.opt.device)
t_targets_mask = t_sample_batched['mask'].to(self.opt.device)
t_aspect_mask = t_sample_batched['aspect_mask'].to(self.opt.device)
t_aspect_mask_ = t_aspect_mask.reshape(-1).long()
bs, sen_len = t_targets_mask.size()
t_targets_mask_grid = torch.where(t_targets_mask.unsqueeze(1).repeat(1,sen_len,1) == t_targets_mask.unsqueeze(-1).repeat(1,1,sen_len),\
torch.ones([bs, sen_len, sen_len]).to(self.opt.device), \
torch.zeros([bs, sen_len, sen_len]).to(self.opt.device))
t_outputs_aspect, t_outputs_opinion, t_outputs_pair, t_outputs_pair_sentiment = self.model(t_inputs, t_targets_mask)
t_outputs_aspect_env = t_outputs_aspect.argmax(dim=-1).view(t_targets_aspect.shape[0],t_targets_pair.shape[1])
t_outputs_opinion_env = t_outputs_opinion.argmax(dim=-1).view(t_targets_opinion.shape[0],t_targets_pair.shape[1])
t_outputs_pair_env = t_outputs_pair.argmax(dim=-1).view(t_targets_pair.shape[0],t_targets_pair.shape[1],t_targets_pair.shape[2])
t_outputs_pair_sentiment_env = t_outputs_pair_sentiment.argmax(dim=-1).view(t_targets_pair_sentiment.shape[0],t_targets_pair_sentiment.shape[1],t_targets_pair_sentiment.shape[2])
# compute loss
outputs_aspect_, targets_aspect_ = t_outputs_aspect.reshape(-1,3), t_targets_aspect.reshape(-1).long()
outputs_opinion_, targets_opinion_ = t_outputs_opinion.reshape(-1,3), t_targets_opinion.reshape(-1).long()
outputs_pair_, targets_pair_ = t_outputs_pair.reshape(-1,3), t_targets_pair.reshape(-1).long()
outputs_pair_sentiment_, targets_pair_sentiment_ = t_outputs_pair_sentiment.reshape(-1,4), t_targets_pair_sentiment.reshape(-1).long()
loss_aspect = (criterion(outputs_aspect_, targets_aspect_)*t_targets_mask).sum() / t_targets_mask.sum()
loss_opinion = (criterion(outputs_opinion_, targets_opinion_)*t_targets_mask).sum() / t_targets_mask.sum()
loss_pair = (criterion(outputs_pair_, targets_pair_)*t_targets_mask_grid).sum() / t_targets_mask_grid.sum()
loss_pair_sentiment = (criterion(outputs_pair_sentiment_, targets_pair_sentiment_)*t_targets_mask_grid).sum() / t_targets_mask_grid.sum()
loss_g_a, loss_g_o, loss_g_p, loss_g_ps = \
loss_g_a + loss_aspect, loss_g_o + loss_opinion, loss_g_p + loss_pair, loss_g_ps + loss_pair_sentiment
# metrics
outputs_a = (t_outputs_aspect_env*t_targets_mask).cpu().numpy().tolist()
targets_a = t_targets_aspect.cpu().numpy().tolist()
outputs_o = (t_outputs_opinion_env*t_targets_mask).cpu().numpy().tolist()
targets_o = t_targets_opinion.cpu().numpy().tolist()
outputs_p = (t_outputs_pair_env*t_targets_mask_grid).cpu().numpy().tolist()
targets_p = t_targets_pair.cpu().numpy().tolist()
outputs_ps = (t_outputs_pair_sentiment_env*t_targets_mask_grid).cpu().numpy().tolist()
targets_ps = t_targets_pair_sentiment.cpu().numpy().tolist()
# f1 for aspect
for out, tar in zip(outputs_a, targets_a):
predict_aspect = find_term(out)
true_aspect = find_term(tar)
predicted_a += len(predict_aspect)
relevant_a += len(true_aspect)
for aspect in predict_aspect:
if aspect in true_aspect:
correct_a += 1
# f1 for opinion
for out, tar in zip(outputs_o, targets_o):
predict_opinion = find_term(out)
true_opinion = find_term(tar)
predicted_o += len(predict_opinion)
relevant_o += len(true_opinion)
for opinion in predict_opinion:
if opinion in true_opinion:
correct_o += 1
# f1 for pair
for out, tar in zip(outputs_p, targets_p):
predict_pairs = find_pair(out)
true_pairs = find_pair(tar)
predicted_p += len(predict_pairs)
relevant_p += len(true_pairs)
for pair in predict_pairs:
if pair in true_pairs:
correct_p += 1
# f1 for sentiment pair
for out, tar, out_s, tar_s in zip(outputs_p, targets_p, outputs_ps, targets_ps):
predict_pairs_sentiment = find_pair_sentiment(out, out_s)
true_pairs_sentiment = find_pair_sentiment(tar, tar_s)
# micro
predicted_ps += len(predict_pairs_sentiment)
relevant_ps += len(true_pairs_sentiment)
for pair in predict_pairs_sentiment:
if pair in true_pairs_sentiment:
correct_ps += 1
# macro
for tri in predict_pairs_sentiment:
predicted_ps_macro[dic[tri[2]]]+=1
for tri in true_pairs_sentiment:
relevant_ps_macro[dic[tri[2]]]+=1
for pair in predict_pairs_sentiment:
if pair in true_pairs_sentiment:
correct_ps_macro[dic[tri[2]]] += 1
# micro
p_pair_sentiment = correct_ps / (predicted_ps + 1e-6)
r_pair_sentiment = correct_ps / (relevant_ps + 1e-6)
f_pair_sentiment = 2 * p_pair_sentiment * r_pair_sentiment / (p_pair_sentiment + r_pair_sentiment + 1e-6)
# macro
p_pair_sentiment_pos, p_pair_sentiment_neg, p_pair_sentiment_neu = \
correct_ps_macro['pos'] / (predicted_ps_macro['pos'] + 1e-6), correct_ps_macro['neg'] / (predicted_ps_macro['neg'] + 1e-6), correct_ps_macro['neu'] / (predicted_ps_macro['neu'] + 1e-6)
r_pair_sentiment_pos, r_pair_sentiment_neg, r_pair_sentiment_neu = \
correct_ps_macro['pos'] / (relevant_ps_macro['pos'] + 1e-6), correct_ps_macro['neg'] / (relevant_ps_macro['neg'] + 1e-6), correct_ps_macro['neu'] / (relevant_ps_macro['neu'] + 1e-6)
f_pair_sentiment_pos, f_pair_sentiment_neg, f_pair_sentiment_neu = \
2 * p_pair_sentiment_pos * r_pair_sentiment_pos / (p_pair_sentiment_pos + r_pair_sentiment_pos + 1e-6),\
2 * p_pair_sentiment_neg * r_pair_sentiment_neg / (p_pair_sentiment_neg + r_pair_sentiment_neg + 1e-6),\
2 * p_pair_sentiment_neu * r_pair_sentiment_neu / (p_pair_sentiment_neu + r_pair_sentiment_neu + 1e-6)
f_pair_sentiment_macro = (f_pair_sentiment_pos + f_pair_sentiment_neg + f_pair_sentiment_neu) / 3.0
p_pair = correct_p / (predicted_p + 1e-6)
r_pair = correct_p / (relevant_p + 1e-6)
f_pair = 2 * p_pair * r_pair / (p_pair + r_pair + 1e-6)
p_aspect = correct_a / (predicted_a + 1e-6)
r_aspect = correct_a / (relevant_a + 1e-6)
f_aspect = 2 * p_aspect * r_aspect / (p_aspect + r_aspect + 1e-6)
p_opinion = correct_o / (predicted_o + 1e-6)
r_opinion = correct_o / (relevant_o + 1e-6)
f_opinion = 2 * p_opinion * r_opinion / (p_opinion + r_opinion + 1e-6)
return f_aspect, f_opinion, f_pair, f_pair_sentiment, f_pair_sentiment_macro, [loss_g_a, loss_g_o, loss_g_p, loss_g_ps]
def _test_acc_f1(self):
# switch model to evaluation mode
self.model.eval()
criterion = nn.CrossEntropyLoss()
predicted_p, relevant_p, correct_p = 0, 0, 0
predicted_ps, relevant_ps, correct_ps = 0, 0, 0
predicted_a, relevant_a, correct_a = 0, 0, 0
predicted_o, relevant_o, correct_o = 0, 0, 0
predicted_ps_macro, relevant_ps_macro, correct_ps_macro = {'pos':0, 'neg':0, 'neu':0}, {'pos':0, 'neg':0, 'neu':0}, {'pos':0, 'neg':0, 'neu':0}
dic = {1:'pos', 2:'neg', 3:'neu'}
loss_g_a, loss_g_o, loss_g_p, loss_g_ps = 0, 0, 0, 0
aspect_results, opinion_results, sentiment_results, pair_results, pair_sentiment_results = [], [], [], [], []
aspect_labels, opinion_labels, sentiment_labels, pair_labels, pair_sentiment_labels = [], [], [], [], []
with torch.no_grad():
for t_batch, t_sample_batched in enumerate(self.test_data_loader):
t_inputs = [t_sample_batched[col].to(opt.device) for col in self.opt.inputs_cols]
t_targets_pair = t_sample_batched['pair_grid_labels'].to(self.opt.device)
t_targets_pair_sentiment = t_sample_batched['triple_grid_labels'].to(self.opt.device)
t_targets_aspect = t_sample_batched['aspect_sequence_labels'].to(self.opt.device)
t_targets_opinion = t_sample_batched['opinion_sequence_labels'].to(self.opt.device)
t_targets_mask = t_sample_batched['mask'].to(self.opt.device)
t_aspect_mask = t_sample_batched['aspect_mask'].to(self.opt.device)
t_aspect_mask_ = t_aspect_mask.reshape(-1).long()
bs, sen_len = t_targets_mask.size()
t_targets_mask_grid = torch.where(t_targets_mask.unsqueeze(1).repeat(1,sen_len,1) == t_targets_mask.unsqueeze(-1).repeat(1,1,sen_len),\
torch.ones([bs, sen_len, sen_len]).to(self.opt.device), \
torch.zeros([bs, sen_len, sen_len]).to(self.opt.device))
t_outputs_aspect, t_outputs_opinion, t_outputs_pair, t_outputs_pair_sentiment = self.model(t_inputs, t_targets_mask)
t_outputs_aspect_env = t_outputs_aspect.argmax(dim=-1).view(t_targets_aspect.shape[0],t_targets_pair.shape[1])
t_outputs_opinion_env = t_outputs_opinion.argmax(dim=-1).view(t_targets_opinion.shape[0],t_targets_pair.shape[1])
t_outputs_pair_env = t_outputs_pair.argmax(dim=-1).view(t_targets_pair.shape[0],t_targets_pair.shape[1],t_targets_pair.shape[2])
t_outputs_pair_sentiment_env = t_outputs_pair_sentiment.argmax(dim=-1).view(t_targets_pair_sentiment.shape[0],t_targets_pair_sentiment.shape[1],t_targets_pair_sentiment.shape[2])
# compute loss
outputs_aspect_, targets_aspect_ = t_outputs_aspect.reshape(-1,3), t_targets_aspect.reshape(-1).long()
outputs_opinion_, targets_opinion_ = t_outputs_opinion.reshape(-1,3), t_targets_opinion.reshape(-1).long()
outputs_pair_, targets_pair_ = t_outputs_pair.reshape(-1,3), t_targets_pair.reshape(-1).long()
outputs_pair_sentiment_, targets_pair_sentiment_ = t_outputs_pair_sentiment.reshape(-1,4), t_targets_pair_sentiment.reshape(-1).long()
loss_aspect = (criterion(outputs_aspect_, targets_aspect_)*t_targets_mask).sum() / t_targets_mask.sum()
loss_opinion = (criterion(outputs_opinion_, targets_opinion_)*t_targets_mask).sum() / t_targets_mask.sum()
loss_pair = (criterion(outputs_pair_, targets_pair_)*t_targets_mask_grid).sum() / t_targets_mask_grid.sum()
loss_pair_sentiment = (criterion(outputs_pair_sentiment_, targets_pair_sentiment_)*t_targets_mask_grid).sum() / t_targets_mask_grid.sum()
loss_g_a, loss_g_o, loss_g_p, loss_g_ps = \
loss_g_a + loss_aspect, loss_g_o + loss_opinion, loss_g_p + loss_pair, loss_g_ps + loss_pair_sentiment
# metrics
outputs_a = (t_outputs_aspect_env*t_targets_mask).cpu().numpy().tolist()
targets_a = t_targets_aspect.cpu().numpy().tolist()
outputs_o = (t_outputs_opinion_env*t_targets_mask).cpu().numpy().tolist()
targets_o = t_targets_opinion.cpu().numpy().tolist()
outputs_p = (t_outputs_pair_env*t_targets_mask_grid).cpu().numpy().tolist()
targets_p = t_targets_pair.cpu().numpy().tolist()
outputs_ps = (t_outputs_pair_sentiment_env*t_targets_mask_grid).cpu().numpy().tolist()
targets_ps = t_targets_pair_sentiment.cpu().numpy().tolist()
# f1 for aspect
for out, tar in zip(outputs_a, targets_a):
predict_aspect = find_term(out)
true_aspect = find_term(tar)
predicted_a += len(predict_aspect)
relevant_a += len(true_aspect)
for aspect in predict_aspect:
if aspect in true_aspect:
correct_a += 1
# f1 for opinion
for out, tar in zip(outputs_o, targets_o):
predict_opinion = find_term(out)
true_opinion = find_term(tar)
predicted_o += len(predict_opinion)
relevant_o += len(true_opinion)
for opinion in predict_opinion:
if opinion in true_opinion:
correct_o += 1
# f1 for pair
for out, tar in zip(outputs_p, targets_p):
predict_pairs = find_pair(out)
true_pairs = find_pair(tar)
predicted_p += len(predict_pairs)
relevant_p += len(true_pairs)
for pair in predict_pairs:
if pair in true_pairs:
correct_p += 1
# f1 for sentiment pair
for out, tar, out_s, tar_s in zip(outputs_p, targets_p, outputs_ps, targets_ps):
predict_pairs_sentiment = find_pair_sentiment(out, out_s)
true_pairs_sentiment = find_pair_sentiment(tar, tar_s)
# micro
predicted_ps += len(predict_pairs_sentiment)
relevant_ps += len(true_pairs_sentiment)
for pair in predict_pairs_sentiment:
if pair in true_pairs_sentiment:
correct_ps += 1
# macro
for tri in predict_pairs_sentiment:
predicted_ps_macro[dic[tri[2]]]+=1
for tri in true_pairs_sentiment:
relevant_ps_macro[dic[tri[2]]]+=1
for pair in predict_pairs_sentiment:
if pair in true_pairs_sentiment:
correct_ps_macro[dic[tri[2]]] += 1
# save results and labels
aspect_results.append(t_outputs_aspect.view(t_targets_pair.shape[0], -1, 3).cpu().numpy().tolist())
opinion_results.append(t_outputs_opinion.view(t_targets_pair.shape[0], -1, 3).cpu().numpy().tolist())
pair_results.append(t_outputs_pair.view(t_targets_pair.shape[0], t_targets_aspect.shape[-1], t_targets_aspect.shape[-1], 3).cpu().numpy().tolist())
pair_sentiment_results.append(t_outputs_pair_sentiment.view(t_targets_pair.shape[0], t_targets_aspect.shape[-1], t_targets_aspect.shape[-1], 4).cpu().numpy().tolist())
aspect_labels.append(t_targets_aspect.cpu().numpy().tolist())
opinion_labels.append(t_targets_opinion.cpu().numpy().tolist())
pair_labels.append(t_targets_pair.cpu().numpy().tolist())
pair_sentiment_labels.append(t_targets_pair_sentiment.cpu().numpy().tolist())
# micro
p_pair_sentiment = correct_ps / (predicted_ps + 1e-6)
r_pair_sentiment = correct_ps / (relevant_ps + 1e-6)
f_pair_sentiment = 2 * p_pair_sentiment * r_pair_sentiment / (p_pair_sentiment + r_pair_sentiment + 1e-6)
# macro
p_pair_sentiment_pos, p_pair_sentiment_neg, p_pair_sentiment_neu = \
correct_ps_macro['pos'] / (predicted_ps_macro['pos'] + 1e-6), correct_ps_macro['neg'] / (predicted_ps_macro['neg'] + 1e-6), correct_ps_macro['neu'] / (predicted_ps_macro['neu'] + 1e-6)
r_pair_sentiment_pos, r_pair_sentiment_neg, r_pair_sentiment_neu = \
correct_ps_macro['pos'] / (relevant_ps_macro['pos'] + 1e-6), correct_ps_macro['neg'] / (relevant_ps_macro['neg'] + 1e-6), correct_ps_macro['neu'] / (relevant_ps_macro['neu'] + 1e-6)
f_pair_sentiment_pos, f_pair_sentiment_neg, f_pair_sentiment_neu = \
2 * p_pair_sentiment_pos * r_pair_sentiment_pos / (p_pair_sentiment_pos + r_pair_sentiment_pos + 1e-6),\
2 * p_pair_sentiment_neg * r_pair_sentiment_neg / (p_pair_sentiment_neg + r_pair_sentiment_neg + 1e-6),\
2 * p_pair_sentiment_neu * r_pair_sentiment_neu / (p_pair_sentiment_neu + r_pair_sentiment_neu + 1e-6)
f_pair_sentiment_macro = (f_pair_sentiment_pos + f_pair_sentiment_neg + f_pair_sentiment_neu) / 3.0
p_pair = correct_p / (predicted_p + 1e-6)
r_pair = correct_p / (relevant_p + 1e-6)
f_pair = 2 * p_pair * r_pair / (p_pair + r_pair + 1e-6)
p_aspect = correct_a / (predicted_a + 1e-6)
r_aspect = correct_a / (relevant_a + 1e-6)
f_aspect = 2 * p_aspect * r_aspect / (p_aspect + r_aspect + 1e-6)
p_opinion = correct_o / (predicted_o + 1e-6)
r_opinion = correct_o / (relevant_o + 1e-6)
f_opinion = 2 * p_opinion * r_opinion / (p_opinion + r_opinion + 1e-6)
results = [aspect_results, opinion_results, sentiment_results, pair_results, pair_sentiment_results]
labels = [aspect_labels, opinion_labels, sentiment_labels, pair_labels, pair_sentiment_labels]
return f_aspect, f_opinion, f_pair, [f_pair_sentiment, p_pair_sentiment, r_pair_sentiment], f_pair_sentiment_macro, results, labels, [loss_g_a, loss_g_o, loss_g_p, loss_g_ps]
def run(self):
    """Run ``opt.repeats`` independent training runs and log results.

    For each repeat: re-initializes parameters, builds a fresh optimizer,
    trains via ``self._train``, and logs the best dev/test F1 scores to
    ``self.f_out``.  After all repeats, writes test-metric averages and
    closes the log file.

    Side effects: creates ``log/`` if missing, may save the best BERT model
    to ``./save_bert_model/`` and prediction/label arrays to
    ``./write_results/`` (NOTE(review): both directories are assumed to
    already exist — verify, since only ``log/`` is created here).
    """
    # Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    if not os.path.exists('log/'):
        os.mkdir('log/')
    import datetime as dt
    # NOTE(review): now_time is computed but unused here (the f_out line
    # that consumed it is commented out below).
    now_time = dt.datetime.now().strftime('%F %T')
    # f_out = open('log/'+self.opt.model_name+'_'+self.opt.dataset+'_val'+str(now_time)+'.txt', 'w', encoding='utf-8')
    # print args
    # Count trainable vs frozen parameters for the log header.
    n_trainable_params, n_nontrainable_params = 0, 0
    for p in self.model.parameters():
        n_params = torch.prod(torch.tensor(p.shape)).item()
        if p.requires_grad:
            n_trainable_params += n_params
        else:
            n_nontrainable_params += n_params
    self.f_out.write('n_trainable_params: {0}, n_nontrainable_params: {1}\n'.format(n_trainable_params, n_nontrainable_params)+'\n')
    self.f_out.write('> training arguments:\n')
    for arg in vars(self.opt):
        self.f_out.write('>>> {0}: {1}'.format(arg, getattr(self.opt, arg))+'\n')
    # Accumulators for averaging best test metrics across repeats.
    # NOTE(review): max_sentiment_test_f1_avg and max_absa_test_f1_avg are
    # initialized but never updated below.
    max_aspect_test_f1_avg = 0
    max_opinion_test_f1_avg = 0
    max_sentiment_test_f1_avg = 0
    max_absa_test_f1_avg = 0
    max_pair_test_f1_avg = 0
    max_pair_sentiment_test_f1_avg = 0
    max_precision_avg, max_recall_avg = 0, 0
    for i in range(self.opt.repeats):
        repeats = self.opt.repeats
        print('repeat: ', (i+1))
        self.f_out.write('repeat: '+str(i+1)+'\n')
        # Fresh parameter initialization for every repeat.
        self._reset_params()
        # Only optimize parameters that require gradients.
        _params = filter(lambda p: p.requires_grad, self.model.parameters())
        # _params = self.model.parameters()
        optimizer = self.opt.optimizer(_params, lr=self.opt.learning_rate, weight_decay=self.opt.l2reg)
        # max_pair_dev_f1, max_aspect_dev_f1, max_opinion_dev_f1, max_pair_test_f1, max_aspect_test_f1, max_opinion_test_f1 = self._train(criterion, optimizer)
        max_aspect_dev_f1, max_opinion_dev_f1, max_pair_dev_f1, max_pair_sentiment_dev_f1,\
            max_aspect_test_f1, max_opinion_test_f1, max_pair_test_f1, max_pair_sentiment_test_f1, max_precision, max_recall, best_results, best_labels, best_model = self._train(criterion, optimizer)
        if self.opt.save_model == 1:
            # Persist only the underlying BERT module of the best model.
            torch.save(best_model.bert_model, './save_bert_model/' + self.opt.model_name + '_' + self.opt.dataset + '.pkl')
        if self.opt.write_results == 1:
            # Dump best-epoch predictions and gold labels for offline analysis.
            results_a, results_o, results_s, results_p, results_ps = best_results
            labels_a, labels_o, labels_s, labels_p, labels_ps = best_labels
            # write results
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'results_a.npy', results_a)
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'results_o.npy', results_o)
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'results_s.npy', results_s)
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'results_p.npy', results_p)
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'results_ps.npy', results_ps)
            # write labels
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'labels_a.npy', labels_a)
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'labels_o.npy', labels_o)
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'labels_s.npy', labels_s)
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'labels_p.npy', labels_p)
            np.save('./write_results/'+self.opt.dataset+'/'+self.opt.dataset+'labels_ps.npy', labels_ps)
        print('max_aspect_dev_f1: {:.4f}, max_opinion_dev_f1: {:.4f}, max_pair_dev_f1: {:.4f}, max_pair_sentiment_dev_f1: {:.4f}'.format(max_aspect_dev_f1, max_opinion_dev_f1, max_pair_dev_f1, max_pair_sentiment_dev_f1))
        print('max_aspect_test_f1: {:.4f}, max_opinion_test_f1: {:.4f}, max_pair_test_f1: {:.4f}, max_pair_sentiment_test_f1: {:.4f}'.format(max_aspect_test_f1, max_opinion_test_f1, max_pair_test_f1, max_pair_sentiment_test_f1))
        self.f_out.write('max_aspect_dev_f1: {:.4f}, max_opinion_dev_f1: {:.4f}, max_pair_dev_f1: {:.4f}, max_pair_sentiment_dev_f1: {:.4f}\n'\
            .format(max_aspect_dev_f1, max_opinion_dev_f1, max_pair_dev_f1, max_pair_sentiment_dev_f1)+'\n')
        self.f_out.write('max_aspect_test_f1: {:.4f}, max_opinion_test_f1: {:.4f}, max_pair_test_f1: {:.4f}, max_pair_sentiment_test_f1: {:.4f}\n'\
            .format(max_aspect_test_f1, max_opinion_test_f1, max_pair_test_f1, max_pair_sentiment_test_f1)+'\n')
        self.f_out.write('max_test_precision: {:.4f}, max_test_recall: {:.4f}\n'\
            .format(max_precision, max_recall)+'\n')
        # Accumulate per-repeat bests for the final averages.
        max_aspect_test_f1_avg += max_aspect_test_f1
        max_opinion_test_f1_avg += max_opinion_test_f1
        max_pair_test_f1_avg += max_pair_test_f1
        max_pair_sentiment_test_f1_avg += max_pair_sentiment_test_f1
        max_precision_avg += max_precision
        max_recall_avg += max_recall
    print('#' * 100)
    # Report averages over all repeats.
    print("max_aspect_test_f1_avg:", max_aspect_test_f1_avg / repeats)
    print("max_opinion_test_f1_avg:", max_opinion_test_f1_avg / repeats)
    print("max_pair_test_f1_avg:", max_pair_test_f1_avg / repeats)
    print("max_pair_sentiment_test_f1_avg:", max_pair_sentiment_test_f1_avg / repeats)
    self.f_out.write("max_aspect_test_f1_avg:"+ str(max_aspect_test_f1_avg / repeats) + '\n')
    self.f_out.write("max_opinion_test_f1_avg:"+ str(max_opinion_test_f1_avg / repeats) + '\n')
    self.f_out.write("max_pair_test_f1_avg:" + str(max_pair_test_f1_avg / repeats) + '\n')
    self.f_out.write("max_pair_sentiment_test_f1_avg:" + str(max_pair_sentiment_test_f1_avg / repeats) + '\n')
    self.f_out.write("max_precision_avg:" + str(max_precision_avg / repeats) + '\n')
    self.f_out.write("max_recall_avg:" + str(max_recall_avg / repeats) + '\n')
    self.f_out.close()
if __name__ == '__main__':
    # Hyper Parameters
    # Command-line configuration; defaults reproduce the lap14 baseline.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='ts', type=str)
    parser.add_argument('--dataset', default='lap14', type=str, help='res14, lap14, res15')
    parser.add_argument('--optimizer', default='adam', type=str)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--l2reg', default=0.00001, type=float)
    parser.add_argument('--num_epoch', default=100, type=int)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--log_step', default=10, type=int)
    parser.add_argument('--embed_dim', default=300, type=int)
    parser.add_argument('--hidden_dim', default=300, type=int)
    parser.add_argument('--polarities_dim', default=3, type=int)
    # NOTE(review): type=bool on argparse does not parse "False" as False;
    # any non-empty string is truthy — confirm intended usage.
    parser.add_argument('--save', default=False, type=bool)
    parser.add_argument('--seed', default=123, type=int)
    parser.add_argument('--device', default='cuda', type=str)
    parser.add_argument('--repeats', default=3, type=int)
    # Graph-variant switches (consumed by the ts* model classes).
    parser.add_argument('--use_graph0', default=1, type=int)
    parser.add_argument('--use_graph1', default=0, type=int)
    parser.add_argument('--use_graph2', default=0, type=int)
    parser.add_argument('--use_graph3', default=0, type=int)
    parser.add_argument('--write_results', default=0, type=int)
    parser.add_argument('--save_model', default=0, type=int)
    parser.add_argument('--emb_for_ao', default='private_single', type=str, help='private_single, private_multi, shared_multi' )
    parser.add_argument('--emb_for_ps', default='private_single', type=str, help='private_single, private_multi, shared_multi' )
    parser.add_argument('--use_aspect_opinion_sequence_mask', default=0, type=int, help='1: use the predicted aspect_sequence_label and opinion_sequence_label to construct a grid mask for pair prediction.' )
    parser.add_argument('--gcn_layers_in_graph0', default=1, type=int, help='1 or 2' )
    opt = parser.parse_args()
    # Registry mapping --model_name to its model class.
    model_classes = {
        'ts': TS,
        'ts0': TS0,
        'ts1': TS1,
        'ts2': TS2,
        'ts3': TS3,
        'ts1_3': TS1_3
    }
    # Per-model list of batch-dict keys fed to the model's forward pass.
    input_colses = {
        'ts': ['text_indices', 'mask', 'aspect_sequence_labels','opinion_sequence_labels','sentiment_sequence_labels'],\
        'ts0': ['text_indices', 'mask', 'global_graph0', 'relevant_sentences', 'relevant_sentences_presentation', \
            'pair_grid_labels', 'triple_grid_labels', 'aspect_sequence_labels','opinion_sequence_labels','sentiment_sequence_labels'],\
        'ts1': ['text_indices', 'mask', 'global_graph0', 'global_graph1', 'relevant_sentences', 'relevant_sentences_presentation', \
            'pair_grid_labels', 'triple_grid_labels', 'aspect_sequence_labels','opinion_sequence_labels','sentiment_sequence_labels'],\
        'ts2': ['text_indices', 'mask', 'global_graph0', 'global_graph1', 'global_graph2', 'relevant_sentences', 'relevant_sentences_presentation', \
            'pair_grid_labels', 'triple_grid_labels', 'aspect_sequence_labels','opinion_sequence_labels','sentiment_sequence_labels'],\
        'ts3': ['text_indices', 'mask', 'global_graph0', 'global_graph1', 'global_graph3', 'relevant_sentences', 'relevant_sentences_presentation', \
            'pair_grid_labels', 'triple_grid_labels', 'aspect_sequence_labels','opinion_sequence_labels','sentiment_sequence_labels'],\
        'ts1_3': ['text_indices', 'mask', 'global_graph0', 'global_graph1', 'global_graph3', 'relevant_sentences', 'relevant_sentences_presentation', \
            'pair_grid_labels', 'triple_grid_labels', 'aspect_sequence_labels','opinion_sequence_labels','sentiment_sequence_labels']
    }
    # String-to-callable registries for initializer and optimizer choices.
    initializers = {
        'xavier_uniform_': torch.nn.init.xavier_uniform_,
        'xavier_normal_': torch.nn.init.xavier_normal_,
        'orthogonal_': torch.nn.init.orthogonal_,
    }
    optimizers = {
        'adadelta': torch.optim.Adadelta,  # default lr=1.0
        'adagrad': torch.optim.Adagrad,  # default lr=0.01
        'adam': torch.optim.Adam,  # default lr=0.001
        'adamax': torch.optim.Adamax,  # default lr=0.002
        'asgd': torch.optim.ASGD,  # default lr=0.01
        'rmsprop': torch.optim.RMSprop,  # default lr=0.01
        'sgd': torch.optim.SGD,
    }
    # Resolve string options into the concrete classes/callables on opt.
    opt.model_class = model_classes[opt.model_name]
    opt.inputs_cols = input_colses[opt.model_name]
    opt.initializer = initializers[opt.initializer]
    opt.optimizer = optimizers[opt.optimizer]
    opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
        if opt.device is None else torch.device(opt.device)
    if opt.seed is not None:
        # Seed every RNG source and force deterministic cuDNN for reproducibility.
        random.seed(opt.seed)
        numpy.random.seed(opt.seed)
        torch.manual_seed(opt.seed)
        torch.cuda.manual_seed(opt.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    ins = Instructor(opt)
    ins.run()
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,962
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/run_bert.py
|
# Launcher script: runs train_with_bert.py on GPU 0 for the res14 dataset
# with two batch-size configurations. Commented lines are alternative runs.
import os
import sys
# res14
os.system('CUDA_VISIBLE_DEVICES=0 python train_with_bert.py --learning_rate 5e-5 --dataset res14 --num_epoch 200 --batch_size 8 --if_lambda 0')
# os.system('CUDA_VISIBLE_DEVICES=0 python train_with_bert.py --learning_rate 5e-5 --dataset res14 --num_epoch 100 --batch_size 32 --if_lambda 0')
os.system('CUDA_VISIBLE_DEVICES=0 python train_with_bert.py --learning_rate 5e-5 --dataset res14 --num_epoch 200 --batch_size 32 --if_lambda 0')
# os.system('CUDA_VISIBLE_DEVICES=0 python train_with_bert.py --learning_rate 5e-5 --dataset res14 --num_epoch 100 --batch_size 8 --if_lambda 0')
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,963
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/bucket_iterator.py
|
# -*- coding: utf-8 -*-
import math
import random
import torch
import numpy
import numpy as np
import pdb
class BucketIterator(object):
    """Length-bucketing batch iterator.

    Sorts samples by the length of ``sort_key`` (so each batch contains
    similarly-sized sequences), pads every batch to its own maxima, and
    yields ready-made tensor dicts.  Batches are built once in ``__init__``
    and optionally shuffled on each iteration.
    """

    def __init__(self, data, batch_size, sort_key='text_indices', shuffle=True, sort=True):
        """Pre-build all padded batches from ``data``.

        data: list of per-sample dicts (see keys read in ``pad_data``).
        batch_size: samples per batch (last batch may be smaller).
        sort_key: sample field whose length drives the bucketing sort.
        shuffle: shuffle batch order on every ``__iter__`` pass.
        sort: sort samples by length before batching.
        """
        self.shuffle = shuffle
        self.sort = sort
        self.sort_key = sort_key
        self.batches = self.sort_and_pad(data, batch_size)
        self.batch_len = len(self.batches)

    def sort_and_pad(self, data, batch_size):
        """Sort samples by ``sort_key`` length, slice into batches, pad each."""
        num_batch = int(math.ceil(len(data) / batch_size))
        if self.sort:
            sorted_data = sorted(data, key=lambda x: len(x[self.sort_key]))
        else:
            sorted_data = data
        batches = []
        for i in range(num_batch):
            batches.append(self.pad_data(sorted_data[i*batch_size : (i+1)*batch_size]))
        return batches

    def pad_data(self, batch_data):
        """Pad one batch to its own maxima and convert everything to tensors.

        Sequences are right-padded with 0 to the longest main sentence
        (``max_len``); the relevant-sentence axis is padded to ``max_sen_num``
        entries of length ``max_relevant_sen_len``; grid labels and graphs are
        zero-padded to square/rectangular shapes.  Returns a dict of tensors
        (see the shape comment after this method).
        """
        batch_text_indices, batch_mask, batch_aspect_mask = [], [], []
        # batch_mask, batch_aspect_mask = [], []
        # batch_target_triple = []
        # batch_local_graph, batch_global_graph, batch_relevant_sentences = [], [], []
        batch_global_graph0, batch_global_graph1, batch_global_graph2, batch_global_graph3 = [], [], [], []
        # batch_local_graph_pmi = []
        batch_relevant_sentences, batch_relevant_sentences_presentation = [], []
        batch_pair_grid_labels, batch_triple_grid_labels = [], []
        # batch_aspect_grid_labels, batch_opinion_grid_labels = [], []
        batch_aspect_sequence_labels, batch_opinion_sequence_labels, batch_sentiment_sequence_labels = [], [], []
        # max length of the main sentence in this batch
        max_len = max([len(t[self.sort_key]) for t in batch_data])
        # max number of relevant sentences per sample in this batch
        max_sen_num = max([len(t['relevant_sentences']) for t in batch_data])
        # max relevant-sentence length in this batch
        max_relevant_sen_len = 0
        for batch in batch_data:
            sentences = batch['relevant_sentence_presentation']
            for sentence in sentences:
                tem = len(sentence)
                if tem > max_relevant_sen_len:
                    max_relevant_sen_len = tem
        for item in batch_data:
            # text_indices, target_triple, mask, global_graph0, local_graph, relevant_sentences, \
            # relevant_sentence_presentation, pair_labels, sentiment_labels, local_graph_pmi = \
            #     item['text_indices'], item['mask'], item['global_graph0'], item['local_graph'], item['relevant_sentences'],\
            #     item['relevant_sentence_presentation'], item['pair_labels'], item['sentiment_labels'], item['local_graph_pmi']
            # read data
            text_indices, mask = item['text_indices'], item['mask']
            relevant_sentences, relevant_sentence_presentation = item['relevant_sentences'], item['relevant_sentence_presentation']
            global_graph0, global_graph1, global_graph2, global_graph3 = item['global_graph0'], item['global_graph1'], item['global_graph2'], item['global_graph3']
            aspect_sequence_label, opinion_sequence_label, sentiment_sequence_label = \
                item['aspect_sequence_label'], item['opinion_sequence_label'], item['sentiment_sequence_label']
            # NOTE(review): the span labels below are read but never used in
            # this method.
            aspect_span_label, opinion_span_label = item['aspect_span_labels'], item['opinion_span_labels']
            pair_grid_label, triple_grid_label = item['pair_grid_labels'], item['triple_grid_labels']
            # aspect_grid_label, opinion_grid_label = item['aspect_grid_label'], item['opinion_grid_label']
            # padding relevant sentence_presentation: pad each relevant
            # sentence to max_relevant_sen_len, then pad the list of
            # sentences up to max_sen_num with all-zero rows.
            relevant_sentence_presentation_ = []
            for re_sentence in relevant_sentence_presentation:
                temm = re_sentence
                for t in range(max_relevant_sen_len - len(re_sentence)):
                    temm.append(0)
                relevant_sentence_presentation_.append(temm)
            for jj in range(max_sen_num-len(relevant_sentence_presentation)):
                relevant_sentence_presentation_.append([0]*max_relevant_sen_len)
            # prepare for padding
            text_padding = [0] * (max_len - len(text_indices))
            sen_padding = [0] * (max_sen_num - len(relevant_sentences))
            # generate aspect mask: 1 where the aspect sequence label is
            # non-zero, else 0.
            aspect_mask = []
            for i in aspect_sequence_label:
                if i != 0:
                    aspect_mask.append(1)
                else:
                    aspect_mask.append(0)
            # padding for local graph
            '''local_graph = local_graph.tolist()
            for i in range(len(local_graph)):
                for j in range(max_len - len(text_indices)):
                    local_graph[i].append(0)
            for i in range(max_len - len(text_indices)):
                local_graph.append([0]*max_len)
            local_graph = np.array(local_graph)'''
            # local_graph_pmi = local_graph_pmi.tolist()
            # for i in range(len(local_graph_pmi)):
            #     for j in range(max_len - len(text_indices)):
            #         local_graph_pmi[i].append(0)
            # for i in range(max_len - len(text_indices)):
            #     local_graph_pmi.append([0]*max_len)
            # local_graph_pmi = np.array(local_graph_pmi)
            # padding for global graph 0: pad columns to max_sen_num, then
            # rows to max_len.
            global_graph0 = global_graph0.tolist()
            for i in range(len(global_graph0)):
                for j in range(max_sen_num-len(global_graph0[i])):
                    global_graph0[i].append(0)
            for i in range(max_len - len(text_indices)):
                global_graph0.append([0]*max_sen_num)
            global_graph0 = np.array(global_graph0)
            # padding for global graph 1: pad each sub-graph to
            # (max_sen_num, max_relevant_sen_len), then pad the stack of
            # sub-graphs to max_sen_num with zero graphs.
            global_graph1_= []
            for graph in global_graph1:
                global_graph1_.append(graph.tolist())
            for i in range(len(global_graph1_)):
                for j in range(len(global_graph1_[i])):
                    for k in range(max_relevant_sen_len - len(global_graph1_[i][j])):
                        global_graph1_[i][j].append(0)
            for i in range(len(global_graph1_)):
                for k in range(max_sen_num - len(global_graph1_[i])):
                    global_graph1_[i].append([0]*max_relevant_sen_len)
            global_graph1_ = np.array(global_graph1_)
            tem_graph = np.zeros_like(global_graph1_[0])
            tem_len = len(global_graph1_)
            for k in range(max_sen_num-tem_len):
                global_graph1_ = np.append(global_graph1_, [tem_graph], axis=0)
            # padding for global graph 2 (same scheme, rows padded to max_len)
            global_graph2_ = []
            for graph in global_graph2:
                global_graph2_.append(graph.tolist())
            for i in range(len(global_graph2_)):
                for j in range(len(global_graph2_[i])):
                    for k in range(max_relevant_sen_len - len(global_graph2_[i][j])):
                        global_graph2_[i][j].append(0)
            for i in range(len(global_graph2_)):
                for k in range(max_len - len(global_graph2_[i])):
                    global_graph2_[i].append([0]*max_relevant_sen_len)
            global_graph2_ = np.array(global_graph2_)
            tem_graph = np.zeros_like(global_graph2_[0])
            tem_len = len(global_graph2_)
            for k in range(max_sen_num-tem_len):
                global_graph2_ = np.append(global_graph2_, [tem_graph], axis=0)
            # padding for global graph 3 (same scheme as graph 2)
            global_graph3_ = []
            for graph in global_graph3:
                global_graph3_.append(graph.tolist())
            for i in range(len(global_graph3_)):
                for j in range(len(global_graph3_[i])):
                    for k in range(max_relevant_sen_len - len(global_graph3_[i][j])):
                        global_graph3_[i][j].append(0)
            for i in range(len(global_graph3_)):
                for k in range(max_len - len(global_graph3_[i])):
                    global_graph3_[i].append([0]*max_relevant_sen_len)
            global_graph3_ = np.array(global_graph3_)
            tem_graph = np.zeros_like(global_graph3_[0])
            tem_len = len(global_graph3_)
            for k in range(max_sen_num-tem_len):
                global_graph3_ = np.append(global_graph3_, [tem_graph], axis=0)
            # padding for pair_grid_labels: zero-pad to (max_len, max_len)
            pair_grid_label = pair_grid_label.tolist()
            for i in range(len(pair_grid_label)):
                for j in range(max_len - len(text_indices)):
                    pair_grid_label[i].append(0)
            for i in range(max_len - len(text_indices)):
                pair_grid_label.append([0]*max_len)
            pair_grid_label = np.array(pair_grid_label)
            # padding for triple_grid_label: zero-pad to (max_len, max_len)
            triple_grid_label = triple_grid_label.tolist()
            for i in range(len(triple_grid_label)):
                for j in range(max_len - len(text_indices)):
                    triple_grid_label[i].append(0)
            for i in range(max_len - len(text_indices)):
                triple_grid_label.append([0]*max_len)
            triple_grid_label = np.array(triple_grid_label)
            # padding for
            # padding for grid label
            # aspect_grid_label = aspect_grid_label.tolist()
            # for i in range(len(aspect_grid_label)):
            #     for j in range(max_len - len(text_indices)):
            #         aspect_grid_label[i].append(0)
            # for i in range(max_len - len(text_indices)):
            #     aspect_grid_label.append([0]*max_len)
            # aspect_grid_label = np.array(aspect_grid_label)
            # opinion_grid_label = opinion_grid_label.tolist()
            # for i in range(len(opinion_grid_label)):
            #     for j in range(max_len - len(text_indices)):
            #         opinion_grid_label[i].append(0)
            # for i in range(max_len - len(text_indices)):
            #     opinion_grid_label.append([0]*max_len)
            # opinion_grid_label = np.array(opinion_grid_label)
            # Collect the padded sample into the batch lists.
            batch_text_indices.append(text_indices + text_padding)
            batch_mask.append(mask + text_padding)
            batch_aspect_mask.append(aspect_mask + text_padding)
            # batch_target_triple.append(target_triple)
            # batch_local_graph.append(local_graph)
            # batch_local_graph_pmi.append(local_graph_pmi)
            batch_global_graph0.append(global_graph0)
            batch_global_graph1.append(global_graph1_)
            batch_global_graph2.append(global_graph2_)
            batch_global_graph3.append(global_graph3_)
            batch_relevant_sentences.append(relevant_sentences + sen_padding)
            batch_relevant_sentences_presentation.append(relevant_sentence_presentation_)
            batch_pair_grid_labels.append(pair_grid_label)
            batch_triple_grid_labels.append(triple_grid_label)
            batch_aspect_sequence_labels.append(aspect_sequence_label + text_padding)
            batch_opinion_sequence_labels.append(opinion_sequence_label + text_padding)
            batch_sentiment_sequence_labels.append(sentiment_sequence_label + text_padding)
            # batch_aspect_grid_labels.append(aspect_grid_label)
            # batch_opinion_grid_labels.append(opinion_grid_label)
        return {'text_indices': torch.tensor(batch_text_indices), \
                'mask': torch.tensor(batch_mask),\
                'aspect_mask': torch.tensor(batch_aspect_mask),\
                # 'local_graph': torch.tensor(batch_local_graph),\
                # 'local_graph_pmi': torch.tensor(batch_local_graph_pmi),\
                'global_graph0': torch.tensor(batch_global_graph0),\
                'global_graph1': torch.tensor(batch_global_graph1),\
                'global_graph2': torch.tensor(batch_global_graph2),\
                'global_graph3': torch.tensor(batch_global_graph3),\
                'relevant_sentences': torch.tensor(batch_relevant_sentences),\
                'relevant_sentences_presentation': torch.tensor(batch_relevant_sentences_presentation),\
                'pair_grid_labels':torch.tensor(batch_pair_grid_labels),\
                'triple_grid_labels':torch.tensor(batch_triple_grid_labels),\
                'aspect_sequence_labels':torch.tensor(batch_aspect_sequence_labels),\
                'opinion_sequence_labels':torch.tensor(batch_opinion_sequence_labels),\
                'sentiment_sequence_labels':torch.tensor(batch_sentiment_sequence_labels),\
                # 'aspect_grid_labels':torch.tensor(batch_aspect_grid_labels),\
                # 'opinion_grid_labels':torch.tensor(batch_opinion_grid_labels),\
                # 'target_triple': batch_target_triple
                }
    # b -> batch_size, n -> sentence_length, k -> number_of_relevant_sentences_for_every_sentence, m -> relevant_sentence_length
    # 'text_indices': (b, n)
    # 'mask': (b, n)
    # 'local_graph': (b, n, n)
    # 'local_graph_pmi': (b, n, n)
    # 'global_graph0': (b, n, k)
    # 'relevant_sentences': (b, k)
    # 'relevant_sentences_presentation': (b, k, m)
    # 'pair_labels': (b, n, n)
    # 'sentiment_labels': (b, n, n)
    # 'aspect_sequence_labels': (b, n)
    # 'opinion_sequence_labels':
    # 'sentiment_sequence_labels':

    def __iter__(self):
        """Yield pre-built batches, optionally reshuffled each pass."""
        if self.shuffle:
            random.shuffle(self.batches)
        for idx in range(self.batch_len):
            yield self.batches[idx]
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,964
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/evaluation_.py
|
import numpy as np
import pdb
def convert_to_list(y_aspect, y_opinion, y_sentiment):
    """Collapse per-token label distributions into label-index sequences.

    Each of the three inputs is a batch of sequences of per-token score
    distributions; each distribution is reduced to its argmax.  Returns
    three parallel lists of label-index sequences (aspect, opinion,
    sentiment).
    """
    y_aspect_list, y_opinion_list, y_sentiment_list = [], [], []
    for seq_aspect, seq_opinion, seq_sentiment in zip(y_aspect, y_opinion, y_sentiment):
        # Reduce every token's distribution to the winning label index.
        y_aspect_list.append([np.argmax(dist) for dist in seq_aspect])
        y_opinion_list.append([np.argmax(dist) for dist in seq_opinion])
        # (A commented-out variant once mapped all-zero sentiment rows to 0.)
        y_sentiment_list.append([np.argmax(dist) for dist in seq_sentiment])
    return y_aspect_list, y_opinion_list, y_sentiment_list
def score(true_aspect, predict_aspect, true_sentiment, predict_sentiment, mask):
    """Score aspect-span extraction (and collect sentiment tallies).

    Labels use a BIO-style encoding: 1 = span begin, 2 = span inside,
    0 = outside.  A predicted span counts as correct only if its begin
    position and full extent match the gold span exactly.

    Returns (f_aspect, acc_s, f_s, f_absa); the last three are currently
    always 0 because the sentiment-scoring section is commented out.
    NOTE(review): ``mask`` is accepted but unused.
    """
    begin = 1
    inside = 2
    # predicted sentiment distribution for aspect terms that are correctly extracted
    pred_count = {'pos':0, 'neg':0, 'neu':0, 'con':0}
    # gold sentiment distribution for aspect terms that are correctly extracted
    rel_count = {'pos':0, 'neg':0, 'neu':0, 'con':0}
    # sentiment distribution for terms that get both span and sentiment predicted correctly
    correct_count = {'pos':0, 'neg':0, 'neu':0, 'con':0}
    # sentiment distribution in original data
    total_count = {'pos':0, 'neg':0, 'neu':0, 'con':0}
    polarity_map = {1: 'pos', 2: 'neg', 3: 'neu', 4:'con'}
    # count of predicted conflict aspect term
    predicted_conf = 0
    correct, predicted, relevant = 0, 0, 0
    for i in range(len(true_aspect)):
        true_seq = true_aspect[i]
        predict = predict_aspect[i]
        for num in range(len(true_seq)):
            if true_seq[num] == begin:
                relevant += 1
                # if not train_op:
                if true_sentiment[i][num]!=0:
                    total_count[polarity_map[true_sentiment[i][num]]]+=1
                if predict[num] == begin:
                    # Walk the span: every inside-token must agree on both
                    # sides; the span ends when both leave "inside" together.
                    match = True
                    for j in range(num+1, len(true_seq)):
                        if true_seq[j] == inside and predict[j] == inside:
                            continue
                        elif true_seq[j] != inside and predict[j] != inside:
                            break
                        else:
                            match = False
                            break
                    if match:
                        correct += 1 # aspect extraction correct
                        # if not train_op:
                        # do not count conflict examples
                        if true_sentiment[i][num]!=0:
                            rel_count[polarity_map[true_sentiment[i][num]]]+=1 # real sentiment when aspect is correct
                            if predict_sentiment[i][num]!=0:
                                pred_count[polarity_map[predict_sentiment[i][num]]]+=1 # predict sentiment when aspect is correct
                                if true_sentiment[i][num] == predict_sentiment[i][num]:
                                    correct_count[polarity_map[true_sentiment[i][num]]]+=1 # aspect and sentiment are correct
                            else:
                                predicted_conf += 1 # aspect is correct but sentiment is none
        for pred in predict:
            if pred == begin:
                predicted += 1 # aspect nums predicted; relavent is aspect nums real
    # Precision/recall/F1 for span extraction; 1e-6 guards zero division.
    p_aspect = correct / (predicted + 1e-6)
    r_aspect = correct / (relevant + 1e-6)
    # F1 score for aspect extraction
    f_aspect = 2 * p_aspect * r_aspect / (p_aspect + r_aspect + 1e-6)
    # print(f_aspect)
    acc_s, f_s, f_absa = 0, 0, 0
    # # if not train_op:
    # num_correct_overall = correct_count['pos']+correct_count['neg']+correct_count['neu']
    # num_correct_aspect = rel_count['pos']+rel_count['neg']+rel_count['neu']
    # num_total = total_count['pos']+total_count['neg']+total_count['neu']
    # acc_s = num_correct_overall/(num_correct_aspect+1e-6)
    # p_pos = correct_count['pos'] / (pred_count['pos']+1e-6)
    # r_pos = correct_count['pos'] / (rel_count['pos']+1e-6)
    # p_neg = correct_count['neg'] / (pred_count['neg']+1e-6)
    # r_neg = correct_count['neg'] / (rel_count['neg']+1e-6)
    # p_neu = correct_count['neu'] / (pred_count['neu']+1e-6)
    # r_neu= correct_count['neu'] / (rel_count['neu']+1e-6)
    # pr_s = (p_pos+p_neg+p_neu)/3,0
    # re_s = (r_pos+r_neg+r_neu)/3,0
    # # F1 score for AS only
    # print(pr_s, re_s)
    # if pr_s+re_s != 0:
    #     f_s = 2*pr_s*re_s/(pr_s+re_s)
    # else:
    #     f_s = 0
    # precision_absa = num_correct_overall/(predicted+1e-6 - predicted_conf)
    # recall_absa = num_correct_overall/(num_total+1e-6)
    # # F1 score of the end-to-end task
    # f_absa = 2*precision_absa*recall_absa/(precision_absa+recall_absa+1e-6)
    return f_aspect, acc_s, f_s, f_absa
# def get_metric(y_true_aspect, y_predict_aspect, y_true_sentiment, y_predict_sentiment, mask, train_op):
def get_metric(y_true_aspect, y_predict_aspect, y_true_opinion, y_predict_opinion, y_true_sentiment, y_predict_sentiment, mask):
f_a, f_o = 0, 0
true_aspect, true_opinion, true_sentiment = y_true_aspect, y_true_opinion, y_true_sentiment
predict_aspect, predict_opinion, predict_sentiment = convert_to_list(y_predict_aspect, y_predict_opinion, y_predict_sentiment)
# predict_aspect, predict_sentiment = y_predict_aspect, y_predict_sentiment
f_aspect, acc_s, f_s, f_absa = score(true_aspect, predict_aspect, true_sentiment, predict_sentiment, mask)
f_opinion, _, _, _ = score(true_opinion, predict_opinion, true_sentiment, true_sentiment, 0)
return f_aspect, f_opinion, acc_s, f_s, f_absa
def score3(true_aspect, predict_aspect, true_opinion, predict_opinion, true_sentiment, predict_sentiment, mask, train_op=0):
    """Score aspect-span extraction and, unless ``train_op``, sentiment metrics.

    Labels use a BIO-style encoding: 1 = span begin, 2 = span inside,
    0 = outside.  A predicted span is correct only when its begin position
    and full extent match the gold span exactly.

    Fixes over the previous version (which could not run at all):
    - the loop bodies referenced undefined names (``true_seq``, ``predict``,
      ``correct``, ``predicted``, ``relevant``) — they are now bound;
    - ``predict_aspect = predict_aspect[i]`` clobbered the parameter on the
      first iteration — a distinct local name is used instead;
    - ``(p_pos+p_neg+p_neu)/3,0`` built a tuple (comma instead of a decimal
      point) and then crashed on tuple arithmetic — now divides by 3.0.

    NOTE(review): ``true_opinion``/``predict_opinion`` and ``mask`` are
    accepted for interface compatibility but not used, matching the original.

    Returns (f_aspect, acc_s, f_s, f_absa); the last three stay 0 when
    ``train_op`` is truthy.
    """
    begin = 1
    inside = 2
    # predicted sentiment distribution for aspect terms that are correctly extracted
    pred_count = {'pos':0, 'neg':0, 'neu':0, 'con':0}
    # gold sentiment distribution for aspect terms that are correctly extracted
    rel_count = {'pos':0, 'neg':0, 'neu':0, 'con':0}
    # sentiment distribution for terms that get both span and sentiment predicted correctly
    correct_count = {'pos':0, 'neg':0, 'neu':0, 'con':0}
    # sentiment distribution in original data
    total_count = {'pos':0, 'neg':0, 'neu':0, 'con':0}
    polarity_map = {1: 'pos', 2: 'neg', 3: 'neu', 4:'con'}
    # count of aspect terms whose span is correct but predicted sentiment is none
    predicted_conf = 0
    correct, predicted, relevant = 0, 0, 0
    for i in range(len(true_aspect)):
        true_seq = true_aspect[i]
        predict = predict_aspect[i]  # local name: do NOT rebind the parameter
        for num in range(len(true_seq)):
            if true_seq[num] == begin:
                relevant += 1
                if not train_op:
                    if true_sentiment[i][num] != 0:
                        total_count[polarity_map[true_sentiment[i][num]]] += 1
                if predict[num] == begin:
                    # Walk the span: inside-tokens must agree on both sides;
                    # the span ends when both leave "inside" together.
                    match = True
                    for j in range(num+1, len(true_seq)):
                        if true_seq[j] == inside and predict[j] == inside:
                            continue
                        elif true_seq[j] != inside and predict[j] != inside:
                            break
                        else:
                            match = False
                            break
                    if match:
                        correct += 1  # aspect extraction correct
                        if not train_op:
                            # do not count conflict examples
                            if true_sentiment[i][num] != 0:
                                rel_count[polarity_map[true_sentiment[i][num]]] += 1  # real sentiment when aspect is correct
                                if predict_sentiment[i][num] != 0:
                                    pred_count[polarity_map[predict_sentiment[i][num]]] += 1  # predicted sentiment when aspect is correct
                                    if true_sentiment[i][num] == predict_sentiment[i][num]:
                                        correct_count[polarity_map[true_sentiment[i][num]]] += 1  # aspect and sentiment both correct
                                else:
                                    predicted_conf += 1  # aspect correct but no predicted sentiment
        for pred in predict:
            if pred == begin:
                predicted += 1  # predicted aspect count; relevant is the gold count
    # Precision/recall/F1 for span extraction; 1e-6 guards zero division.
    p_aspect = correct / (predicted + 1e-6)
    r_aspect = correct / (relevant + 1e-6)
    # F1 score for aspect extraction
    f_aspect = 2 * p_aspect * r_aspect / (p_aspect + r_aspect + 1e-6)
    acc_s, f_s, f_absa = 0, 0, 0
    if not train_op:
        num_correct_overall = correct_count['pos']+correct_count['neg']+correct_count['neu']
        num_correct_aspect = rel_count['pos']+rel_count['neg']+rel_count['neu']
        num_total = total_count['pos']+total_count['neg']+total_count['neu']
        acc_s = num_correct_overall/(num_correct_aspect+1e-6)
        p_pos = correct_count['pos'] / (pred_count['pos']+1e-6)
        r_pos = correct_count['pos'] / (rel_count['pos']+1e-6)
        p_neg = correct_count['neg'] / (pred_count['neg']+1e-6)
        r_neg = correct_count['neg'] / (rel_count['neg']+1e-6)
        p_neu = correct_count['neu'] / (pred_count['neu']+1e-6)
        r_neu = correct_count['neu'] / (rel_count['neu']+1e-6)
        # Macro-average over the three polarities (was "/3,0": a tuple).
        pr_s = (p_pos+p_neg+p_neu)/3.0
        re_s = (r_pos+r_neg+r_neu)/3.0
        # F1 score for AS only
        if pr_s+re_s != 0:
            f_s = 2*pr_s*re_s/(pr_s+re_s)
        else:
            f_s = 0
        precision_absa = num_correct_overall/(predicted - predicted_conf + 1e-6)
        recall_absa = num_correct_overall/(num_total+1e-6)
        # F1 score of the end-to-end task
        f_absa = 2*precision_absa*recall_absa/(precision_absa+recall_absa+1e-6)
    return f_aspect, acc_s, f_s, f_absa
def find_pair_(label_matrix):
    """Extract (aspect, opinion) index pairs from a 2-D grid labeling.

    Grid encoding (as used by the traversal below): 1 marks a pair anchor
    cell, 2 a continuation cell, 0 empty.  From each anchor the walk moves
    down (direction 1), right (2), or diagonally (3) while the neighboring
    cells continue the region, collecting row indices as aspect tokens and
    column indices as opinion tokens.

    NOTE(review): several boundary checks look suspect — in the value-1
    branch ``label_matrix[tem_a+1]`` is indexed without a bounds guard, and
    conditions like ``tem_o>length`` / ``tem_a>length`` can never be true
    for in-range indices (probably meant ``== length-1``).  Verify against
    the grid-label generator before relying on edge-row/column behavior.

    Returns a list of [aspect_index_list, opinion_index_list] pairs.
    """
    length = len(label_matrix)
    triple = []
    for i in range(length):
        for j in range(length):
            aspect_index, opinion_index = [], []
            if label_matrix[i][j] == 1:
                tem_a, tem_o = i, j
                # 1down 2right 3rightdown
                direction = 0
                while label_matrix[tem_a][tem_o] != 0:
                    if label_matrix[tem_a][tem_o] == 1:
                        # if direction == 3:
                        # save
                        aspect_index.append(tem_a)
                        opinion_index.append(tem_o)
                        # elif direction == 1:
                        #     flag = 1
                        #     for t in range(j, tem_o):
                        #         if label_matrix[tem_a][t]!=2:
                        #             flag=0
                        #     if flag == 1:
                        #         aspect_index.append(tem_a)
                        #     else: break
                        # elif direction == 2:
                        #     flag = 1
                        #     for t in range(i, tem_a):
                        #         if label_matrix[i][tem_o]!=2:
                        #             flag=0
                        #     if flag == 1:
                        #         opinion_index.append(tem_o)
                        #     else: break
                        # jump
                        if label_matrix[tem_a+1][tem_o]==2 and label_matrix[tem_a][tem_o+1]==2 and label_matrix[tem_a+1][tem_o+1]==2:
                            direction=3 # right down
                            tem_a+=1
                            tem_o+=1
                            continue
                        # elif label_matrix[tem_a+1][tem_o]==2 and label_matrix[tem_a][tem_o+1]==0 and label_matrix[tem_a+1][tem_o+1]==0:
                        elif (tem_a<length and tem_o<length and label_matrix[tem_a+1][tem_o]==2 and label_matrix[tem_a][tem_o+1]==0 and label_matrix[tem_a+1][tem_o+1]==0) or\
                             (tem_a<length and tem_o>length and label_matrix[tem_a+1][tem_o]==2):
                            direction=1 # down
                            tem_a+=1
                            continue
                        # elif label_matrix[tem_a+1][tem_o]==0 and label_matrix[tem_a][tem_o+1]==2 and label_matrix[tem_a+1][tem_o+1]==0:
                        elif (tem_a<length and tem_o<length and label_matrix[tem_a+1][tem_o]==0 and label_matrix[tem_a][tem_o+1]==2 and label_matrix[tem_a+1][tem_o+1]==0) or\
                             (tem_a>length and tem_o<length and label_matrix[tem_a][tem_o+1]==2):
                            direction=2 # right
                            tem_o+=1
                            continue
                        else:
                            break
                    elif label_matrix[tem_a][tem_o] == 2:
                        # save: a continuation cell extends the pair in the
                        # current direction only if the run back to the anchor
                        # row/column is unbroken.
                        if direction == 3:
                            aspect_index.append(tem_a)
                            opinion_index.append(tem_o)
                        elif direction == 1:
                            flag = 1
                            for t in range(j, tem_o):
                                if label_matrix[tem_a][t]!=2:
                                    flag=0
                            if flag == 1:
                                aspect_index.append(tem_a)
                            else: break
                        elif direction == 2:
                            flag = 1
                            for t in range(i, tem_a):
                                if label_matrix[i][tem_o]!=2:
                                    flag=0
                            if flag == 1:
                                opinion_index.append(tem_o)
                            else: break
                        # jump
                        if tem_a<length and tem_o<length and label_matrix[tem_a+1][tem_o]==2 and label_matrix[tem_a][tem_o+1]==2 and label_matrix[tem_a+1][tem_o+1]==2:
                            direction=3
                            tem_a+=1
                            tem_o+=1
                            continue
                        elif (tem_a<length and tem_o<length and label_matrix[tem_a+1][tem_o]==2 and label_matrix[tem_a][tem_o+1]==0 and label_matrix[tem_a+1][tem_o+1]==0) or\
                             (tem_a<length and tem_o>length and label_matrix[tem_a+1][tem_o]==2):
                            direction=1
                            tem_a+=1
                            continue
                        elif (tem_a<length and tem_o<length and label_matrix[tem_a+1][tem_o]==0 and label_matrix[tem_a][tem_o+1]==2 and label_matrix[tem_a+1][tem_o+1]==0) or\
                             (tem_a>length and tem_o<length and label_matrix[tem_a][tem_o+1]==2):
                            direction=2
                            tem_o+=1
                            continue
                        else:break
            if aspect_index != [] and opinion_index != []:
                triple.append([aspect_index, opinion_index])
    return triple
# def find_pair(label_matrix):
# length = len(label_matrix)
# triple = []
# for i in range(length):
# for j in range(length):
# aspect_index, opinion_index = [], []
# # import pdb; pdb.set_trace()
# if label_matrix[i][j] == 1:
# # aspect_index.append(i)
# # opinion_index.append(j)
# col , row, tem_len = j, i, 1
# save_length = []
# while True:
# while col+1 < len(label_matrix[1]) and label_matrix[row][col+1] == 2:
# col += 1
# tem_len += 1
# save_length.append(tem_len)
# tem_len = 1
# if row+1 < len(label_matrix) and label_matrix[row+1][j] == 2:
# row += 1
# col = j
# else: break
# max_len = max(save_length)
# aspect_index = [idx for idx in range(i, row+1)]
# opinion_index = [idx for idx in range(j, j + max_len)]
# if aspect_index != [] and opinion_index != []:
# triple.append([aspect_index, opinion_index])
# return triple
def find_pair(label_matrix):
    """Decode (aspect, opinion) span pairs from a grid-tagging matrix.

    Rows index aspect tokens and columns index opinion tokens.  Cell values:
    0 = no relation, 1 = pair "begin" cell, 2 = pair "inside" cell.
    Returns a list of [aspect_index, opinion_index] items, each a list of
    consecutive token positions.

    NOTE(review): assumes cells contain only {0, 1, 2}; other values could
    stall the traversal loops below -- confirm against the label generator.
    """
    length = len(label_matrix)
    triple = []
    for i in range(length):
        for j in range(length):
            aspect_index, opinion_index = [], []
            if label_matrix[i][j] == 1:
                # Skip begin-cells that merely continue a begin-region already
                # decoded from an upper/left neighbour (multi-token "begin"
                # blocks produced by sub-word tokenization).
                if i > 0 and j >0:
                    if label_matrix[i-1][j-1] == 1: continue
                if i > 0:
                    if label_matrix[i-1][j] == 1: continue
                if j > 0:
                    if label_matrix[i][j-1] == 1: continue
                col , row, tem_len = j, i, 1
                save_length = []  # horizontal run length found on each visited row
                while True:
                    # Walk right while the run continues (1 -> 1/2 or 2 -> 2).
                    while col+1 < len(label_matrix[1]) and label_matrix[row][col+1] != 0:
                        if label_matrix[row][col] == 1:
                            if label_matrix[row][col+1] == 1 or label_matrix[row][col+1] == 2:
                                col += 1
                                tem_len += 1
                        elif label_matrix[row][col] == 2:
                            if label_matrix[row][col+1] == 2:
                                col += 1
                                tem_len += 1
                        # col += 1
                        # tem_len += 1
                    save_length.append(tem_len)
                    tem_len = 1
                    # Step down to the next row of the region when the cell
                    # below the starting column continues the block.
                    if row+1 < len(label_matrix) and label_matrix[row+1][j] != 0:
                        if label_matrix[row][j] == 1:
                            if label_matrix[row+1][j] == 1 or label_matrix[row+1][j] == 2:
                                row += 1
                                col = j
                        if label_matrix[row][j] == 2:
                            if label_matrix[row+1][j] == 2:
                                row += 1
                                col = j
                        # row += 1
                        # col = j
                    else: break
                # Aspect spans the visited rows; opinion takes the widest row run.
                max_len = max(save_length)
                aspect_index = [idx for idx in range(i, row+1)]
                opinion_index = [idx for idx in range(j, j + max_len)]
                if aspect_index != [] and opinion_index != []:
                    triple.append([aspect_index, opinion_index])
    return triple
def find_grid_term(label_matrix):
    """Collect terms that lie on the grid diagonal.

    For each "begin" cell (value 1), expand downward along its column and
    rightward along its row through "inside" cells (value 2).  A term is kept
    only when both expansions produce the same index list, i.e. the region is
    a square block anchored on the diagonal.
    """
    size = len(label_matrix)
    terms = []
    for row in range(size):
        for col in range(size):
            if label_matrix[row][col] != 1:
                continue
            # Downward expansion from the begin cell, then rightward expansion.
            row_span = [row] + [r for r in range(row, size) if label_matrix[r][col] == 2]
            col_span = [col] + [c for c in range(col, size) if label_matrix[row][c] == 2]
            if row_span and col_span and row_span == col_span:
                terms.append(row_span)
    return terms
def find_pair_sentiment(label_matrix, sentiment_label_matrix):
    """Decode (aspect, opinion, sentiment) triplets from two grids.

    label_matrix follows the pair-grid scheme (0 none, 1 begin, 2 inside);
    sentiment_label_matrix holds a polarity id at the same cells (presumably
    pos=1 neg=2 neu=3, as elsewhere in this file -- confirm).  The span
    traversal is identical to find_pair; the triplet sentiment is read from
    the begin cell (i, j), and triplets with sentiment 0 are dropped.
    """
    length = len(label_matrix)
    triple = []
    for i in range(length):
        for j in range(length):
            aspect_index, opinion_index = [], []
            if label_matrix[i][j] == 1:
                # Skip begin-cells continuing a region decoded earlier.
                if i > 0 and j >0:
                    if label_matrix[i-1][j-1] == 1: continue
                if i > 0:
                    if label_matrix[i-1][j] == 1: continue
                if j > 0:
                    if label_matrix[i][j-1] == 1: continue
                col , row, tem_len = j, i, 1
                save_length = []  # horizontal run length per visited row
                while True:
                    # Walk right while the run continues (1 -> 1/2 or 2 -> 2).
                    while col+1 < len(label_matrix[1]) and label_matrix[row][col+1] != 0:
                        if label_matrix[row][col] == 1:
                            if label_matrix[row][col+1] == 1 or label_matrix[row][col+1] == 2:
                                col += 1
                                tem_len += 1
                        elif label_matrix[row][col] == 2:
                            if label_matrix[row][col+1] == 2:
                                col += 1
                                tem_len += 1
                        # col += 1
                        # tem_len += 1
                    save_length.append(tem_len)
                    tem_len = 1
                    # Step down when the cell below the starting column
                    # continues the block.
                    if row+1 < len(label_matrix) and label_matrix[row+1][j] != 0:
                        if label_matrix[row][j] == 1:
                            if label_matrix[row+1][j] == 1 or label_matrix[row+1][j] == 2:
                                row += 1
                                col = j
                        if label_matrix[row][j] == 2:
                            if label_matrix[row+1][j] == 2:
                                row += 1
                                col = j
                        # row += 1
                        # col = j
                    else: break
                max_len = max(save_length)
                aspect_index = [idx for idx in range(i, row+1)]
                opinion_index = [idx for idx in range(j, j + max_len)]
                # Keep only triplets whose begin cell carries a sentiment.
                if aspect_index != [] and opinion_index != [] and sentiment_label_matrix[i][j] != 0:
                    triple.append([aspect_index, opinion_index, sentiment_label_matrix[i][j]])
    return triple
def find_term(label_sequence):
    """Decode BIO-style spans from a 1-D label sequence.

    1 marks a span begin, 2 marks inside.  A run of consecutive 1s belongs to
    a single term (multi-token "begin" from sub-word tokenization), optionally
    followed by 2s.  Returns a list of index lists, one per term.
    """
    spans = []
    n = len(label_sequence)
    for start in range(n):
        if label_sequence[start] != 1:
            continue
        # A '1' directly after another '1' continues the previous term.
        if start > 0 and label_sequence[start - 1] == 1:
            continue
        span = [start]
        pos = start + 1
        while pos < n:
            prev, cur = label_sequence[pos - 1], label_sequence[pos]
            if prev == 1 and cur in (1, 2):
                span.append(pos)
            elif prev == 2 and cur == 2:
                span.append(pos)
            else:
                break
            pos += 1
        spans.append(span)
    return spans
def compute_sentiment(true_aspect, predict_aspect, true_sentiment, predict_sentiment):
    """Score aspect-level sentiment from BIO aspect sequences.

    Args (parallel lists over sentences):
        true_aspect / predict_aspect: BIO label sequences (1=begin, 2=inside).
        true_sentiment / predict_sentiment: per-token polarity ids
            (1=pos, 2=neg, 3=neu, 0=null/conflict).

    Returns (f_s, f_absa): macro-F1 over the three polarities for terms whose
    span was extracted exactly, and F1 of the end-to-end (span + sentiment)
    task.  (f_aspect and acc_s are computed but not returned.)
    """
    begin = 1
    inside = 2
    # predicted sentiment distribution for aspect terms that are correctly extracted
    pred_count = {'pos':0, 'neg':0, 'neu':0}
    # gold sentiment distribution for aspect terms that are correctly extracted
    rel_count = {'pos':0, 'neg':0, 'neu':0}
    # sentiment distribution for terms that get both span and sentiment predicted correctly
    correct_count = {'pos':0, 'neg':0, 'neu':0}
    # sentiment distribution in original data
    total_count = {'pos':0, 'neg':0, 'neu':0}
    polarity_map = {1: 'pos', 2: 'neg', 3: 'neu', 0:'null'}
    # count of predicted conflict aspect term
    predicted_conf = 0
    correct, predicted, relevant = 0, 0, 0
    for i in range(len(true_aspect)):
        true_seq = true_aspect[i]
        predict = predict_aspect[i]
        for num in range(len(true_seq)):
            if true_seq[num] == begin:
                relevant += 1
                if true_sentiment[i][num]!=0:
                    total_count[polarity_map[true_sentiment[i][num]]]+=1
                if predict[num] == begin:
                    # Exact span match: after the begin token, both sequences
                    # must agree on every "inside" token until both stop.
                    match = True
                    for j in range(num+1, len(true_seq)):
                        if true_seq[j] == inside and predict[j] == inside:
                            continue
                        elif true_seq[j] != inside and predict[j] != inside:
                            break
                        else:
                            match = False
                            break
                    if match:
                        correct += 1
                        # do not count conflict examples
                        if true_sentiment[i][num]!=0:
                            rel_count[polarity_map[true_sentiment[i][num]]]+=1
                            if predict_sentiment[i][num] != 0:
                                pred_count[polarity_map[predict_sentiment[i][num]]]+=1
                                if true_sentiment[i][num] == predict_sentiment[i][num]:
                                    correct_count[polarity_map[true_sentiment[i][num]]]+=1
                            else:
                                # predicted the null/conflict class
                                predicted_conf += 1
        # every predicted begin token counts toward extraction precision
        for pred in predict:
            if pred == begin:
                predicted += 1
    # 1e-6 guards all denominators against zero counts
    p_aspect = correct / (predicted + 1e-6)
    r_aspect = correct / (relevant + 1e-6)
    # F1 score for aspect extraction
    f_aspect = 2 * p_aspect * r_aspect / (p_aspect + r_aspect + 1e-6)
    acc_s, f_s, f_absa = 0, 0, 0
    num_correct_overall = correct_count['pos']+correct_count['neg']+correct_count['neu']
    num_correct_aspect = rel_count['pos']+rel_count['neg']+rel_count['neu']
    num_total = total_count['pos']+total_count['neg']+total_count['neu']
    acc_s = num_correct_overall/(num_correct_aspect+1e-6)
    p_pos = correct_count['pos'] / (pred_count['pos']+1e-6)
    r_pos = correct_count['pos'] / (rel_count['pos']+1e-6)
    p_neg = correct_count['neg'] / (pred_count['neg']+1e-6)
    r_neg = correct_count['neg'] / (rel_count['neg']+1e-6)
    p_neu = correct_count['neu'] / (pred_count['neu']+1e-6)
    r_neu= correct_count['neu'] / (rel_count['neu']+1e-6)
    pr_s = (p_pos+p_neg+p_neu)/3.0
    re_s = (r_pos+r_neg+r_neu)/3.0
    # F1 score for AS only
    f_s = 2*pr_s*re_s/(pr_s+re_s+1e-6)
    precision_absa = num_correct_overall/(predicted+1e-6 - predicted_conf)
    recall_absa = num_correct_overall/(num_total+1e-6)
    # F1 score of the end-to-end task
    f_absa = 2*precision_absa*recall_absa/(precision_absa+recall_absa+1e-6)
    return f_s, f_absa
if __name__ == '__main__':
    # Ad-hoc smoke tests for the grid decoders defined above.
    # tem*  hold pair grids (1 = begin, 2 = inside);
    # tem*_ hold the matching sentiment grids (pos=1, neg=2, neu=3).
    tem1 = [[0,1,2,0],[0,2,2,0],[0,2,2,0],[0,0,0,0]]
    tem1_ = [[0,2,2,0],[0,2,2,0],[0,2,2,0],[0,0,0,0]]
    tem2 = [[0,1,2,0],[0,2,2,0],[0,0,0,0],[0,0,0,0]]
    tem2_ = [[0,3,3,0],[0,3,3,0],[0,0,0,0],[0,0,0,0]]
    tem3 = [[0,1,2,2],[0,2,2,2],[0,0,0,0],[0,0,0,0]]
    tem3_ = [[0,2,2,2],[0,2,2,2],[0,0,0,0],[0,0,0,0]]
    tem4 = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 2, 2, 0],
            [0, 0, 0, 0, 0, 2, 2, 2, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0]]
    tem4_ = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 2, 2, 2, 0],
             [0, 0, 0, 0, 0, 2, 2, 2, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0]]
    tem5 = [[0,1,2,2],[0,0,0,0],[1,0,0,0],[0,0,0,0]]
    tem6 = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 1, 2, 0],
            [0, 0, 0, 0, 0, 2, 2, 2, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0]]
    tem6_ = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 2, 2, 2, 0],
             [0, 0, 0, 0, 0, 2, 2, 2, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0]]
    tem7 = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 2, 2, 0],
            [0, 0, 0, 0, 0, 1, 2, 2, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0]]
    tem7_ = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 2, 2, 2, 0],
             [0, 0, 0, 0, 0, 2, 2, 2, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0]]
    print(find_pair(tem1))
    print(find_pair(tem2))
    print(find_pair(tem3))
    print(find_pair(tem4))
    print(find_pair(tem5))
    # Fix: removed a leftover pdb.set_trace() breakpoint here -- it halted
    # every non-interactive run, and `pdb` is not imported in this module,
    # so it raised NameError before find_term below could execute.
    tem = [0,1,2,2,0,0,1,0,0,1,2]
    print(find_term(tem))
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,965
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/models/__init__.py
|
# -*- coding: utf-8 -*-
from models.ts import TS
from models.ts0 import TS0
from models.ts1 import TS1
from models.ts2 import TS2
from models.ts3 import TS3
from models.ts1_3 import TS1_3
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,966
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/write_results/check_results.py
|
import numpy as np
import pdb
import sys
import torch
sys.path.append("..")
from evaluation_bert import find_pair, find_term, find_pair_sentiment
# Inspect saved predictions for one dataset: decode gold and predicted
# aspect/opinion/pair/triplet structures and append every mismatching
# sentence to <domain>_results.txt.
domain = 'res14'
# *_l = gold label arrays, *_r = raw model outputs (argmax'ed below);
# a/o = aspect/opinion sequences, p = pair grid, ps = pair-sentiment grid.
a_l = np.load(domain + '/' + domain + 'labels_a.npy', allow_pickle=True)
a_r = np.load(domain + '/' + domain + 'results_a.npy', allow_pickle=True)
o_l = np.load(domain + '/' + domain + 'labels_o.npy', allow_pickle=True)
o_r = np.load(domain + '/' + domain + 'results_o.npy', allow_pickle=True)
p_l = np.load(domain + '/' + domain + 'labels_p.npy', allow_pickle=True)
p_r = np.load(domain + '/' + domain + 'results_p.npy', allow_pickle=True)
ps_l = np.load(domain + '/' + domain + 'labels_ps.npy', allow_pickle=True)
ps_r = np.load(domain + '/' + domain + 'results_ps.npy', allow_pickle=True)
# Flatten the per-batch nesting into flat per-sentence lists.
a_l = [l for sub_list in a_l for l in sub_list]
a_r = [l for sub_list in a_r for l in sub_list]
o_l = [l for sub_list in o_l for l in sub_list]
o_r = [l for sub_list in o_r for l in sub_list]
p_l = [l for sub_list in p_l for l in sub_list]
p_r = [l for sub_list in p_r for l in sub_list]
ps_l = [l for sub_list in ps_l for l in sub_list]
ps_r = [l for sub_list in ps_r for l in sub_list]
results = open(domain + '_results.txt', 'a')
for i in range(len(a_l)):
    aspect_l, aspect_r = find_term(a_l[i]), find_term(torch.argmax(torch.tensor(a_r[i]), -1))
    opinion_l, opinion_r = find_term(o_l[i]), find_term(torch.argmax(torch.tensor(o_r[i]), -1))
    pair_l, pair_r = find_pair(p_l[i]), find_pair(torch.argmax(torch.tensor(p_r[i]),-1))
    pair_sentiment_l, pair_sentiment_r = find_pair_sentiment(p_l[i], ps_l[i]), find_pair_sentiment(torch.argmax(torch.tensor(p_r[i]),-1), torch.argmax(torch.tensor(ps_r[i]),-1))
    # Unwrap the 0-d sentiment tensors so the comparison with the gold
    # (plain-int) triplets below is apples-to-apples.
    for tri in pair_sentiment_r:
        tri[-1] = tri[-1].item()
    if pair_sentiment_l != pair_sentiment_r or pair_l != pair_r:
        results.write(str(i)+'\n')
        results.write('True:' + str(pair_sentiment_l) + '\n')
        results.write('Results:' + str(pair_sentiment_r) + '\n')
        # pdb.set_trace()
results.close()
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,967
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/bert_models/bert_init.py
|
# -*- coding: utf-8 -*-
from layers.dynamic_rnn import DynamicLSTM
import torch
import torch.nn as nn
import pdb
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer
from transformers import BertForTokenClassification
# from transformers import BertModel, BertTokenizer
def generate_formal_adj(init_adj):
    '''input: a simple adj with a size of (row, column)
    output: a complete and formal adj with a size of (row+column, row+column)

    Builds the bipartite block matrix [[0, A], [A^T, 0]] on the GPU.
    '''
    batch, row, column = init_adj.shape
    # Bug fix: the zero blocks were built with np.zeros, but numpy is never
    # imported in this module, so this function raised NameError at runtime.
    # torch.zeros produces the same float32 tensors directly.
    # up left matrix (batch, row, row)
    lu = torch.zeros(batch, row, row, dtype=torch.float32).cuda()
    # up right (batch, row, column)
    ru = init_adj.cuda()
    # down left (batch, column, row)
    ld = init_adj.transpose(1, 2).cuda()
    # down right (batch, column, column)
    rd = torch.zeros(batch, column, column, dtype=torch.float32).cuda()
    # up (batch, row, row+column)
    up = torch.cat([lu.float(), ru.float()], -1).cuda()
    # down (batch, column, row+column)
    down = torch.cat([ld.float(), rd.float()], -1).cuda()
    # final (batch, row+column, row+column)
    final = torch.cat([up, down], 1).cuda()
    return final.cuda()
def preprocess_adj(A):
    '''
    for batch data
    Pre-process adjacency matrix
    :param A: adjacency matrix, shape (batch, n, n)
    :return: symmetrically normalized adjacency D^-1/2 (A + I) D^-1/2
             (the GCN normalization of Kipf & Welling), on the GPU
    '''
    # prepare
    assert A.shape[-1] == A.shape[-2]
    batch = A.shape[0]
    num = A.shape[-1]
    # generate eye (batched identity)
    I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()
    # add self-loops: A_hat = A + I
    A_hat = A.cuda() + I
    # node degrees of A_hat
    D_hat_diag = torch.sum(A_hat.cuda(), axis=-1)
    # element-wise D^-1/2
    D_hat_diag_inv_sqrt = torch.pow(D_hat_diag.cuda(), -0.5)
    # zero out inf/nan entries produced by zero-degree nodes
    D_hat_diag_inv_sqrt = torch.where(torch.isinf(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())
    D_hat_diag_inv_sqrt = torch.where(torch.isnan(D_hat_diag_inv_sqrt.cuda()), torch.full_like(D_hat_diag_inv_sqrt.cuda(), 0), D_hat_diag_inv_sqrt.cuda())
    # expand the degree vector into a diagonal matrix via masking with I
    tem_I = torch.eye(num).unsqueeze(0).repeat(batch, 1, 1).cuda()
    D_hat_diag_inv_sqrt_ = D_hat_diag_inv_sqrt.unsqueeze(-1).repeat(1,1,num).cuda()
    D_hat_inv_sqrt = D_hat_diag_inv_sqrt_ * tem_I
    # D^-1/2 @ A_hat @ D^-1/2
    return torch.matmul(torch.matmul(D_hat_inv_sqrt.cuda(), A_hat.cuda()), D_hat_inv_sqrt.cuda())
class SequenceLabelForAO(nn.Module):
    """Two-headed token classifier for aspect and opinion tags.

    A shared linear projection (hidden -> hidden/2) with ReLU and dropout
    feeds two independent tag heads of size tag_size each.
    """

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForAO, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> two (bs, seq_len, tag_size) score tensors."""
        shared = self.dropout(nn.ReLU()(self.linear(input_features)))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class CustomizeSequenceLabelForAO(nn.Module):
    """Aspect/opinion tagger with a shared branch plus task-specific branches.

    Each head concatenates a shared hidden/2 projection with its own task
    projection (hidden -> hidden/2) before classification.
    """

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(CustomizeSequenceLabelForAO, self).__init__()
        self.tag_size = tag_size
        # shared branch: hidden -> hidden/2
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        # task-specific branches: hidden -> hidden/2
        self.hidden2tag_sub = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag_obj = nn.Linear(hidden_size, int(hidden_size / 2))
        # classifiers over the concatenated (shared + task) features
        self.linear_a = nn.Linear(hidden_size, self.tag_size)
        self.linear_o = nn.Linear(hidden_size, self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """
        Args:
            input_features: (bs, seq_len, h)
        Returns:
            (aspect_scores, opinion_scores), each (bs, seq_len, tag_size).
        """
        # share
        features_tmp = self.dropout(nn.ReLU()(self.linear(input_features)))
        # ATE branch.  Bug fix: the original applied ReLU/dropout to the
        # SHARED features and discarded hidden2tag_sub's output entirely.
        features_tmp_a = self.dropout(nn.ReLU()(self.hidden2tag_sub(input_features)))
        # OTE branch (same fix for hidden2tag_obj).
        features_tmp_o = self.dropout(nn.ReLU()(self.hidden2tag_obj(input_features)))
        # cat shared + task-specific features
        features_for_a = torch.cat([features_tmp, features_tmp_a], -1)
        features_for_o = torch.cat([features_tmp, features_tmp_o], -1)
        # classifiers.  Bug fix: obj_output previously used linear_a, leaving
        # linear_o unused and tying both heads to the same classifier.
        sub_output = self.linear_a(features_for_a)
        obj_output = self.linear_o(features_for_o)
        return sub_output, obj_output
class SequenceLabelForAOS(nn.Module):
    """Three-headed token classifier: aspect, opinion, and sentiment tags
    computed from one shared projected representation."""

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForAOS, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        self.hidden2tag_obj = nn.Linear(half, self.tag_size)
        # sentiment head carries one extra class
        self.hidden2tag_senti = nn.Linear(half, self.tag_size + 1)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (aspect, opinion, sentiment) scores."""
        shared = self.dropout(nn.ReLU()(self.linear(input_features)))
        return (
            self.hidden2tag_sub(shared),
            self.hidden2tag_obj(shared),
            self.hidden2tag_senti(shared),
        )
class SequenceLabelForTriple(nn.Module):
    """Two-headed classifier over pair features: pair tags (tag_size classes)
    and pair-sentiment tags (tag_size + 1 classes) from a shared projection."""

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(SequenceLabelForTriple, self).__init__()
        self.tag_size = tag_size
        half = int(hidden_size / 2)
        self.linear = nn.Linear(hidden_size, half)
        self.hidden2tag_sub = nn.Linear(half, self.tag_size)
        # sentiment head has one extra (null) class
        self.hidden2tag_obj = nn.Linear(half, self.tag_size + 1)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (bs, seq_len, h) -> (pair_scores, pair_sentiment_scores)."""
        shared = self.dropout(nn.ReLU()(self.linear(input_features)))
        return self.hidden2tag_sub(shared), self.hidden2tag_obj(shared)
class MultiNonLinearClassifier(nn.Module):
    """Single-head classifier: hidden -> hidden/2 (ReLU, dropout) -> tag_size."""

    def __init__(self, hidden_size, tag_size, dropout_rate):
        super(MultiNonLinearClassifier, self).__init__()
        self.tag_size = tag_size
        self.linear = nn.Linear(hidden_size, int(hidden_size / 2))
        self.hidden2tag = nn.Linear(int(hidden_size / 2), self.tag_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_features):
        """input_features: (..., hidden_size) -> (..., tag_size) scores."""
        projected = self.dropout(nn.ReLU()(self.linear(input_features)))
        return self.hidden2tag(projected)
class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # NOTE(review): torch.FloatTensor allocates uninitialized memory, so
        # weight/bias start as garbage -- presumably an external initializer
        # is applied before training; confirm.
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
    def forward(self, text, adj):
        # text: (batch, n, in_features); adj: (batch, n, n)
        hidden = torch.matmul(text, self.weight)
        # +1 avoids division by zero for isolated nodes
        denom = torch.sum(adj, dim=2, keepdim=True) + 1
        # adj = torch.tensor(adj)
        # NOTE(review): torch.tensor(existing_tensor) copies AND detaches from
        # the autograd graph -- gradients do not flow back through `hidden`
        # (and hence self.weight) past this point; confirm this is intended.
        adj = torch.tensor(adj, dtype=torch.float32)
        # hidden = torch.tensor(hidden)
        hidden = torch.tensor(hidden, dtype=torch.float32)
        # degree-normalized neighbourhood aggregation
        output = torch.matmul(adj.cuda(), hidden.cuda()) / denom.cuda()
        # print(output.shape)
        # print(self.bias.shape)
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class PairGeneration(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """
    # NOTE(review): despite the (copied) docstring this is not a GCN layer --
    # forward returns the bilinear pair-score matrix (text W)(text W)^T of
    # shape (batch, seq, seq).
    def __init__(self, features, bias=False):
        super(PairGeneration, self).__init__() # 32,13,300 32,300,13
        self.features = features
        # self.out_features = out_features
        # NOTE(review): weight/bias are uninitialized (torch.FloatTensor) --
        # confirm an external init is run before use.
        self.weight = nn.Parameter(torch.FloatTensor(features, features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(features))
        else:
            self.register_parameter('bias', None)
    def forward(self, text):
        hidden = torch.matmul(text.float(), self.weight)
        # print(hidden.shape)
        # denom = torch.sum(adj, dim=2, keepdim=True) + 1
        # adj = torch.tensor(adj, dtype=torch.float32)
        # NOTE(review): torch.tensor(hidden) copies and detaches hidden_ from
        # the autograd graph; only the right-hand factor keeps gradients.
        hidden_ = torch.tensor(hidden, dtype=torch.float32)
        # print(hidden_.shape)
        output = torch.matmul(hidden_, hidden.permute(0,2,1))
        # print(output.shape)
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class PairGeneration0(nn.Module):
    """Build all token-pair representations by concatenation.

    forward maps (batch, seq, feat) to (batch, seq, seq, 2*feat), where cell
    (b, i, j) is the concatenation [text[b, j], text[b, i]].
    """

    def __init__(self, features, bias=False):
        super(PairGeneration0, self).__init__() # 32,13,300 32,300,13
        self.features = features
        # NOTE: `weight` (and optional `bias`) are registered but not used in
        # forward; kept for checkpoint compatibility.
        self.weight = nn.Parameter(torch.FloatTensor(features, features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(features))
        else:
            self.register_parameter('bias', None)

    def forward(self, text):
        seq_len = text.shape[1]
        # Broadcast each token against every other token.
        rows = text.unsqueeze(1).repeat(1, seq_len, 1, 1)   # (b, i, j) -> text[b, j]
        cols = text.unsqueeze(2).repeat(1, 1, seq_len, 1)   # (b, i, j) -> text[b, i]
        return torch.cat((rows, cols), -1)
class BERT_GCN(nn.Module):
    """Pretrained-transformer encoder with pairwise-concatenation heads for
    aspect extraction, opinion extraction, pair tagging and pair-sentiment
    tagging.  (No GCN layer is called in this forward pass despite the name.)
    """
    def __init__(self, opt, freeze_bert = False):
        super(BERT_GCN, self).__init__()
        # opt.bert_type selects the encoder; hidden dim depends on it
        pretrained_model = opt.bert_type
        # 'roberta-base', 'roberta-large', 'bert-base-uncased', 'bert-large-uncased'
        dim_dic = {'roberta-base': 768, 'roberta-large': 1024, 'bert-base-uncased': 768, 'bert-large-uncased':1024}
        self.dim = dim_dic[pretrained_model]
        self.opt = opt
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
        self.bert_model = AutoModel.from_pretrained(pretrained_model)
        if freeze_bert:
            # keep the encoder fixed; only the heads are trained
            for param in self.bert_model.parameters():
                param.requires_grad = False
        self.text_embed_dropout = nn.Dropout(0.5)
        self.pairgeneration = PairGeneration0(self.dim)
        # new classifier
        self.aspect_opinion_classifier = SequenceLabelForAO(self.dim, 3, 0.5)
        self.triple_classifier = SequenceLabelForTriple(self.dim*2, 3, 0.5)
        # NOTE(review): the four plain linear heads below are never called by
        # forward (only their commented-out alternatives were); they still add
        # parameters to the checkpoint.
        self.aspect_classifier = nn.Linear(self.dim, 3)
        self.opinion_classifier = nn.Linear(self.dim, 3)
        self.pair_classifier = nn.Linear(self.dim*2, 3)
        self.pair_sentiment_classifier = nn.Linear(self.dim*2, 4)
        print('bert_gcn!!!')
    def forward(self, inputs, mask):
        # input
        # NOTE(review): the `mask` parameter is immediately shadowed by the
        # mask unpacked from `inputs` -- confirm which one callers intend.
        text_indices, mask, aspect_labels, opinion_labels, pair_labels, triple_labels = inputs
        # prepare
        batch_size = text_indices.shape[0]
        sentence_len = text_indices.shape[1]
        # get sentence mask (unused below)
        mask_ = torch.unsqueeze(mask, -1)
        # encode the sentence; [0] is the last hidden state
        word_embeddings = self.bert_model(text_indices, mask)[0]
        text_out = self.text_embed_dropout(word_embeddings)
        # pair generation: (bs, seq, seq, 2*dim) token-pair features
        pair_text = self.pairgeneration(text_out)
        # AE and OE scores
        aspect_probs, opinion_probs = self.aspect_opinion_classifier(text_out.float())
        # aspect_probs, opinion_probs = self.aspect_classifier(text_out.float()), self.opinion_classifier(text_out.float())
        # flatten to (bs*seq, n_classes) for the loss
        aspect_probs, opinion_probs = aspect_probs.contiguous().view(-1, 3), opinion_probs.contiguous().view(-1, 3)
        pair_probs_, pair_sentiment_probs_ = self.triple_classifier(pair_text.float())
        # pair_probs_, pair_sentiment_probs_ = self.pair_classifier(pair_text.float()), self.pair_sentiment_classifier(pair_text.float())
        pair_probs = pair_probs_.contiguous().view(-1, 3)
        pair_sentiment_probs = pair_sentiment_probs_.contiguous().view(-1, 4)
        return aspect_probs, opinion_probs, pair_probs, pair_sentiment_probs
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,968
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/bert_data_utils.py
|
# -*- coding: utf-8 -*-
import os
import pickle
import numpy as np
import pdb
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer
class ABSADataset(object):
    """Minimal list-backed dataset: delegates indexing and length to `data`."""

    def __init__(self, data):
        self.data = data

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
class ABSADatasetReader:
    @staticmethod
    def __read_text__(fnames):
        '''a string: sentence1\nsentence2\n...sentencen\n'''
        # Concatenate the lower-cased sentence part (before '####') of every
        # line of every file, one sentence per output line.
        text = ''
        for fname in fnames:
            fin = open(fname)
            lines = fin.readlines()
            fin.close()
            for i in range(0, len(lines)):
                text += lines[i].split('####')[0].lower().strip()+'\n'
        return text
    @staticmethod
    def __read_triplets__(fnames):
        '''a list: [[([2], [5], 'NEG')], [(),()], [], ..., []]'''
        # Parse the triplet annotation after '####' on each line.
        # SECURITY NOTE(review): eval() executes arbitrary code from the data
        # files.  Acceptable only for trusted local datasets;
        # ast.literal_eval would be the safe equivalent here.
        triplets = []
        for fname in fnames:
            fin = open(fname)
            lines = fin.readlines()
            fin.close()
            for i in range(0, len(lines)):
                triple = eval(lines[i].split('####')[1])
                triplets.append(triple)
        return triplets
    @staticmethod
    def __read_all_sentence__(domain, tokenizer):
        '''read all sentence (train/dev/test) to get the representation of relevant sentences'''
        train_data = open('./ASTE-Data-V2/'+domain+'/train_triplets.txt','r').readlines()
        dev_data = open('./ASTE-Data-V2/'+domain+'/dev_triplets.txt','r').readlines()
        test_data = open('./ASTE-Data-V2/'+domain+'/test_triplets.txt','r').readlines()
        train_sentences = [line.split('####')[0] for line in train_data]
        dev_sentences = [line.split('####')[0] for line in dev_data]
        test_sentences = [line.split('####')[0] for line in test_data]
        all_sentences = train_sentences + dev_sentences + test_sentences
        # Longest tokenized sentence across all three splits; used by the
        # callers to pad every label structure to one fixed size.
        max_tokens_len = 0 # the max length in this dataset (train, test, dev)
        for sentence in all_sentences:
            tokens = tokenizer.tokenize(sentence)
            if len(tokens) > max_tokens_len:
                max_tokens_len = len(tokens)
        return all_sentences, max_tokens_len
    @staticmethod
    def __triple2bio__(sentences, triplets, max_tokens_len):
        '''
        max_tokens_len in cludes sep and cls
        convert triplets to BIO labels
        if the first word in the aspect term is tokenized into more than one tokens
        all token will be denoted as 1 instead of 2.
        000120000000
        000000011220
        000330000000
        pos1, neg2, neu3
        '''
        # sentences = sentences.strip('\n').split('\n')
        sentiment_dic = {'POS':1, 'NEG':2, 'NEU':3}
        aspect_labels, opinion_labels, sentiment_labels = [], [], []
        for sentence, triplet in zip(sentences, triplets):
            # sentence = sentence.strip('\n').split()
            # one fixed-size label row per sentence, zero-padded
            a_labels = [0 for i in range(max_tokens_len)]
            o_labels = [0 for i in range(max_tokens_len)]
            s_labels = [0 for i in range(max_tokens_len)]
            for tri in triplet:
                begin, inside = 1, 2
                # num_a/num_o: how many sub-word tokens the FIRST aspect /
                # opinion word produced -- all of them get the "begin" tag.
                a_index, o_index, polarity, num_a, num_o = tri
                for i in range(len(a_index)):
                    if num_a > 0:
                        a_labels[a_index[i]+1] = begin  # +1 shifts past [CLS]
                        s_labels[a_index[i]+1] = sentiment_dic[polarity]
                        num_a = num_a - 1
                    else:
                        a_labels[a_index[i]+1] = inside
                        s_labels[a_index[i]+1] = sentiment_dic[polarity]
                    # if i == 0:
                    #     a_labels[a_index[i]+1] = begin
                    #     s_labels[a_index[i]+1] = sentiment_dic[polarity]
                    # else:
                    #     a_labels[a_index[i]+1] = inside
                    #     s_labels[a_index[i]+1] = sentiment_dic[polarity]
                for i in range(len(o_index)):
                    if num_o > 0:
                        o_labels[o_index[i]+1] = begin
                        num_o = num_o -1
                    else:
                        o_labels[o_index[i]+1] = inside
                    # if i == 0:
                    #     o_labels[o_index[i]+1] = begin
                    # else:
                    #     o_labels[o_index[i]+1] = inside
            aspect_labels.append(a_labels)
            opinion_labels.append(o_labels)
            sentiment_labels.append(s_labels)
        return aspect_labels, opinion_labels, sentiment_labels
    @staticmethod
    def __triple2span__(sentences, triplets):
        '''
        convert bio labels to span labels
        00000
        01000
        00000
        00000
        the index of 1 denotes the start and end of term
        '''
        # NOTE(review): unlike the other converters, this one still expects
        # `sentences` as a single newline-joined string and applies no [CLS]
        # offset; it is not called in the visible part of __read_data__.
        sentences = sentences.strip('\n').split('\n')
        aspect_span, opinion_span = [], []
        for sentence, triple in zip(sentences, triplets):
            sentence = sentence.strip('\n').split()
            matrix_span_aspect = np.zeros((len(sentence), len(sentence))).astype('float32')
            matrix_span_opinion = np.zeros((len(sentence), len(sentence))).astype('float32')
            for tri in triple:
                # mark cell (start, end) of each aspect/opinion span with 1
                a_start, a_end, o_start, o_end = tri[0][0], tri[0][-1], tri[1][0], tri[1][-1]
                matrix_span_aspect[a_start][a_end] = 1
                matrix_span_opinion[o_start][o_end] = 1
            aspect_span.append(matrix_span_aspect)
            opinion_span.append(matrix_span_opinion)
        return aspect_span, opinion_span
    @staticmethod
    def __triple2grid__(sentences, triplets, max_tokens_len):
        '''
        max_tokens_len includes sep and cls
        convert triplets to grid label for pair and triplet
        row aspect, col opinion
        00000 00000
        01220 03330
        02220 03330 pos1 neg2 neu3
        '''
        sentiment_dic = {'POS':1, 'NEG':2, 'NEU':3}
        # keyed by sentence index; values are (max_tokens_len, max_tokens_len) grids
        pair_grid_labels, triple_grid_labels = {}, {}
        for i in range(len(sentences)):
            sentence, triplet = sentences[i], triplets[i]
            matrix_pair = np.zeros((max_tokens_len, max_tokens_len)).astype('float32')
            matrix_triple = np.zeros((max_tokens_len, max_tokens_len)).astype('float32')
            for tri in triplet:
                # for j in tri[0]:
                #     matrix_pair[j+1][tri[1][0]+1] = 2
                #     matrix_triple[j+1][tri[1][0]+1] = sentiment_dic[tri[2]]
                # for k in tri[1]:
                #     matrix_pair[tri[0][0]+1][k+1] = 2
                #     matrix_triple[tri[0][0]+1][k+1] = sentiment_dic[tri[2]]
                # fill the whole aspect x opinion rectangle with "inside" (2);
                # the +1 shifts all indices past [CLS]
                for j in tri[0]:
                    for k in tri[1]:
                        matrix_pair[j+1][k+1] = 2
                        matrix_triple[j+1][k+1] = sentiment_dic[tri[2]]
                # overwrite the sub-tokens of the first aspect word x first
                # opinion word with the "begin" value 1
                first_tokens_in_aspect = tri[3]
                first_tokens_in_opinion = tri[4]
                for k in tri[0][:first_tokens_in_aspect]:
                    for l in tri[1][:first_tokens_in_opinion]:
                        matrix_pair[k+1][l+1] = 1
                # matrix_pair[tri[0][0]+1][tri[1][0]+1] = 1
            pair_grid_labels[i] = matrix_pair
            triple_grid_labels[i] = matrix_triple
        return pair_grid_labels, triple_grid_labels
@staticmethod
def __mask__(sentences):
# sentences = sentences.strip('\n').split('\n')
mask = []
for sentence in sentences:
# sentence = sentence.strip('\n').split()
mask.append([1]*len(sentence))
return mask
    @staticmethod
    def __conver_triplet2subtriple__(sentence, triplets, tokenizer):
        '''Re-index word-level triplets to sub-word (token) level.

        Returns (new_sentence_list, new_triplets); each new triplet is
        (aspect_token_idxs, opinion_token_idxs, sentiment,
         n_tokens_of_first_aspect_word, n_tokens_of_first_opinion_word).
        Indices do NOT yet account for [CLS]/[SEP]; later converters add +1.
        '''
        sentence_list = sentence.strip('\n').split('\n') # a list containing all sentences (string)
        new_sentence_list, new_triplets = [], [] # similar to the init sentence and triplets
        for sentence, triplet in zip(sentence_list, triplets):
            # to get the sub_word_list and new_triplets for this sentence
            # tokenize the word and construct a dictionary: key: init_idx, value: new_idx for subword in the new subword_list
            word_list = sentence.strip('\n').split() # ['but', 'the', 'staff', 'was', 'so', 'horrible', 'to', 'us', '.']
            subword_list, new_triplet = [], []
            index_dic = {} # word index -> list of its sub-word token indices
            count = 0
            for i in range(len(word_list)):
                word = word_list[i]
                tokenized_word = tokenizer.tokenize(word)
                n_subwords = len(tokenized_word)
                subword_list.extend([tokenized_word])
                index_dic[i] = [x for x in range(count, count + n_subwords)]
                count += n_subwords
            # flatten the list-of-token-lists into one flat sub-word list
            subword_list = [subword for token in subword_list for subword in token]
            for tri in triplet:
                init_a, init_o, init_s = tri
                new_a = [new_idx for token in [index_dic[idx] for idx in init_a] for new_idx in token]
                new_o = [new_idx for token in [index_dic[idx] for idx in init_o] for new_idx in token]
                new_s = init_s
                # how many sub-words the first aspect/opinion word produced --
                # used downstream to tag all of them as "begin"
                num_tokens_of_the_first_aspect = len(index_dic[init_a[0]])
                num_tokens_of_the_first_opinion = len(index_dic[init_o[0]])
                #######
                new_tri = (new_a, new_o, new_s, num_tokens_of_the_first_aspect, num_tokens_of_the_first_opinion)
                new_triplet.append(new_tri)
            new_triplets.append(new_triplet)
            new_sentence_list.append(subword_list)
        return new_sentence_list, new_triplets
@staticmethod
def __read_data__(fname, domain, phase, tokenizer):
    '''
    Build the full per-sentence feature/label dicts for one dataset split.

    [cls] and [sep] are not included in __conver_triplet2subtriple__,
    all label convert functions take [cls] and [sep] into consideration
    (hence the ``max_tokens_len+2`` paddings below).

    :param fname: path of the '<split>_triplets.txt' file.
    :param domain: dataset name, e.g. 'res14' (used to locate auxiliary files).
    :param phase: 'train' / 'dev' / 'test' (used to pick the relevant-sentence file).
    :param tokenizer: a HuggingFace tokenizer (callable + .tokenize).
    :return: list of dicts, one per sentence.
    '''
    # read raw data
    sentence = ABSADatasetReader.__read_text__([fname]) # a long string splited by '\n'
    triplets = ABSADatasetReader.__read_triplets__([fname]) # a long list containing multiple lists for sentences [([16, 17], [15], 'POS')]
    # ['but', 'the', 'staff', 'was', 'so', 'horrible', 'to', 'us', '.']
    # [([2], [5], 'NEG')]
    new_sentence_list, new_triplets = ABSADatasetReader.__conver_triplet2subtriple__(sentence, triplets, tokenizer)
    # max_tokens_len is the max length in this dataset (train, test, dev)
    all_sentences, max_tokens_len = ABSADatasetReader.__read_all_sentence__(domain, tokenizer) # ['But the staff was so horrible to us .' ... ... ]
    assert len(sentence.strip('\n').split('\n')) == len(triplets) and len(new_sentence_list) == len(new_triplets) and len(new_sentence_list) == len(triplets)
    # generate basic labels (+2 accounts for the [cls]/[sep] positions)
    aspect_sequence_labels, opinion_sequence_labels, sentiment_sequence_labels = ABSADatasetReader.__triple2bio__(new_sentence_list, new_triplets, max_tokens_len+2)
    pair_grid_labels, triple_grid_labels = ABSADatasetReader.__triple2grid__(new_sentence_list, new_triplets, max_tokens_len+2)
    # NOTE(review): `sentence` is a single string here, so __mask__ iterates
    # characters, and `text_mask` is never stored in `data` below - looks dead.
    text_mask = ABSADatasetReader.__mask__(sentence)
    # read relevant sentences (indices precomputed by select_sentence.py)
    relevant_sentences_index = open('./ASTE-Rele-Sentences/'+domain + '/' + phase + '_r5.txt', 'r').read().split('\n')
    # local graph
    # local_graph = pickle.load(open('./ASTE-Graph-V2/' + domain + '/local_graph/' + phase + '_l.graph', 'rb'))
    # four types of global graphs
    # global_graph0 = pickle.load(open('./ASTE-Graph-V2/' + domain + '/global_graph0/' + phase + '_g5.graph', 'rb'))
    # global_graph1 = pickle.load(open('./ASTE-Graph-V2/' + domain + '/global_graph1/' + phase + '_g5.graph', 'rb'))
    # global_graph2 = pickle.load(open('./ASTE-Graph-V2/' + domain + '/global_graph2/' + phase + '_g5.graph', 'rb'))
    # global_graph3 = pickle.load(open('./ASTE-Graph-V2/' + domain + '/global_graph3/' + phase + '_g5.graph', 'rb'))
    # store all data for bucket
    all_data = []
    lines = sentence.strip('\n').split('\n')
    for i in range(0, len(lines)):
        # raw text, text indices and text mask
        text = lines[i].lower().strip()
        # NOTE(review): `input` shadows the builtin of the same name.
        input = tokenizer(text, max_length=max_tokens_len + 2, truncation=True, padding='max_length', return_tensors='pt')
        text_indices, mask = input.input_ids[0].tolist(), input.attention_mask[0].tolist()
        # index of relevant sentence for this sentence
        relevant_sentences = [int(idx) for idx in relevant_sentences_index[i].strip().split()]
        # indieces of relevant sentence for this sentence (representation)
        relevant_sentences_presentation, relevant_sentences_mask = [], []
        for mm in relevant_sentences:
            tem_sentence = all_sentences[mm]
            relevant_input = tokenizer(tem_sentence, max_length=max_tokens_len + 2, truncation=True, padding='max_length', return_tensors='pt')
            relevant_sentences_presentation.append(relevant_input.input_ids[0].tolist())
            relevant_sentences_mask.append(relevant_input.attention_mask[0].tolist())
        # different graphs for this sentence
        # local_graph_ = local_graph[i]
        # global_graph_0, global_graph_1, global_graph_2, global_graph_3 = \
        #     global_graph0[i], global_graph1[i], global_graph2[i], global_graph3[i]
        # different labels for this sentence
        aspect_sequence_label, opinion_sequence_label, sentiment_sequence_label, pair_grid_label, triple_grid_label = \
            aspect_sequence_labels[i], opinion_sequence_labels[i], sentiment_sequence_labels[i], \
            pair_grid_labels[i], triple_grid_labels[i]
        # package
        data = {
            'text_indices': text_indices,
            'mask': mask,
            # 'global_graph0': global_graph_0,
            # 'global_graph1': global_graph_1,
            # 'global_graph2': global_graph_2,
            # 'global_graph3': global_graph_3,
            # 'local_graph': local_graph_,
            'relevant_sentences': relevant_sentences,
            'relevant_sentence_presentation':relevant_sentences_presentation,
            'relevant_sentences_mask':relevant_sentences_mask,
            'aspect_sequence_label': aspect_sequence_label,
            'opinion_sequence_label': opinion_sequence_label,
            'sentiment_sequence_label': sentiment_sequence_label,
            # 'aspect_span_labels': aspect_span_label,
            # 'opinion_span_labels': opinion_span_label,
            'pair_grid_labels': pair_grid_label,
            'triple_grid_labels': triple_grid_label
        }
        all_data.append(data)
    return all_data
def __init__(self, dataset='res14', embed_dim=300):
    """Load train/dev/test splits of *dataset* and wrap each as an ABSADataset.

    NOTE(review): in the original code 'mams' reuses the res16 files; that
    aliasing is preserved here - confirm it is intentional.  `embed_dim` is
    currently unused (kept for interface compatibility).
    """
    print("preparing {0} dataset ...".format(dataset))
    # Build the split->path table; each dataset maps to its own data directory,
    # except 'mams' which (as in the original) points at the res16 files.
    domain_dirs = [('res14', 'res14'), ('lap14', 'lap14'), ('res15', 'res15'),
                   ('res16', 'res16'), ('mams', 'res16')]
    fname = {}
    for domain, data_dir in domain_dirs:
        fname[domain] = {
            split: './ASTE-Data-V2/' + data_dir + '/' + split + '_triplets.txt'
            for split in ('train', 'test', 'dev')
        }
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    self.train_data = ABSADataset(ABSADatasetReader.__read_data__(fname=fname[dataset]['train'], domain=dataset, phase='train', tokenizer=tokenizer))
    self.dev_data = ABSADataset(ABSADatasetReader.__read_data__(fname=fname[dataset]['dev'], domain=dataset, phase='dev', tokenizer=tokenizer))
    self.test_data = ABSADataset(ABSADatasetReader.__read_data__(fname=fname[dataset]['test'], domain=dataset, phase='test', tokenizer=tokenizer))
if __name__ == '__main__':
    # Smoke-test: build the res14 train split end-to-end.  Requires the local
    # data files plus network access to download the bert-base-uncased tokenizer.
    # tokenizer = Tokenizer()
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    ABSADatasetReader.__read_data__(fname='./ASTE-Data-V2/res14/train_triplets.txt', domain='res14', phase='train', tokenizer=tokenizer)
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,539,969
|
dumpmemory/ASTE-Glove-Bert
|
refs/heads/master
|
/select_sentence.py
|
import os
import random
import numpy as np
import pickle as pkl
import scipy.sparse as sp
from math import log
from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer
import sys
from scipy.spatial.distance import cosine
import pdb
from tqdm import tqdm
import time
import heapq
from transformers import AutoModel, AutoTokenizer
import torch.nn as nn
import torch
# datasets = ['res14', 'lap14', 'res15', 'res16']
# dataset = sys.argv[1]
# if dataset not in datasets:
# sys.exit("wrong dataset name")
# w_s_tf_idf = load_dict('../'+dataset+'_tf_idf')
def save_dict(obj, name):
    """Pickle *obj* to '<name>.pkl' using the highest available protocol."""
    with open('{}.pkl'.format(name), 'wb') as handle:
        pkl.dump(obj, handle, pkl.HIGHEST_PROTOCOL)
def load_dict(name):
    """Unpickle and return the object stored at '<name>.pkl'."""
    with open('{}.pkl'.format(name), 'rb') as handle:
        return pkl.load(handle)
def read_all_sentence(domain):
    """Read every sentence (train + dev + test, in that order) for *domain*.

    Each line of a '<split>_triplets.txt' file is '<sentence>####<triplets>';
    only the sentence part before '####' is kept.

    Fix: the original left all three file handles open (no close/with);
    they are now read with context managers.

    :param domain: dataset directory name under ./ASTE-Data-V2/.
    :return: flat list of raw sentence strings.
    """
    all_sentences = []
    for split in ('train', 'dev', 'test'):
        path = './ASTE-Data-V2/' + domain + '/' + split + '_triplets.txt'
        with open(path, 'r') as fin:
            all_sentences.extend(line.split('####')[0] for line in fin.readlines())
    return all_sentences
def read_text(fnames):
    '''a string: sentence1\nsentence2\n...sentencen\n'''
    # Collect the sentence part (before '####') of every line, lower-cased and
    # stripped, then join once instead of concatenating in the loop.
    pieces = []
    for fname in fnames:
        with open(fname) as fin:
            for line in fin:
                pieces.append(line.split('####')[0].lower().strip() + '\n')
    return ''.join(pieces)
def read_triplets(fnames):
    '''a list: [[([2], [5], 'NEG')], [(),()], [], ..., []]'''
    # Local import keeps this fix self-contained within the function.
    import ast
    triplets = []
    for fname in fnames:
        # Fix: use a context manager so the handle is closed even on error.
        with open(fname) as fin:
            for line in fin:
                # The annotation after '####' is a Python literal such as
                # [([2], [5], 'NEG')].  literal_eval parses it without the
                # arbitrary-code-execution risk of the original eval().
                triple = ast.literal_eval(line.split('####')[1])
                triplets.append(triple)
    return triplets
def select_sentence(domain):
    """For every sentence in *domain*, find its most similar other sentence.

    Embeds each sentence with the BERT [CLS] vector, computes pairwise cosine
    similarity (self-similarity forced to 0), takes the top-1 neighbour, and
    appends the neighbour indices to ./ASTE-Rele-Sentences/<domain>/<split>_r1.txt.

    NOTE(review): requires CUDA (`.cuda()` below) and the output files are
    opened in append mode, so re-running duplicates lines - confirm intended.
    """
    # read data
    all_sentences = read_all_sentence(domain)
    # read num of sentences in train, dev, test
    train_len, dev_len, test_len = \
        len(open('./ASTE-Data-V2/'+domain+'/train_triplets.txt').readlines()), len(open('./ASTE-Data-V2/'+domain+'/dev_triplets.txt').readlines()), len(open('./ASTE-Data-V2/'+domain+'/test_triplets.txt').readlines())
    print(train_len, dev_len, test_len)
    # load bert model
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    bert_model = AutoModel.from_pretrained('bert-base-uncased')
    cos = nn.CosineSimilarity(dim=0, eps=1e-6)
    # get sentence representation using BERT
    print('get sentence representation using BERT!')
    all_sentences_feature = []
    for sentence in tqdm(all_sentences):
        tokens = tokenizer.tokenize(sentence)
        input = tokenizer(sentence, max_length=len(tokens)+2, truncation=True, padding='max_length', return_tensors='pt')
        # [0][0][0] -> last_hidden_state, first batch item, [CLS] position
        features = bert_model(input.input_ids, input.attention_mask)[0][0][0]
        all_sentences_feature.append(features)
    # select relevant sentences
    print('select relevant sentences!')
    relevant_sentences_index = []
    relevant_sentences_val = []
    for sen_idx in tqdm(range(len(all_sentences))):
        tem_cos_sim = []
        sentence = all_sentences[sen_idx]
        sentence_feature = all_sentences_feature[sen_idx]
        tem_r_sentence_idx = []
        for feature_idx in range(len(all_sentences_feature)):
            feature = all_sentences_feature[feature_idx]
            if sen_idx == feature_idx:
                # zero out self-similarity so a sentence never selects itself
                tem_cos_sim.append(0)
            else:
                tem_cos_sim.append(cos(sentence_feature, feature))
            # if cos(sentence_feature, feature) > gamma:
            #     # if feature_idx != sen_idx:
            #     tem_r_sentence_idx.append(feature_idx)
        tem_cos_sim = torch.tensor(tem_cos_sim)
        tem_r_sentence_val, tem_r_sentence_idx = tem_cos_sim.cuda().topk(1, dim=0, largest=True, sorted=True)
        # tem_r_sentence_idx = heapq.nlargest(len(all_sentences[sen_idx].strip().split()), range(len(tem_cos_sim)), tem_cos_sim.__getitem__)
        relevant_sentences_index.append(tem_r_sentence_idx)
        relevant_sentences_val.append(tem_r_sentence_val)
    # split the concatenated results back into train/dev/test ranges
    train_relevant_sentences_index = relevant_sentences_index[:train_len]
    dev_relevant_sentences_index = relevant_sentences_index[train_len:train_len + dev_len]
    test_relevant_sentences_index = relevant_sentences_index[train_len + dev_len:]
    train_relevant_sentences_val = relevant_sentences_val[:train_len]
    dev_relevant_sentences_val = relevant_sentences_val[train_len:train_len + dev_len]
    test_relevant_sentences_val = relevant_sentences_val[train_len + dev_len:]
    # write relevant sentences
    # NOTE(review): stale comment said "3 denotes the top 3", but topk(1)
    # above keeps only the single best neighbour (hence the *_r1.txt names).
    train_r_sentences = open('./ASTE-Rele-Sentences/'+domain+'/train_r1.txt','a')
    dev_r_sentences = open('./ASTE-Rele-Sentences/'+domain+'/dev_r1.txt','a')
    test_r_sentences = open('./ASTE-Rele-Sentences/'+domain+'/test_r1.txt','a')
    for i in train_relevant_sentences_index:
        for ii in i :
            train_r_sentences.write(str(ii.cpu().numpy().tolist())+' ')
        train_r_sentences.write('\n')
    for i in dev_relevant_sentences_index:
        for ii in i :
            dev_r_sentences.write(str(ii.cpu().numpy().tolist())+' ')
        dev_r_sentences.write('\n')
    for i in test_relevant_sentences_index:
        for ii in i :
            test_r_sentences.write(str(ii.cpu().numpy().tolist())+' ')
        test_r_sentences.write('\n')
    train_r_sentences.close()
    dev_r_sentences.close()
    test_r_sentences.close()
if __name__ == '__main__':
    # Precompute relevant-sentence indices for all four ASTE-Data-V2 domains.
    # Each call downloads/loads BERT and appends to ./ASTE-Rele-Sentences/<domain>/.
    select_sentence('res14')
    select_sentence('res15')
    select_sentence('lap14')
    select_sentence('res16')
|
{"/models/__init__.py": ["/models/ts.py", "/models/ts0.py", "/models/ts1.py"], "/train_with_glove.py": ["/bucket_iterator.py", "/models/__init__.py"]}
|
16,594,452
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/controls/lib/dynamic_follow/__init__.py
|
import math
import numpy as np
from common.realtime import sec_since_boot
from selfdrive.controls.lib.drive_helpers import MPC_COST_LONG
from common.numpy_fast import interp, clip
from selfdrive.config import Conversions as CV
from common.params import Params
from common.dp_time import LAST_MODIFIED_DYNAMIC_FOLLOW
from common.dp_common import get_last_modified, param_get_if_updated
from selfdrive.controls.lib.dynamic_follow.auto_df import predict
from selfdrive.controls.lib.dynamic_follow.support import LeadData, CarData, dfData, dfProfiles
# When True, the live MPC cost update in DynamicFollow.update is skipped
# (CI/test guard); hard-coded off here.
travis = False
# dp: dynamic-follow profile identifiers (distance-keeping presets).
# 0 disables dynamic follow entirely; 4 lets the auto-df model pick a profile.
PROFILE_AUTO = 4
PROFILE_LONG = 3
PROFILE_NORMAL = 2
PROFILE_SHORT = 1
PROFILE_OFF = 0
class DynamicFollow:
  """Computes a dynamic following distance (TR, time-to-lead in seconds).

  Selects a follow profile (short/normal/long, or an auto profile predicted by
  the auto_df model), then modifies the base TR with lead-vehicle dynamics and
  adjusts the longitudinal MPC cost accordingly.  Live tunables (profile,
  global multiplier, minimum TR) are read from Params via dp helpers.
  """
  def __init__(self, mpc_id):
    # Identifies which longitudinal MPC instance this object serves.
    self.mpc_id = mpc_id
    self.df_profiles = dfProfiles()
    self.global_df_mod = 1.
    self.min_TR = 0.9

    # Model variables
    mpc_rate = 1 / 20.
    # Min/max ranges used to normalize each model input to [0, 1] in _norm.
    self.model_scales = {'v_ego': [-0.06112159043550491, 37.96522521972656], 'a_lead': [-3.109330892562866, 3.3612186908721924], 'v_lead': [0.0, 35.27671432495117], 'x_lead': [2.4600000381469727, 141.44000244140625]}
    self.predict_rate = 1 / 4.
    self.skip_every = round(0.25 / mpc_rate)
    self.model_input_len = round(45 / mpc_rate)

    # Dynamic follow variables
    self.default_TR = 1.8
    self.TR = 1.8
    # self.v_lead_retention = 2.0  # keep only last x seconds
    self.v_ego_retention = 2.5
    self.v_rel_retention = 1.5

    self.sng_TR = 1.8  # reacceleration stop and go TR
    self.sng_speed = 18.0 * CV.MPH_TO_MS

    # dp params: cached live-tunable state and change-detection timestamps
    self.last_ts = 0.
    self.modified = None
    self.last_modified = None
    self.last_modified_check = None
    self.dp_dynamic_follow = PROFILE_OFF
    self.dp_dynamic_follow_last_modified = None
    self.dp_dynamic_follow_multiplier_last_modified = None
    self.dp_dynamic_follow_min_tr_last_modified = None
    self.params = Params()

    self._setup_changing_variables()

  def _setup_changing_variables(self):
    """Reset all per-drive mutable state (called once from __init__)."""
    self.TR = self.default_TR
    self.model_profile = None

    self.sng = False
    self.car_data = CarData()
    self.lead_data = LeadData()
    self.df_data = dfData()  # dynamic follow data

    self.last_cost = 0.0
    self.last_predict_time = 0.0
    self.auto_df_model_data = []
    self._get_live_params()  # so they're defined just in case

  def update(self, CS, libmpc):
    """Main entry point: refresh state from CS and return the new TR."""
    self._get_live_params()
    self._update_car(CS)
    self._get_profiles()

    if not self.lead_data.status or self.dp_dynamic_follow == PROFILE_OFF:
      self.TR = self.default_TR
    else:
      self._store_df_data()
      self.TR = self._get_TR()

    if not travis:
      self._change_cost(libmpc)

    return self.TR

  def _get_profiles(self):
    """This receives profile change updates from dfManager and runs the auto-df prediction if auto mode"""
    if self.dp_dynamic_follow == PROFILE_AUTO:  # todo: find some way to share prediction between the two mpcs to reduce processing overhead
      self._get_pred()  # sets self.model_profile, all other checks are inside function

  def _norm(self, x, name):
    """Scale x into [0, 1] using the min/max range stored for *name*."""
    # NOTE(review): storing x on self looks accidental; nothing reads self.x.
    self.x = x
    return np.interp(x, self.model_scales[name], [0, 1])

  def _change_cost(self, libmpc):
    """Map the current TR to an MPC TTC cost and push it when it changes."""
    TRs = [0.9, 1.8, 2.7]
    costs = [1.10, 0.12, 0.05]
    cost = interp(self.TR, TRs, costs)
    if self.last_cost != cost:
      libmpc.change_tr(MPC_COST_LONG.TTC, cost, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
      self.last_cost = cost

  def _store_df_data(self):
    """Accumulate time-stamped ego/lead samples used by the TR mods and auto-df model."""
    cur_time = sec_since_boot()
    # Store custom relative accel over time
    if self.lead_data.status:
      if self.lead_data.new_lead:
        self.df_data.v_rels = []  # reset when new lead
      else:
        self.df_data.v_rels = self._remove_old_entries(self.df_data.v_rels, cur_time, self.v_rel_retention)
      self.df_data.v_rels.append({'v_ego': self.car_data.v_ego, 'v_lead': self.lead_data.v_lead, 'time': cur_time})

    # Store our velocity for better sng
    self.df_data.v_egos = self._remove_old_entries(self.df_data.v_egos, cur_time, self.v_ego_retention)
    self.df_data.v_egos.append({'v_ego': self.car_data.v_ego, 'time': cur_time})

    # Store data for auto-df model (normalized, fixed-length rolling window)
    self.auto_df_model_data.append([self._norm(self.car_data.v_ego, 'v_ego'),
                                    self._norm(self.lead_data.v_lead, 'v_lead'),
                                    self._norm(self.lead_data.a_lead, 'a_lead'),
                                    self._norm(self.lead_data.x_lead, 'x_lead')])
    while len(self.auto_df_model_data) > self.model_input_len:
      del self.auto_df_model_data[0]

  def _get_pred(self):
    """Run the auto-df model (rate-limited) and cache the argmax profile."""
    cur_time = sec_since_boot()
    if self.car_data.cruise_enabled and self.lead_data.status:
      if cur_time - self.last_predict_time > self.predict_rate:
        if len(self.auto_df_model_data) == self.model_input_len:
          pred = predict(np.array(self.auto_df_model_data[::self.skip_every], dtype=np.float32).flatten())
          self.last_predict_time = cur_time
          self.model_profile = int(np.argmax(pred))

  def _remove_old_entries(self, lst, cur_time, retention):
    """Drop samples older than *retention* seconds from a time-stamped list."""
    return [sample for sample in lst if cur_time - sample['time'] <= retention]

  def _calculate_relative_accel_new(self):
    # """
    # Moving window returning the following: (final relative velocity - initial relative velocity) / dT with a few extra mods
    # Output properties:
    #   When the lead is starting to decelerate, and our car remains the same speed, the output decreases (and vice versa)
    #   However when our car finally starts to decelerate at the same rate as the lead car, the output will move to near 0
    # >>> a = [(15 - 18), (14 - 17)]
    # >>> (a[-1] - a[0]) / 1
    # > 0.0
    # """
    min_consider_time = 0.5  # minimum amount of time required to consider calculation
    if len(self.df_data.v_rels) > 0:  # if not empty
      elapsed_time = self.df_data.v_rels[-1]['time'] - self.df_data.v_rels[0]['time']
      if elapsed_time > min_consider_time:
        x = [-2.6822, -1.7882, -0.8941, -0.447, -0.2235, 0.0, 0.2235, 0.447, 0.8941, 1.7882, 2.6822]
        y = [0.3245, 0.277, 0.11075, 0.08106, 0.06325, 0.0, -0.09, -0.09375, -0.125, -0.3, -0.35]

        v_lead_start = self.df_data.v_rels[0]['v_lead']  # setup common variables
        v_ego_start = self.df_data.v_rels[0]['v_ego']
        v_lead_end = self.df_data.v_rels[-1]['v_lead']
        v_ego_end = self.df_data.v_rels[-1]['v_ego']

        v_ego_change = v_ego_end - v_ego_start
        v_lead_change = v_lead_end - v_lead_start

        # guard against divide-by-zero in the lead_factor branches below
        if v_lead_change - v_ego_change == 0 or v_lead_change + v_ego_change == 0:
          return None

        initial_v_rel = v_lead_start - v_ego_start
        cur_v_rel = v_lead_end - v_ego_end
        delta_v_rel = (cur_v_rel - initial_v_rel) / elapsed_time

        neg_pos = False
        if v_ego_change == 0 or v_lead_change == 0:  # FIXME: this all is a mess, but works. need to simplify
          lead_factor = v_lead_change / (v_lead_change - v_ego_change)

        elif (v_ego_change < 0) != (v_lead_change < 0):  # one is negative and one is positive, or ^ = XOR
          lead_factor = v_lead_change / (v_lead_change - v_ego_change)
          if v_ego_change > 0 > v_lead_change:
            delta_v_rel = -delta_v_rel  # switch when appropriate
            neg_pos = True

        elif v_ego_change * v_lead_change > 0:  # both are negative or both are positive
          lead_factor = v_lead_change / (v_lead_change + v_ego_change)
          if v_ego_change > 0 and v_lead_change > 0:  # both are positive
            if v_ego_change < v_lead_change:
              delta_v_rel = -delta_v_rel  # switch when appropriate
          elif v_ego_change > v_lead_change:  # both are negative and v_ego_change > v_lead_change
            delta_v_rel = -delta_v_rel

        else:
          raise Exception('Uncovered case! Should be impossible to be be here')

        if not neg_pos:  # negative and positive require different mod code to be correct
          rel_vel_mod = (-delta_v_rel * abs(lead_factor)) + (delta_v_rel * (1 - abs(lead_factor)))
        else:
          rel_vel_mod = math.copysign(delta_v_rel, v_lead_change - v_ego_change) * lead_factor

        calc_mod = np.interp(rel_vel_mod, x, y)
        if v_lead_end > v_ego_end and calc_mod >= 0:
          # if we're accelerating quicker than lead but lead is still faster, reduce mod
          # todo: could remove this since we restrict this mod where called
          x = np.array([0, 2, 4, 8]) * CV.MPH_TO_MS
          y = [1.0, -0.25, -0.65, -0.95]
          v_rel_mod = np.interp(v_lead_end - v_ego_end, x, y)
          calc_mod *= v_rel_mod
        return calc_mod
    return None

  def global_profile_mod(self, profile_mod_x, profile_mod_pos, profile_mod_neg, x_vel, y_dist):
    """
    This function modifies the y_dist list used by dynamic follow in accordance with global_df_mod
    It also intelligently adjusts the profile mods at each breakpoint based on the change in TR
    """
    if self.global_df_mod == 1.:
      return profile_mod_pos, profile_mod_neg, y_dist
    global_df_mod = 1 - self.global_df_mod

    # Calculate new TRs
    speeds = [0, self.sng_speed, 18, x_vel[-1]]  # [0, 18 mph, ~40 mph, highest profile mod speed (~78 mph)]
    mods = [0, 0.1, 0.7, 1]  # how much to limit global_df_mod at each speed, 1 is full effect
    y_dist_new = [y - (y * global_df_mod * np.interp(x, speeds, mods)) for x, y in zip(x_vel, y_dist)]

    # Calculate how to change profile mods based on change in TR
    # eg. if df mod is 0.7, then increase positive mod and decrease negative mod
    calc_profile_mods = [(np.interp(mod_x, x_vel, y_dist) - np.interp(mod_x, x_vel, y_dist_new) + 1) for mod_x in profile_mod_x]
    profile_mod_pos = [mod_pos * mod for mod_pos, mod in zip(profile_mod_pos, calc_profile_mods)]
    profile_mod_neg = [mod_neg * ((1 - mod) + 1) for mod_neg, mod in zip(profile_mod_neg, calc_profile_mods)]

    return profile_mod_pos, profile_mod_neg, y_dist_new

  def _get_TR(self):
    """Compute the final TR: base profile curve + lead-dynamics modifications, clipped to [min_TR, 2.7]."""
    x_vel = [0.0, 1.8627, 3.7253, 5.588, 7.4507, 9.3133, 11.5598, 13.645, 22.352, 31.2928, 33.528, 35.7632, 40.2336]  # velocities
    profile_mod_x = [2.2352, 13.4112, 24.5872, 35.7632]  # profile mod speeds, mph: [5., 30., 55., 80.]

    if self.dp_dynamic_follow == PROFILE_AUTO:  # decide which profile to use, model profile will be updated before this
      # df is 0 = traffic, 1 = relaxed, 2 = roadtrip, 3 = auto
      # dp is 0 = off, 1 = short, 2 = normal, 3 = long, 4 = auto
      # if it's model profile, we need to convert it
      if self.model_profile is None:
        # when its none, we use normal instead
        df_profile = PROFILE_NORMAL
      else:
        df_profile = self.model_profile + 1
    else:
      df_profile = self.dp_dynamic_follow

    if df_profile == PROFILE_LONG:
      y_dist = [1.3978, 1.4132, 1.4318, 1.4536, 1.485, 1.5229, 1.5819, 1.6203, 1.7238, 1.8231, 1.8379, 1.8495, 1.8535]  # TRs
      profile_mod_pos = [0.92, 0.7, 0.25, 0.15]
      profile_mod_neg = [1.1, 1.3, 2.0, 2.3]
    elif df_profile == PROFILE_SHORT:  # for in congested traffic
      x_vel = [0.0, 1.892, 3.7432, 5.8632, 8.0727, 10.7301, 14.343, 17.6275, 22.4049, 28.6752, 34.8858, 40.35]
      # y_dist = [1.3781, 1.3791, 1.3802, 1.3825, 1.3984, 1.4249, 1.4194, 1.3162, 1.1916, 1.0145, 0.9855, 0.9562]  # original
      # y_dist = [1.3781, 1.3791, 1.3112, 1.2442, 1.2306, 1.2112, 1.2775, 1.1977, 1.0963, 0.9435, 0.9067, 0.8749]  # avg. 7.3 ft closer from 18 to 90 mph
      y_dist = [1.3781, 1.3791, 1.3457, 1.3134, 1.3145, 1.318, 1.3485, 1.257, 1.144, 0.979, 0.9461, 0.9156]
      profile_mod_pos = [1.05, 1.55, 2.6, 3.75]
      profile_mod_neg = [0.84, .275, 0.1, 0.05]
    elif df_profile == PROFILE_NORMAL:  # default to relaxed/stock
      y_dist = [1.385, 1.394, 1.406, 1.421, 1.444, 1.474, 1.516, 1.534, 1.546, 1.568, 1.579, 1.593, 1.614]
      profile_mod_pos = [1.0] * 4
      profile_mod_neg = [1.0] * 4
    else:
      raise Exception('Unknown profile type: {}'.format(df_profile))

    # Global df mod
    profile_mod_pos, profile_mod_neg, y_dist = self.global_profile_mod(profile_mod_x, profile_mod_pos, profile_mod_neg, x_vel, y_dist)

    # Profile modifications - Designed so that each profile reacts similarly to changing lead dynamics
    profile_mod_pos = interp(self.car_data.v_ego, profile_mod_x, profile_mod_pos)
    profile_mod_neg = interp(self.car_data.v_ego, profile_mod_x, profile_mod_neg)

    if self.car_data.v_ego > self.sng_speed:  # keep sng distance until we're above sng speed again
      self.sng = False

    if (self.car_data.v_ego >= self.sng_speed or self.df_data.v_egos[0]['v_ego'] >= self.car_data.v_ego) and not self.sng:
      # if above 15 mph OR we're decelerating to a stop, keep shorter TR. when we reaccelerate, use sng_TR and slowly decrease
      TR = interp(self.car_data.v_ego, x_vel, y_dist)
    else:  # this allows us to get closer to the lead car when stopping, while being able to have smooth stop and go when reaccelerating
      self.sng = True
      x = [self.sng_speed * 0.7, self.sng_speed]  # decrease TR between 12.6 and 18 mph from 1.8s to defined TR above at 18mph while accelerating
      y = [self.sng_TR, interp(self.sng_speed, x_vel, y_dist)]
      TR = interp(self.car_data.v_ego, x, y)

    TR_mods = []
    # Dynamic follow modifications (the secret sauce)
    x = [-26.8224, -20.0288, -15.6871, -11.1965, -7.8645, -4.9472, -3.0541, -2.2244, -1.5045, -0.7908, -0.3196, 0.0, 0.5588, 1.3682, 1.898, 2.7316, 4.4704]  # relative velocity values
    y = [.76, 0.62323, 0.49488, 0.40656, 0.32227, 0.23914, 0.12269, 0.10483, 0.08074, 0.04886, 0.0072, 0.0, -0.05648, -0.0792, -0.15675, -0.23289, -0.315]  # modification values
    TR_mods.append(interp(self.lead_data.v_lead - self.car_data.v_ego, x, y))

    x = [-4.4795, -2.8122, -1.5727, -1.1129, -0.6611, -0.2692, 0.0, 0.1466, 0.5144, 0.6903, 0.9302]  # lead acceleration values
    y = [0.24, 0.16, 0.092, 0.0515, 0.0305, 0.022, 0.0, -0.0153, -0.042, -0.053, -0.059]  # modification values
    TR_mods.append(interp(self.lead_data.a_lead, x, y))

    rel_accel_mod = self._calculate_relative_accel_new()
    if rel_accel_mod is not None:  # if available
      deadzone = 2 * CV.MPH_TO_MS
      if self.lead_data.v_lead - deadzone > self.car_data.v_ego:
        TR_mods.append(rel_accel_mod)

    x = [self.sng_speed / 5.0, self.sng_speed]  # as we approach 0, apply x% more distance
    y = [1.05, 1.0]
    profile_mod_pos *= interp(self.car_data.v_ego, x, y)  # but only for currently positive mods

    TR_mod = sum([mod * profile_mod_neg if mod < 0 else mod * profile_mod_pos for mod in TR_mods])  # alter TR modification according to profile
    TR += TR_mod

    # NOTE(review): 'and' binds tighter than 'or', so this reads as
    # left_blinker or (right_blinker and df_profile != traffic) - the traffic
    # check is likely intended to cover both blinkers; preserved as-is.
    if self.car_data.left_blinker or self.car_data.right_blinker and df_profile != self.df_profiles.traffic:
      x = [8.9408, 22.352, 31.2928]  # 20, 50, 70 mph
      y = [1.0, .75, .65]
      TR *= interp(self.car_data.v_ego, x, y)  # reduce TR when changing lanes

    return float(clip(TR, self.min_TR, 2.7))

  def update_lead(self, v_lead=None, a_lead=None, x_lead=None, status=False, new_lead=False):
    """Store the latest lead-vehicle measurements (called by the planner)."""
    self.lead_data.v_lead = v_lead
    self.lead_data.a_lead = a_lead
    self.lead_data.x_lead = x_lead
    self.lead_data.status = status
    self.lead_data.new_lead = new_lead

  def _update_car(self, CS):
    """Copy the relevant carState fields into our local CarData cache."""
    self.car_data.v_ego = CS.vEgo
    self.car_data.a_ego = CS.aEgo

    self.car_data.left_blinker = CS.leftBlinker
    self.car_data.right_blinker = CS.rightBlinker
    self.car_data.cruise_enabled = CS.cruiseState.enabled

  def _get_live_params(self):
    """Refresh the live-tunable dp params when their backing store changed."""
    self.last_modified_check, self.modified = get_last_modified(LAST_MODIFIED_DYNAMIC_FOLLOW, self.last_modified_check, self.modified)
    if self.last_modified != self.modified:
      self.dp_dynamic_follow, self.dp_dynamic_follow_last_modified = param_get_if_updated("dp_dynamic_follow", "int", self.dp_dynamic_follow, self.dp_dynamic_follow_last_modified)
      self.global_df_mod, self.dp_dynamic_follow_multiplier_last_modified = param_get_if_updated("dp_dynamic_follow_multiplier", "float", self.global_df_mod, self.dp_dynamic_follow_multiplier_last_modified)
      if self.global_df_mod != 1.:
        self.global_df_mod = clip(self.global_df_mod, .85, 9.99)
      self.min_TR, self.dp_dynamic_follow_min_tr_last_modified = param_get_if_updated("dp_dynamic_follow_min_tr", "float", self.min_TR, self.dp_dynamic_follow_min_tr_last_modified)
      if self.min_TR != .9:
        self.min_TR = clip(self.min_TR, .85, 9.99)
      self.last_modified = self.modified
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,453
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/dp_conf.py
|
#!/usr/bin/env python3.7
#pylint: skip-file
import os
import json
import time
from math import floor
'''
* type: Bool, Int8, UInt8, UInt16, Float32
* conf_type: param, struct
* dependencies needs to use struct and loaded prior so we don't have to read the param multiple times.
* update_once: True, False (the param will only load up once.)
'''
# Conf definitions consumed by the dp_conf helpers below and the settings UI.
# Fixes vs. previous revision:
#  - dp_dashcam: default normalized to a real bool (0 serialized identically but was inconsistent with type Bool)
#  - dp_toyota_lowest_cruise_override_speed: depends entry was self-referential; now points at dp_toyota_lowest_cruise_override
confs = [
  # thermald data
  {'name': 'dp_thermal_started', 'default': False, 'type': 'Bool', 'conf_type': ['struct']},
  {'name': 'dp_thermal_overheat', 'default': False, 'type': 'Bool', 'conf_type': ['struct']},
  # car specific
  {'name': 'dp_vw', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct'], 'update_once': True},
  {'name': 'dp_atl', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct'], 'update_once': True},
  # full screen apps
  {'name': 'dp_app_waze', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_waze_manual', 'default': 0, 'type': 'Int8', 'min': -1, 'max': 1, 'depends': [{'name': 'dp_app_waze', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_hr', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_hr_manual', 'default': 0, 'type': 'Int8', 'min': -1, 'max': 1, 'depends': [{'name': 'dp_app_hr', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  # dashcam related
  {'name': 'dp_dashcam', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_dashcam_hours_stored', 'default': 24, 'type': 'UInt8', 'min': 1, 'max': 255, 'depends': [{'name': 'dp_dashcam', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  # auto shutdown
  {'name': 'dp_auto_shutdown', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_auto_shutdown_in', 'default': 90, 'type': 'UInt16', 'min': 1, 'max': 65535, 'depends': [{'name': 'dp_auto_shutdown', 'vals': [True]}], 'conf_type': ['param']},
  # service
  {'name': 'dp_logger', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_athenad', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_uploader', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_upload_on_mobile', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_uploader', 'vals': [True]}], 'conf_type': ['param']},
  {'name': 'dp_upload_on_hotspot', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_uploader', 'vals': [True]}], 'conf_type': ['param']},
  {'name': 'dp_updated', 'default': True, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_gpxd', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_hotspot_on_boot', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  # lat ctrl
  {'name': 'dp_lat_ctrl', 'default': True, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_steering_limit_alert', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_lat_ctrl', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_steering_on_signal', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_lat_ctrl', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_signal_off_delay', 'default': 0, 'type': 'UInt8', 'min': 0, 'max': 10, 'conf_type': ['param', 'struct']},
  # assist/auto lane change
  {'name': 'dp_assisted_lc_min_mph', 'default': 45, 'type': 'Float32', 'min': 0, 'max': 255., 'conf_type': ['param', 'struct']},
  {'name': 'dp_auto_lc', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_auto_lc_cont', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_auto_lc', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_auto_lc_min_mph', 'default': 60, 'type': 'Float32', 'min': 0, 'max': 255., 'depends': [{'name': 'dp_auto_lc', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_auto_lc_delay', 'default': 3, 'type': 'Float32', 'min': 0, 'max': 10., 'depends': [{'name': 'dp_auto_lc', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  # long ctrl
  {'name': 'dp_slow_on_curve', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_allow_gas', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_max_ctrl_speed', 'default': 92., 'type': 'Float32', 'conf_type': ['param', 'struct']},
  {'name': 'dp_lead_car_alert', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_lead_car_away_alert', 'default': True, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_dynamic_follow', 'default': 0, 'type': 'UInt8', 'min': 0, 'max': 4, 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_dynamic_follow_multiplier', 'default': 1., 'type': 'Float32', 'min': 0.85, 'max': 1.2, 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param']},
  {'name': 'dp_dynamic_follow_min_tr', 'default': 0.9, 'type': 'Float32', 'min': 0.85, 'max': 1.6, 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param']},
  {'name': 'dp_dynamic_gas', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param']},
  {'name': 'dp_accel_profile', 'default': 0, 'type': 'UInt8', 'min': 0, 'max': 3, 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  # safety
  {'name': 'dp_driver_monitor', 'default': True, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_steering_monitor', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_driver_monitor', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_steering_monitor_timer', 'default': 70, 'type': 'UInt8', 'min': 70, 'max': 360, 'depends': [{'name': 'dp_driver_monitor', 'vals': [False]}, {'name': 'dp_steering_monitor', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_gear_check', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_temp_monitor', 'default': True, 'type': 'Bool', 'conf_type': ['param']},
  # UIs
  {'name': 'dp_driving_ui', 'default': True, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_screen_off_reversing', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_screen_off_driving', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_app_waze', 'vals': [False]}, {'name': 'dp_app_hr', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_speed', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_driving_ui', 'vals': [True]},
                                                                       {'name': 'dp_ui_screen_off_driving', 'vals': [False]}, {'name': 'dp_app_waze', 'vals': [False]},
                                                                       {'name': 'dp_app_hr', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_event', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_driving_ui', 'vals': [True]}, {'name': 'dp_ui_screen_off_driving', 'vals': [False]},
                                                                       {'name': 'dp_app_waze', 'vals': [False]}, {'name': 'dp_app_hr', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_max_speed', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_driving_ui', 'vals': [True]}, {'name': 'dp_ui_screen_off_driving', 'vals': [False]},
                                                                           {'name': 'dp_app_waze', 'vals': [False]}, {'name': 'dp_app_hr', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_face', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_driving_ui', 'vals': [True]}, {'name': 'dp_driver_monitor', 'vals': [True]},
                                                                      {'name': 'dp_ui_screen_off_driving', 'vals': [False]}, {'name': 'dp_app_waze', 'vals': [False]},
                                                                      {'name': 'dp_app_hr', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_lane', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_driving_ui', 'vals': [True]}, {'name': 'dp_ui_screen_off_driving', 'vals': [False]},
                                                                      {'name': 'dp_app_waze', 'vals': [False]}, {'name': 'dp_app_hr', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_path', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_driving_ui', 'vals': [True]}, {'name': 'dp_ui_screen_off_driving', 'vals': [False]},
                                                                      {'name': 'dp_app_waze', 'vals': [False]}, {'name': 'dp_app_hr', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_lead', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_driving_ui', 'vals': [True]}, {'name': 'dp_ui_screen_off_driving', 'vals': [False]},
                                                                      {'name': 'dp_app_waze', 'vals': [False]}, {'name': 'dp_app_hr', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_dev', 'default': True, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_dev_mini', 'default': True, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_blinker', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_driving_ui', 'vals': [True]}, {'name': 'dp_ui_screen_off_driving', 'vals': [False]},
                                                                          {'name': 'dp_app_waze', 'vals': [False]}, {'name': 'dp_app_hr', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_brightness', 'default': 0, 'type': 'UInt8', 'min': 0, 'max': 100, 'conf_type': ['param', 'struct']},
  {'name': 'dp_ui_volume_boost', 'default': 0, 'type': 'Int8', 'min': -100, 'max': 100, 'conf_type': ['param', 'struct']},
  # Apps
  {'name': 'dp_app_auto_update', 'default': True, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_ext_gps', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_tomtom', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_tomtom_auto', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_app_tomtom', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_tomtom_manual', 'default': 0, 'type': 'Int8', 'min': -1, 'max': 1, 'depends': [{'name': 'dp_app_tomtom', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_autonavi', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_autonavi_auto', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_app_autonavi', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_autonavi_manual', 'default': 0, 'type': 'Int8', 'min': -1, 'max': 1, 'depends': [{'name': 'dp_app_autonavi', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_aegis', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_aegis_auto', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_app_aegis', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_aegis_manual', 'default': 0, 'type': 'Int8', 'min': -1, 'max': 1, 'depends': [{'name': 'dp_app_aegis', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_mixplorer', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_app_mixplorer_manual', 'default': 0, 'type': 'Int8', 'min': -1, 'max': 1, 'depends': [{'name': 'dp_app_mixplorer', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  # custom car
  {'name': 'dp_car_selected', 'default': '', 'type': 'Text', 'conf_type': ['param']},
  {'name': 'dp_car_list', 'default': '', 'type': 'Text', 'conf_type': ['param']},
  {'name': 'dp_car_detected', 'default': '', 'type': 'Text', 'conf_type': ['param', 'struct']},
  # toyota
  {'name': 'dp_toyota_ldw', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_car_detected', 'vals': ['toyota']}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_toyota_sng', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_car_detected', 'vals': ['toyota']}, {'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_toyota_zss', 'default': False, 'type': 'Bool', 'depends': [{'name': 'dp_car_detected', 'vals': ['toyota']}], 'conf_type': ['param']},
  {'name': 'dp_toyota_lowest_cruise_override', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_car_detected', 'vals': ['toyota']}, {'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_toyota_lowest_cruise_override_vego', 'default': True, 'type': 'Bool', 'depends': [{'name': 'dp_car_detected', 'vals': ['toyota']}, {'name': 'dp_atl', 'vals': [False]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_toyota_lowest_cruise_override_at', 'default': 28, 'type': 'Float32', 'depends': [{'name': 'dp_car_detected', 'vals': ['toyota']},
                                                                                                {'name': 'dp_toyota_lowest_cruise_override', 'vals': [True]}], 'min': 0, 'max': 255., 'conf_type': ['param', 'struct']},
  # depends previously pointed at this conf itself (a Float32, never True); fixed to the Bool master toggle
  {'name': 'dp_toyota_lowest_cruise_override_speed', 'default': 7, 'type': 'Float32', 'depends': [{'name': 'dp_car_detected', 'vals': ['toyota']},
                                                                                                  {'name': 'dp_toyota_lowest_cruise_override', 'vals': [True]}], 'min': 0, 'max': 255., 'conf_type': ['param', 'struct']},
  # hyundai
  {'name': 'dp_hkg_smart_mdps', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  # honda
  {'name': 'dp_honda_eps_mod', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  #misc
  {'name': 'dp_ip_addr', 'default': '', 'type': 'Text', 'conf_type': ['struct']},
  {'name': 'dp_full_speed_fan', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_uno_fan_mode', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_last_modified', 'default': str(floor(time.time())), 'type': 'Text', 'conf_type': ['param']},
  {'name': 'dp_camera_offset', 'default': 6, 'type': 'Int8', 'min': -255, 'max': 255, 'conf_type': ['param', 'struct']},
  {'name': 'dp_locale', 'default': 'en-US', 'type': 'Text', 'conf_type': ['param', 'struct'], 'update_once': True},
  {'name': 'dp_disable_relay', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_charging_ctrl', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_charging_at', 'default': 60, 'type': 'UInt8', 'min': 0, 'max': 100, 'depends': [{'name': 'dp_charging_ctrl', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_discharging_at', 'default': 70, 'type': 'UInt8', 'min': 0, 'max': 100, 'depends': [{'name': 'dp_charging_ctrl', 'vals': [True]}], 'conf_type': ['param', 'struct']},
  {'name': 'dp_reg', 'default': True, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_is_updating', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
  {'name': 'dp_sr_learner', 'default': True, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_lqr', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_reset_live_param_on_start', 'default': False, 'type': 'Bool', 'conf_type': ['param']},
  {'name': 'dp_timebomb_assist', 'default': False, 'type': 'Bool', 'conf_type': ['param', 'struct']},
]
def get_definition(name):
  """Return the conf dict registered under `name`, or None if unknown."""
  return next((c for c in confs if c['name'] == name), None)
def to_param_val(name, val):
  """Serialize `val` to the string form stored in Params for conf `name`.

  Bools become '1'/'0'; ints and floats are coerced before stringifying.
  Returns '' when the conf is unknown or `val` cannot be coerced.
  """
  conf = get_definition(name)
  if conf is None:
    return ''
  kind = conf['type'].lower()  # renamed from `type` to avoid shadowing the builtin
  try:
    if 'bool' in kind:
      val = '1' if val else '0'
    elif 'int' in kind:
      val = int(val)
    elif 'float' in kind:
      val = float(val)
    return str(val)
  except (ValueError, TypeError):
    return ''
def to_struct_val(name, val):
  """Parse the Params string `val` back into a typed value for conf `name`.

  Inverse of to_param_val: bools come from '1'/'0', ints and floats are
  coerced.  Returns None when the conf is unknown or coercion fails.
  """
  conf = get_definition(name)
  if conf is None:
    return None
  try:
    kind = conf['type'].lower()  # renamed from `type` to avoid shadowing the builtin
    if 'bool' in kind:
      val = (val == '1')
    elif 'int' in kind:
      val = int(val)
    elif 'float' in kind:
      val = float(val)
    return val
  except (ValueError, TypeError):
    return None
'''
function to convert param name into struct name.
'''
def get_struct_name(snake_str):
  """Convert a snake_case param name to the camelCase capnp field name."""
  head, *tail = snake_str.split('_')
  # First word stays lowercase; each following word gets its first letter capitalized.
  return head + ''.join(map(str.title, tail))
'''
function to generate struct for log.capnp
'''
def gen_log_struct():
  """Print the capnp `DragonConf` struct definition (for pasting into log.capnp).

  Only confs with 'struct' in their conf_type get a field; field ordinals are
  assigned in list order.
  """
  # Build lines in a list and join once instead of quadratic string += ,
  # and avoid shadowing the builtin `str` as the old code did.
  lines = ["# dp", "struct DragonConf {"]
  idx = 0
  for conf in confs:
    if 'struct' in conf['conf_type']:
      lines.append(f"  {get_struct_name(conf['name'])} @{idx} :{conf['type']};")
      idx += 1
  lines.append("}")
  print("\n".join(lines))
'''
function to append new keys to params.py
'''
def init_params_keys(keys, type):
  """Register every param-backed conf name (utf-8 encoded) in `keys`, mapped to `type`."""
  param_confs = (c for c in confs if 'param' in c['conf_type'])
  for c in param_confs:
    keys[c['name'].encode('utf-8')] = type
  return keys
'''
function to generate support car list
'''
def get_support_car_list():
  """Scan installed car modules and return a JSON map of car folder -> sorted fingerprint names.

  Walks /data/openpilot/selfdrive/car, importing each brand's values module and
  collecting keys from its FINGERPRINTS / FW_VERSIONS dicts.  Brands whose
  module is missing or broken are silently skipped (best-effort).
  """
  attrs = ['FINGERPRINTS', 'FW_VERSIONS']
  cars = {}
  for car_folder in [x[0] for x in os.walk('/data/openpilot/selfdrive/car')]:
    try:
      car_name = car_folder.split('/')[-1]
      if car_name == "mock":
        continue
      names = []
      for attr in attrs:
        values = __import__('selfdrive.car.%s.values' % car_name, fromlist=[attr])
        if not hasattr(values, attr):
          continue
        attr_values = getattr(values, attr)
        if isinstance(attr_values, dict):
          # only the keys matter; values were fetched and discarded before (PERF102)
          for f in attr_values:
            if f not in names:
              names.append(f)
      names.sort()
      cars[car_name] = names
    except (ImportError, IOError, ValueError):
      pass
  return json.dumps(cars)
'''
function to init param value.
should add this into manager.py
'''
def init_params_vals(params):
  """Seed Params with defaults for every param-backed conf not yet set.

  dp_car_list is always refreshed from the installed car modules; everything
  else is written only when currently unset.
  """
  for conf in confs:
    if 'param' not in conf['conf_type']:
      continue
    name = conf['name']
    if name == 'dp_car_list':
      params.put(name, get_support_car_list())
    elif params.get(name) is None:
      params.put(name, to_param_val(name, conf['default']))
if __name__ == "__main__":
  # Manual invocation: print the generated capnp DragonConf struct.
  gen_log_struct()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,454
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/test/test_cpu_usage.py
|
#!/usr/bin/env python3
import os
import time
import sys
import subprocess
import cereal.messaging as messaging
from common.basedir import BASEDIR
from common.params import Params
from selfdrive.test.helpers import set_params_enabled
def cputime_total(ct):
  """Total CPU time of a procLog entry: user + system, including children."""
  return sum((ct.cpuUser, ct.cpuSystem, ct.cpuChildrenUser, ct.cpuChildrenSystem))
def print_cpu_usage(first_proc, last_proc):
  """Report per-process CPU usage between two procLog samples.

  Prints a formatted table and returns True only when every listed process
  was found and stayed within its expected CPU band.
  """
  # (substring matched against the process cmdline, expected average %CPU)
  procs = [
    ("selfdrive.controls.controlsd", 47.0),
    ("./loggerd", 42.0),
    ("selfdrive.locationd.locationd", 35.0),
    ("selfdrive.locationd.paramsd", 12.0),
    ("selfdrive.controls.plannerd", 10.0),
    ("./_modeld", 7.12),
    ("./camerad", 7.07),
    ("./_sensord", 6.17),
    ("./_ui", 5.82),
    ("selfdrive.controls.radard", 5.67),
    ("./boardd", 3.63),
    ("./_dmonitoringmodeld", 2.67),
    ("selfdrive.logmessaged", 1.7),
    ("selfdrive.thermald.thermald", 2.41),
    ("selfdrive.locationd.calibrationd", 2.0),
    ("selfdrive.monitoring.dmonitoringd", 1.90),
    ("./proclogd", 1.54),
    ("./_gpsd", 0.09),
    ("./clocksd", 0.02),
    ("./ubloxd", 0.02),
    ("selfdrive.tombstoned", 0),
    ("./logcatd", 0),
  ]

  r = True
  # elapsed wall time between the two samples (logMonoTime is in ns)
  dt = (last_proc.logMonoTime - first_proc.logMonoTime) / 1e9
  result = "------------------------------------------------\n"
  for proc_name, normal_cpu_usage in procs:
    try:
      # first matching entry in each sample; IndexError if the process is absent
      first = [p for p in first_proc.procLog.procs if proc_name in p.cmdline][0]
      last = [p for p in last_proc.procLog.procs if proc_name in p.cmdline][0]
      cpu_time = cputime_total(last) - cputime_total(first)
      cpu_usage = cpu_time / dt * 100.
      # warn when usage exceeds baseline by >10% relative AND >5 absolute points
      if cpu_usage > max(normal_cpu_usage * 1.1, normal_cpu_usage + 5.0):
        result += f"Warning {proc_name} using more CPU than normal\n"
        r = False
      # warn when usage is well below baseline (process may be stalled)
      elif cpu_usage < min(normal_cpu_usage * 0.65, max(normal_cpu_usage - 1.0, 0.0)):
        result += f"Warning {proc_name} using less CPU than normal\n"
        r = False
      result += f"{proc_name.ljust(35)} {cpu_usage:.2f}%\n"
    except IndexError:
      result += f"{proc_name.ljust(35)} NO METRICS FOUND\n"
      r = False
  result += "------------------------------------------------\n"
  print(result)
  return r
def test_cpu_usage():
  """Boot manager, sample procLog twice a minute apart, and validate CPU usage.

  Returns True when print_cpu_usage reports every process within bounds.
  The manager subprocess is always terminated (and killed if it won't exit).
  """
  cpu_ok = False

  # start manager
  manager_path = os.path.join(BASEDIR, "selfdrive/manager.py")
  manager_proc = subprocess.Popen(["python", manager_path])
  try:
    proc_sock = messaging.sub_sock('procLog', conflate=True, timeout=2000)

    # wait until everything's started (CarParams appears once the car is up),
    # bounded at 210s
    start_time = time.monotonic()
    while time.monotonic() - start_time < 210:
      if Params().get("CarParams") is not None:
        break
      time.sleep(2)

    # take first sample
    time.sleep(5)
    first_proc = messaging.recv_sock(proc_sock, wait=True)
    if first_proc is None:
      # fixed typo: message previously said "progLog"
      raise Exception("\n\nTEST FAILED: procLog recv timed out\n\n")

    # run for a minute and get last sample
    time.sleep(60)
    last_proc = messaging.recv_sock(proc_sock, wait=True)

    cpu_ok = print_cpu_usage(first_proc, last_proc)
  finally:
    manager_proc.terminate()
    # Popen.wait(timeout) raises TimeoutExpired rather than returning None,
    # so the old `if ret is None: kill()` path could never fire.
    try:
      manager_proc.wait(20)
    except subprocess.TimeoutExpired:
      manager_proc.kill()
  return cpu_ok
if __name__ == "__main__":
  # Enable openpilot params but force a fresh car-params handshake.
  set_params_enabled()
  Params().delete("CarParams")

  passed = False
  try:
    passed = test_cpu_usage()
  except Exception as e:
    print("\n\n\n", "TEST FAILED:", str(e), "\n\n\n")
  finally:
    # exit status: 0 on pass, 1 on any failure or exception
    sys.exit(int(not passed))
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,455
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/i18n.py
|
import gettext
from common.hardware import EON
from common.hardware_android import getprop
# Directory holding the compiled gettext catalogs shipped with openpilot.
locale_dir = '/data/openpilot/selfdrive/assets/locales'
# Locales with translations available (consumed by callers; not referenced in this module).
supported_language = ['en-US', 'zh-TW', 'zh-CN', 'ja-JP', 'ko-KR']
def get_locale():
  """Device locale from the Android system property on EON; 'en-US' elsewhere."""
  if not EON:
    return 'en-US'
  return getprop("persist.sys.locale")
def events():
  """Install the 'events' gettext domain for the current locale and return its gettext."""
  translation = gettext.translation(
      'events', localedir=locale_dir, fallback=True, languages=[get_locale()])
  translation.install()
  return translation.gettext
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,456
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/controls/lib/dynamic_follow/support.py
|
# mypy: ignore-errors
class LeadData:
  """Latest lead-vehicle state; measurement attributes stay None until a lead is tracked."""
  v_lead = None    # lead speed (presumably m/s -- TODO confirm units against caller)
  x_lead = None    # distance to lead (presumably m -- TODO confirm)
  a_lead = None    # lead acceleration
  status = False   # True while a lead is currently being tracked
  new_lead = False # True on the cycle a new lead is acquired
class CarData:
  """Ego-vehicle state snapshot consumed by dynamic follow."""
  v_ego = 0.0            # ego speed (presumably m/s -- TODO confirm)
  a_ego = 0.0            # ego acceleration
  left_blinker = False
  right_blinker = False
  cruise_enabled = True
class dfData:
  """Rolling history buffers for dynamic follow."""
  # NOTE(review): class-level lists are shared across all instances -- appears
  # intentional (single consumer), but verify before instantiating more than once.
  v_egos = []  # recent ego speeds
  v_rels = []  # recent relative speeds to the lead
class dfProfiles:
  """Dynamic-follow profile indices and the mappings between index and name."""
  traffic = 0
  relaxed = 1
  roadtrip = 2
  auto = 3
  to_profile = {0: 'traffic', 1: 'relaxed', 2: 'roadtrip', 3: 'auto'}
  to_idx = {v: k for k, v in to_profile.items()}  # inverse mapping: name -> index
  default = relaxed  # profile used when none has been selected
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,457
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika_repo/tests/test_fetch_sat_info.py
|
from datetime import datetime
import unittest
from laika import AstroDog
from laika.gps_time import GPSTime
class TestFetchSatInfo(unittest.TestCase):
  """Exercises AstroDog satellite-info fetching across epochs and constellations."""

  def test_fetch_data_from_distant_future(self):
    # Navigation data for year 3120 cannot exist; the fetch must raise.
    dog = AstroDog()
    far_future = GPSTime.from_datetime(datetime(3120, 1, 1))
    with self.assertRaises(RuntimeError):
      dog.get_sat_info("G01", far_future)

  def test_no_block_satellite_when_get_info_from_not_available_period(self):
    '''If you first fetch satellite info from period when navigation data
    isn't available and next from period when navigation data are available
    then you should get correct result'''
    prn = "C03"
    dog = AstroDog(pull_orbit=True, valid_const=["GPS", "BEIDOU"])
    # First query a period with no data: must yield None without poisoning state.
    unavailable = GPSTime.from_datetime(datetime(2000, 1, 1))
    self.assertIsNone(dog.get_sat_info(prn, unavailable))
    # A later query for a covered period must still succeed.
    available = GPSTime.from_datetime(datetime(2020, 5, 1, 12, 0))
    self.assertIsNotNone(dog.get_sat_info(prn, available))

  def test_get_all_sat_info_gps(self):
    query_time = GPSTime.from_datetime(datetime(2020, 5, 1, 12, 0, 0))
    configs = (
      (["GPS"], True),
      (["GPS"], False),
      (["GLONASS"], True),
      (["GLONASS"], False),
      (["BEIDOU"], True),
      (["GALILEO"], True),
      (["QZNSS"], True),
    )
    for consts, pull in configs:
      dog = AstroDog(valid_const=consts, pull_orbit=pull)
      self.assertGreater(len(dog.get_all_sat_info(query_time)), 0)
# Allow running this test module directly: `python test_fetch_sat_info.py`.
if __name__ == '__main__':
  unittest.main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,458
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/hardware.py
|
import os
import random
from typing import cast
from cereal import log
from common.hardware_android import Android
from common.hardware_tici import Tici
from common.hardware_base import HardwareBase
# Platform detection: comma devices ship a marker file at the filesystem root.
EON = os.path.isfile('/EON')    # EON / comma two device
TICI = os.path.isfile('/TICI')  # TICI / comma three device
PC = not (EON or TICI)          # anything else is treated as a development PC
ANDROID = EON                   # only the EON platform is Android-based here
# Cereal enum shorthands used by the hardware implementations below.
NetworkType = log.ThermalData.NetworkType
NetworkStrength = log.ThermalData.NetworkStrength
class Pc(HardwareBase):
  """HardwareBase implementation for desktop runs: canned, harmless answers."""

  def get_sound_card_online(self):
    # A development PC is always assumed to have working audio.
    return True

  def get_serial(self):
    return "cccccccc"

  def get_imei(self, slot):
    # Random 15-digit string; `slot` is ignored on PC.
    return "%015d" % random.randint(0, 1 << 32)

  def get_subscriber_info(self):
    return ""

  def get_sim_info(self):
    # No SIM hardware on a PC: report an absent SIM with no connectivity.
    return {
      'sim_id': '',
      'mcc_mnc': None,
      'network_type': ["Unknown"],
      'sim_state': ["ABSENT"],
      'data_connected': False
    }

  def get_network_type(self):
    return NetworkType.wifi

  def get_network_strength(self, network_type):
    return NetworkStrength.unknown

  def reboot(self, reason=None):
    # Never actually reboot a development machine.
    print("REBOOT!")
# Select the concrete hardware backend once at import time; the cast keeps
# type checkers treating HARDWARE as the common HardwareBase interface.
if EON:
  HARDWARE = cast(HardwareBase, Android())
elif TICI:
  HARDWARE = cast(HardwareBase, Tici())
else:
  HARDWARE = cast(HardwareBase, Pc())
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,459
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/interbridge/interbridged.py
|
#!/usr/bin/env python
#pylint: skip-file
# flake8: noqa
from .unisocket import MixedSocketServer
from threading import Thread
import cereal.messaging as messaging
from common.realtime import Ratekeeper
from queue import SimpleQueue as Queue
RATE = 20. # In Hz
PORT = 8989
class InterBridge:
  """Bridge between cereal messaging and a socket server.

  Forwards updates of the services listed in `publishers` to connected
  socket clients, and injects 'testJoystick' messages received from
  clients back onto the cereal bus.
  """

  # cereal services whose updates are forwarded to socket clients.
  publishers = [
    'thermal',
    'controlsState',
    'model',
    'health',
    'carState',
    'carControl',
    'plan',
    'liveLocation',
    'liveMpc',
    'liveLongitudinalMpc',
    'driverState',
    'liveParameters',
    'pathPlan',
    'carParams',
    'dMonitoringState',
    'testJoystick',
  ]

  def __init__(self, sm=None, pm=None, can_sock=None):
    # sm/pm can be injected (e.g. for tests); can_sock is accepted but never used.
    # Initialize received messages queue
    # (thread-safe handoff from the socket-server thread to the bridge thread)
    self.msgs_queue = Queue()
    # Setup sockets
    self.pm = pm
    if self.pm is None:
      self.pm = messaging.PubMaster(['testJoystick'])
    self.sm = sm
    if self.sm is None:
      self.sm = messaging.SubMaster(self.publishers)
    # Paces step() at RATE Hz.
    self.rk = Ratekeeper(RATE, print_delay_threshold=None)

  def sock_msg_received(self, client, server, msg):
    # Called from the socket-server thread; queue for the bridge thread.
    self.msgs_queue.put(msg)

  def sock_msg_send(self, msg):
    # Placeholder; rebound at runtime to the socket server's broadcast (see main()).
    pass

  def step(self):
    # Send msg from ZMQ to Socket, only if there are connected clients
    # NOTE: self.count_clients is bound in interbridged_thread() before this runs.
    if self.count_clients():
      self.sm.update(0)
      send_msg = {}
      for publisher in self.publishers:
        if self.sm.updated[publisher]:
          send_msg[publisher] = self.sm[publisher].to_dict()
          send_msg[publisher]['logMonoTime'] = self.sm.logMonoTime[publisher]
          # Hack, convert known bytes value to hex (bytes are not serializable)
          if publisher == 'carParams' and send_msg[publisher]['carFw']:
            for idx, val in enumerate(send_msg[publisher]['carFw']):
              send_msg[publisher]['carFw'][idx]['fwVersion'] = val['fwVersion'].hex()
      if send_msg:
        self.sock_msg_send(send_msg)
    # Send msg from Socket to ZMQ (only testJoystick!)
    while not self.msgs_queue.empty():
      msg = self.msgs_queue.get()
      if 'testJoystick' in msg:
        dat = messaging.new_message('testJoystick')
        testJoystick = dat.testJoystick
        testJoystick.axes = msg['testJoystick']['axes']
        testJoystick.buttons = msg['testJoystick']['buttons']
        testJoystick.enabled = msg['testJoystick']['enabled']
        testJoystick.axesMode = msg['testJoystick']['axesMode']
        self.pm.send('testJoystick', dat)

  def interbridged_thread(self, count_callback):
    # Run the bridge loop forever, paced by the Ratekeeper.
    self.count_clients = count_callback
    while True:
      self.step()
      self.rk.keep_time()
def main(sm=None, pm=None, logcan=None):
  """Wire an InterBridge to a MixedSocketServer and serve until killed."""
  bridge = InterBridge(sm, pm, logcan)
  server = MixedSocketServer('0.0.0.0:' + str(PORT))
  # Cross-wire: the server delivers inbound messages to the bridge, and the
  # bridge broadcasts outbound messages through the server.
  server.msg_received = bridge.sock_msg_received
  bridge.sock_msg_send = server.broadcast
  worker = Thread(
    name='InterBridge',
    target=bridge.interbridged_thread,
    args=(server.count_clients, ),
    daemon=True)
  worker.start()
  while True:  # Server stops on error and needs to be restarted?
    server.serve_forever()
# Script entry point.
if __name__ == "__main__":
  main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,460
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/external/simpleperf/inferno/data_types.py
|
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class CallSite:
    """A (method, dso) pair identifying one frame of a callchain."""

    def __init__(self, method, dso):
        self.method = method  # symbol name of the frame
        self.dso = dso        # name of the dso the symbol belongs to
class Thread:
    """Per-thread sample aggregation, including a flamegraph of callchains."""

    def __init__(self, tid, pid):
        self.tid = tid
        self.pid = pid
        self.name = ""
        self.samples = []
        self.flamegraph = FlameGraphCallSite("root", "", 0)
        self.num_samples = 0
        self.num_events = 0

    def add_callchain(self, callchain, symbol, sample):
        """Fold one sample's callchain into this thread's flamegraph."""
        self.name = sample.thread_comm
        self.num_samples += 1
        self.num_events += sample.period
        # Entries arrive innermost-first; walk them backwards so the chain is
        # built root-first.  Entries with a null ip are skipped.
        frames = []
        for pos in range(callchain.nr - 1, -1, -1):
            entry = callchain.entries[pos]
            if entry.ip == 0:
                continue
            frames.append(CallSite(entry.symbol.symbol_name, entry.symbol.dso_name))
        # The sampled symbol itself is the leaf of the chain.
        frames.append(CallSite(symbol.symbol_name, symbol.dso_name))
        self.flamegraph.add_callchain(frames, sample.period)
class Process:
    """Aggregates profiling samples for one process, keyed by thread id."""

    def __init__(self, name, pid):
        self.name = name
        self.pid = pid
        self.threads = {}  # tid -> Thread
        self.cmd = ""
        self.props = {}
        # num_samples is the count of samples recorded in the profiling file.
        self.num_samples = 0
        # num_events is the count of events contained in all samples. Each sample
        # carries the number of events that happened since the previous sample
        # (e.g. cpu-cycles elapsed during recording).
        self.num_events = 0

    def get_thread(self, tid, pid):
        """Return the Thread for tid, creating it on first use."""
        if tid not in self.threads:
            self.threads[tid] = Thread(tid, pid)
        return self.threads[tid]

    def add_sample(self, sample, symbol, callchain):
        """Record one sample into the owning thread and the process totals."""
        owner = self.get_thread(sample.tid, sample.pid)
        owner.add_callchain(callchain, symbol, sample)
        self.num_samples += 1
        # sample.period is the count of events happened since last sample.
        self.num_events += sample.period
class FlameGraphCallSite:
    """One node of a flamegraph tree; children are keyed by (dso, method)."""

    callsite_counter = 0

    @classmethod
    def _get_next_callsite_id(cls):
        cls.callsite_counter += 1
        return cls.callsite_counter

    def __init__(self, method, dso, id):
        # map from (dso, method) to FlameGraphCallSite. Used to speed up add_callchain().
        self.child_dict = {}
        self.children = []
        self.method = method
        self.dso = dso
        self.num_events = 0
        self.offset = 0  # Offset allows position nodes in different branches.
        self.id = id

    def weight(self):
        return float(self.num_events)

    def add_callchain(self, chain, num_events):
        """Add num_events along a root-first chain, creating nodes as needed."""
        node = self
        node.num_events += num_events
        for callsite in chain:
            node = node._get_child(callsite)
            node.num_events += num_events

    def _get_child(self, callsite):
        # Find or lazily create the child node matching callsite's (dso, method).
        key = (callsite.dso, callsite.method)
        if key not in self.child_dict:
            self.child_dict[key] = FlameGraphCallSite(callsite.method, callsite.dso,
                                                      self._get_next_callsite_id())
        return self.child_dict[key]

    def trim_callchain(self, min_num_events):
        """ Remove call sites with num_events < min_num_events in the subtree.
            Remaining children are collected in a list.
        """
        for child in self.child_dict.values():
            if child.num_events >= min_num_events:
                child.trim_callchain(min_num_events)
                self.children.append(child)
        # Release child_dict since it will not be used.
        self.child_dict = None

    def get_max_depth(self):
        """Depth of the deepest chain rooted at this node (a leaf counts as 1)."""
        if not self.children:
            return 1
        return 1 + max(child.get_max_depth() for child in self.children)

    def generate_offset(self, start_offset):
        # Lay out this subtree left-to-right: each child starts where the
        # previous one ended; the node's own span is num_events wide.
        self.offset = start_offset
        child_offset = start_offset
        for child in self.children:
            child_offset = child.generate_offset(child_offset)
        return self.offset + self.num_events
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,461
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/dp_common.py
|
#!/usr/bin/env python3.7
#import subprocess
from cereal import car
from common.params import Params
from common.realtime import sec_since_boot
import os
#import time
#from math import floor
params = Params()
from common.travis_checker import travis
#from common.dp_conf import init_params_vals
# Params are stored as one file per key.  Under CI (travis) use a per-user
# directory; on-device use the standard /data params store.
if travis:
  PARAM_PATH = str(os.environ.get('HOME')) + "/.comma/params/d/"
else:
  PARAM_PATH = "/data/params/d/"
# File whose mtime tracks the last time any dp param was modified.
LAST_MODIFIED = str(PARAM_PATH) + "dp_last_modified"
#if not os.path.exists(LAST_MODIFIED):
# os.makedirs(str(os.environ.get('HOME')) + "/.comma/params/d/", exist_ok=True)
# print("dp_last_modified is " + str(floor(time.time())))
# params.put('dp_last_modified',str(floor(time.time())))
# init_params_vals(params)
#def is_online():
#try:
#return not subprocess.call(["ping", "-W", "4", "-c", "1", "117.28.245.92"])
#except ProcessLookupError:
#return False
def common_controller_ctrl(enabled, dragonconf, blinker_on, steer_req, v_ego):
  """Return steer_req, zeroed when dp settings say steering must pause.

  While enabled, steering is suppressed if lateral control is off
  (dpLatCtrl false) or a blinker is active with dpSteeringOnSignal set.
  The suppressed value preserves steer_req's type: int -> 0, other -> False.
  v_ego is unused but kept for interface compatibility.
  """
  if not enabled:
    return steer_req
  suppress = not dragonconf.dpLatCtrl or (dragonconf.dpSteeringOnSignal and blinker_on)
  if suppress:
    return 0 if isinstance(steer_req, int) else False
  return steer_req
def common_interface_atl(ret, atl):
  """Decide whether ACC counts as enabled under the dp 'atl' toggle.

  With atl set, ACC follows cruise *availability* rather than the stock
  enabled flag; it is forced off in reverse/park, when the seatbelt is
  unlatched, or when a door is open.
  """
  # dp
  enable_acc = ret.cruiseState.enabled
  if atl and ret.cruiseState.available:
    enable_acc = True
  in_blocked_gear = ret.gearShifter in [car.CarState.GearShifter.reverse,
                                        car.CarState.GearShifter.park]
  if in_blocked_gear or ret.seatbeltUnlatched or ret.doorOpen:
    enable_acc = False
  return enable_acc
def common_interface_get_params_lqr(ret):
  """Switch lateral tuning to a fixed LQR configuration when dp_lqr is set."""
  if params.get('dp_lqr') == b'1':
    ret.lateralTuning.init('lqr')
    # Alias the sub-builder once instead of re-resolving the path per field.
    lqr = ret.lateralTuning.lqr
    lqr.scale = 1500.0
    lqr.ki = 0.05
    lqr.a = [0., 1., -0.22619643, 1.21822268]
    lqr.b = [-1.92006585e-04, 3.95603032e-05]
    lqr.c = [1., 0.]
    lqr.k = [-110.73572306, 451.22718255]
    lqr.l = [0.3233671, 0.3185757]
    lqr.dcGain = 0.002237852961363602
  return ret
def get_last_modified(delay, old_check, old_modified):
  """Rate-limited mtime poll of the dp_last_modified file.

  Re-stats the file only when at least `delay` seconds have elapsed since
  old_check (or on the very first call); otherwise the cached
  (check_time, mtime) pair is returned unchanged.
  """
  now = sec_since_boot()
  if old_check is not None and now - old_check < delay:
    return old_check, old_modified
  return now, os.stat(LAST_MODIFIED).st_mtime
def param_get_if_updated(param, type_of, old_val, old_modified):
  """Re-read `param` only when its backing file's mtime has changed.

  Returns (value, mtime).  If the param file cannot be stat'ed (e.g. it
  does not exist yet), the old pair is returned untouched.
  """
  try:
    modified = os.stat(PARAM_PATH + param).st_mtime
  except OSError:
    return old_val, old_modified
  if old_modified == modified:
    return old_val, old_modified
  return param_get(param, type_of, old_val), modified
def param_get(param_name, type_of, default):
  """Read a param and coerce it to `type_of` ('bool', 'int', 'float'; anything
  else is returned as the raw string).

  Returns `default` when the param is missing or its value is malformed.

  Fix: Params.get() yields None for a missing key, so the trailing
  .rstrip() raised AttributeError, which the original
  (TypeError, ValueError) handler did not catch — callers crashed on the
  first read of an unset param.  AttributeError is now treated like the
  other conversion failures.
  """
  try:
    val = params.get(param_name, encoding='utf8').rstrip('\x00')
    if type_of == 'bool':
      val = val == '1'
    elif type_of == 'int':
      val = int(val)
    elif type_of == 'float':
      val = float(val)
  except (TypeError, ValueError, AttributeError):
    val = default
  return val
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,462
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika_repo/laika/helpers.py
|
import warnings
import numpy as np
from .lib.coordinates import LocalCoord
# From https://gpsd.gitlab.io/gpsd/NMEA.html - Satellite IDs section
# Ordered table of inclusive NMEA satellite-id ranges and the constellation
# each range belongs to.  Entries MUST stay sorted by ascending range start:
# get_prn_from_nmea_id/get_nmea_id_from_prn walk this list in order and rely
# on that ordering to detect gaps and to accumulate per-constellation offsets.
NMEA_ID_RANGES = (
  {
    'range': (1, 32),
    'constellation': 'GPS'
  },
  {
    'range': (33, 54),
    'constellation': 'SBAS'
  },
  {
    'range': (55, 64),
    'constellation': 'SBAS'
  },
  {
    'range': (65, 88),
    'constellation': 'GLONASS'
  },
  {
    'range': (89, 96),
    'constellation': 'GLONASS'
  },
  {
    'range': (120, 151),
    'constellation': 'SBAS'
  },
  {
    'range': (152, 158),
    'constellation': 'SBAS'
  },
  {
    'range': (173, 182),
    'constellation': 'IMES'
  },
  {
    'range': (193, 197),
    'constellation': 'QZNSS'
  },
  {
    'range': (198, 200),
    'constellation': 'QZNSS'
  },
  {
    'range': (201, 235),
    'constellation': 'BEIDOU'
  },
  {
    'range': (301, 336),
    'constellation': 'GALILEO'
  },
  {
    'range': (401, 437),
    'constellation': 'BEIDOU'
  }
)
# Source: RINEX 3.04
# Single-letter RINEX 3 system identifiers per constellation.
RINEX_CONSTELLATION_IDENTIFIERS = {
  'GPS': 'G',
  'GLONASS': 'R',
  'SBAS': 'S',
  'GALILEO': 'E',
  'BEIDOU': 'C',
  'QZNSS': 'J',
  'IRNSS': 'I'
}
# Extend the mapping in place so it works in both directions, e.g.:
#   >>> RINEX_CONSTELLATION_IDENTIFIERS['R']
#   "GLONASS"
RINEX_CONSTELLATION_IDENTIFIERS.update(
  {letter: name for name, letter in list(RINEX_CONSTELLATION_IDENTIFIERS.items())}  # type: ignore
)
def get_el_az(pos, sat_pos):
  """Return (elevation, azimuth) in radians of sat_pos seen from pos (both ECEF)."""
  local_frame = LocalCoord.from_ecef(pos)
  ned = local_frame.ecef2ned(sat_pos)
  slant_range = np.linalg.norm(ned)
  # NED: down is positive, so elevation uses the negated third component.
  elevation = np.arcsin(-ned[2] / slant_range)  # pylint: disable=unsubscriptable-object
  azimuth = np.arctan2(ned[1], ned[0])  # pylint: disable=unsubscriptable-object
  return elevation, azimuth
def get_closest(time, candidates, recv_pos=None):
  """Pick the best candidate for `time`.

  Without recv_pos: candidates must expose an `epoch` (GPSTime); the one
  nearest in time is returned.  With recv_pos: candidates must expose `pos`
  and `valid(time, recv_pos)`; the nearest valid one in space is returned.
  Returns None when nothing qualifies.
  """
  best = None
  if recv_pos is None:
    best_delta = np.inf
    for cand in candidates:
      delta = abs(time - cand.epoch)
      if delta < best_delta:
        best = cand
        best_delta = delta
    return best
  best_dist = np.inf
  for cand in candidates:
    dist = np.linalg.norm(recv_pos - cand.pos)
    if dist < best_dist and cand.valid(time, recv_pos):
      best_dist = dist
      best = cand
  return best
def get_constellation(prn):
  """Return the constellation name for a RINEX PRN like 'G01', or None
  (with a warning) when the leading system letter is not recognised."""
  system_letter = prn[0]
  constellation = RINEX_CONSTELLATION_IDENTIFIERS.get(system_letter)
  if constellation is None:
    warnings.warn("Unknown constellation for PRN %s" % prn)
  return constellation
def get_unknown_prn_from_nmea_id(nmea_id):
  """Build the '?<id>' placeholder PRN for an NMEA id with no known RINEX PRN."""
  placeholder = "?%d" % nmea_id
  return placeholder
def get_nmea_id_from_unknown_prn(prn):
  """Parse the NMEA id back out of a '?<id>' placeholder PRN."""
  digits = prn[1:]
  return int(digits)
def is_unknown_prn(prn):
  """True when `prn` is a '?<id>' placeholder rather than a real RINEX PRN."""
  first_char = prn[0]
  return first_char == '?'
def get_prn_from_nmea_id(nmea_id):
  """Translate an NMEA satellite id into a RINEX PRN string ('G01', 'R05', ...).

  Walks NMEA_ID_RANGES in ascending order, accumulating how many ids each
  constellation has consumed in earlier ranges so the satellite number keeps
  counting across split ranges.  Falls back to a '?<id>' placeholder (with a
  warning) for ids in gaps or with unresolvable constellations.
  """
  consumed = {}  # constellation name -> ids used by earlier ranges
  for nmea_range in NMEA_ID_RANGES:
    lo, hi = nmea_range['range']
    constellation = nmea_range['constellation']
    if nmea_id < lo:
      # Ranges are sorted ascending, so the id sits in a gap between ranges.
      warnings.warn("RINEX PRN for nmea id %i not known" % nmea_id)
      return get_unknown_prn_from_nmea_id(nmea_id)
    offset = consumed.get(constellation, 0)
    if nmea_id > hi:
      # Past this range: record the ids it used and keep scanning.
      consumed[constellation] = offset + (hi - lo + 1)
      continue
    if constellation is None:
      warnings.warn("Constellation for nmea id %i not known" % nmea_id)
      return get_unknown_prn_from_nmea_id(nmea_id)
    system_letter = RINEX_CONSTELLATION_IDENTIFIERS.get(constellation)
    if system_letter is None:
      warnings.warn("RINEX3 constellation identifier for constellation %s is not known" % constellation)
      return get_unknown_prn_from_nmea_id(nmea_id)
    return "%s%02d" % (system_letter, nmea_id - lo + 1 + offset)
  warnings.warn("RINEX PRN for nmea id %i not known" % nmea_id)
  return get_unknown_prn_from_nmea_id(nmea_id)
def get_nmea_id_from_prn(prn):
  """Translate a RINEX PRN string back into its NMEA satellite id.

  Placeholder '?<id>' PRNs decode directly.  Otherwise the satellite number
  is located within this constellation's (possibly split) NMEA id ranges.
  Raises ValueError for non-positive satellite numbers and
  NotImplementedError when no range covers the PRN.
  """
  if is_unknown_prn(prn):
    return get_nmea_id_from_unknown_prn(prn)
  target = get_constellation(prn)
  sat_number = int(prn[1:])
  if sat_number < 1:
    raise ValueError("PRN must contains number greater then 0")
  skipped = 0  # ids of this constellation consumed by earlier ranges
  for nmea_range in NMEA_ID_RANGES:
    if nmea_range['constellation'] != target:
      continue
    lo, hi = nmea_range['range']
    width = hi - lo + 1
    index_in_range = sat_number - skipped - 1
    if index_in_range < width:
      return lo + index_in_range
    skipped += width
  raise NotImplementedError("NMEA ID not found for PRN %s" % prn)
def rinex3_obs_from_rinex2_obs(observable):
  """Map a RINEX 2 observable code to its RINEX 3 equivalent.

  'P2' is a special case ('C2P'); other two-character codes gain a 'C'
  tracking-mode suffix.  Anything else raises NotImplementedError.
  """
  if observable == 'P2':
    return 'C2P'
  if len(observable) != 2:
    raise NotImplementedError("Don't know this: " + observable)
  return observable + 'C'
class TimeRangeHolder:
  '''Maintains a sorted collection of disjoint [start, end] ranges and
  answers membership tests against any of them (multiple, sparse ranges).'''

  def __init__(self):
    # Disjoint ranges, kept sorted by start time.
    self._ranges = []

  def _previous_and_contains_index(self, time):
    """Return (index of last range lying entirely before `time`,
    index of the range containing `time`, or None)."""
    before = None
    containing = None
    for idx, (lo, hi) in enumerate(self._ranges):
      if time > hi:
        # This range ends before `time`; keep scanning.
        continue
      if time < lo:
        # `time` sits in the gap before this range.
        before = idx - 1
        containing = None
      else:
        before = idx - 1
        containing = idx
      break
    if before is None:
      # Loop ran off the end: `time` is past every range (or list is empty).
      before = len(self._ranges) - 1
    return before, containing

  def add(self, start_time, end_time):
    """Insert [start_time, end_time], merging or extending overlapping ranges."""
    before_start, in_start = self._previous_and_contains_index(start_time)
    _, in_end = self._previous_and_contains_index(end_time)
    if in_start is not None and in_end is not None:
      # Both endpoints covered; bridge the two ranges if they differ.
      if in_start != in_end:
        merged = (self._ranges[in_start][0], self._ranges[in_end][1])
        # Delete higher index first so the lower one stays valid.
        del self._ranges[in_end]
        del self._ranges[in_start]
        self._ranges.insert(in_start, merged)
    elif in_start is not None:
      # Start inside an existing range: stretch it rightward to end_time.
      stretched = (self._ranges[in_start][0], end_time)
      del self._ranges[in_start]
      self._ranges.insert(in_start, stretched)
    elif in_end is not None:
      # End inside an existing range: stretch it leftward to start_time.
      stretched = (start_time, self._ranges[in_end][1])
      del self._ranges[in_end]
      self._ranges.insert(before_start + 1, stretched)
    else:
      # No overlap at all: insert a brand-new range in sorted position.
      self._ranges.insert(before_start + 1, (start_time, end_time))

  def __contains__(self, time):
    for lo, hi in self._ranges:
      if time > hi:
        continue
      # First range not entirely before `time`: inside iff past its start.
      return lo <= time
    return False
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,463
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/carcontrols/debug_controls.py
|
#!/usr/bin/env python
from common.numpy_fast import clip
from common.params import Params
from copy import copy
from cereal import car, log
import cereal.messaging as messaging
from selfdrive.car.car_helpers import get_car, get_one_can
from selfdrive.boardd.boardd import can_list_to_can_capnp
# Shorthand for the panda hardware-type enum from the health message schema.
HwType = log.HealthData.HwType
def steer_thread():
  """Drive the car from a joystick for debugging.

  Waits for CAN traffic, fingerprints the car, then loops forever: reads the
  latest 'testJoystick' message, maps its axes/buttons onto CarControl
  actuators, sends the resulting CAN frames, and republishes carState and
  carControl for observers.  Runs until killed.
  """
  poller = messaging.Poller()
  logcan = messaging.sub_sock('can')
  joystick_sock = messaging.sub_sock('testJoystick', conflate=True, poller=poller)
  carstate = messaging.pub_sock('carState')
  carcontrol = messaging.pub_sock('carControl')
  sendcan = messaging.pub_sock('sendcan')
  # Edge detection for the enable-toggle button.
  button_1_last = 0
  enabled = False
  # wait for CAN packets
  print("Waiting for CAN messages...")
  get_one_can(logcan)
  CI, CP = get_car(logcan, sendcan)
  # Persist fingerprinted CarParams so other daemons can read them.
  Params().put("CarParams", CP.to_bytes())
  CC = car.CarControl.new_message()
  while True:
    # send
    joystick = messaging.recv_one(joystick_sock)
    can_strs = messaging.drain_sock_raw(logcan, wait_for_one=True)
    CS = CI.update(CC, can_strs)
    # Usually axis run in pairs, up/down for one, and left/right for
    # the other.
    actuators = car.CarControl.Actuators.new_message()
    if joystick is not None:
      # Axis 3 -> steering; scaled slightly past full deflection then clipped.
      axis_3 = clip(-joystick.testJoystick.axes[3] * 1.05, -1., 1.)  # -1 to 1
      actuators.steer = axis_3
      actuators.steerAngle = axis_3 * 43.  # deg
      # Axis 1 -> longitudinal: positive half is gas, negative half is brake.
      axis_1 = clip(-joystick.testJoystick.axes[1] * 1.05, -1., 1.)  # -1 to 1
      actuators.gas = max(axis_1, 0.)
      actuators.brake = max(-axis_1, 0.)
      pcm_cancel_cmd = joystick.testJoystick.buttons[0]
      button_1 = joystick.testJoystick.buttons[1]
      # Rising edge of button 1 toggles the enabled state.
      if button_1 and not button_1_last:
        enabled = not enabled
      button_1_last = button_1
      hud_alert = 0
      if joystick.testJoystick.buttons[3]:
        hud_alert = "steerRequired"
    # NOTE(review): if no joystick message has arrived yet, pcm_cancel_cmd and
    # hud_alert below are referenced before assignment — confirm a message
    # always precedes this point, or presume this tool is only run with an
    # active joystick publisher.
    CC.actuators.gas = actuators.gas
    CC.actuators.brake = actuators.brake
    CC.actuators.steer = actuators.steer
    CC.actuators.steerAngle = actuators.steerAngle
    CC.hudControl.visualAlert = hud_alert
    CC.hudControl.setSpeed = 20
    CC.cruiseControl.cancel = pcm_cancel_cmd
    CC.enabled = enabled
    can_sends = CI.apply(CC)
    sendcan.send(can_list_to_can_capnp(can_sends, msgtype='sendcan'))
    # broadcast carState
    cs_send = messaging.new_message('carState')
    cs_send.carState = copy(CS)
    carstate.send(cs_send.to_bytes())
    # broadcast carControl
    cc_send = messaging.new_message('carControl')
    cc_send.carControl = copy(CC)
    carcontrol.send(cc_send.to_bytes())
# Script entry point: run the joystick steering loop until interrupted.
if __name__ == "__main__":
  steer_thread()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,464
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/debug/can_compare.py
|
#!/usr/bin/env python3
import binascii
import os
import sys
from collections import defaultdict
import cereal.messaging as messaging
from common.realtime import sec_since_boot
def can_compare(bus=0, max_msg=None, addr="127.0.0.1"):
  """Interactively diff CAN traffic across a vehicle state change.

  Captures ~5 seconds of CAN messages three times: in the initial state,
  after the user changes state, and after changing back.  Then prints, per
  address, payloads that differed in the middle capture but matched the
  baseline again in the final capture — i.e. bytes that track the toggled
  state.

  bus: default CAN bus index (the CAN environment variable overrides it)
  max_msg: accepted but unused here — presumably kept for interface parity
           with sibling debug tools; verify against callers
  addr: host publishing the 'can' socket
  """
  logcan = messaging.sub_sock('can', addr=addr)
  start = sec_since_boot()
  msgs = defaultdict(list)
  canbus = int(os.getenv("CAN", bus))
  # Capture 1: baseline state.
  while sec_since_boot()-start < 5.0:
    can_recv = messaging.drain_sock(logcan, wait_for_one=True)
    for x in can_recv:
      for y in x.can:
        if y.src == canbus:
          msgs[y.address].append(y.dat)
  # SyntaxError guard is a leftover from the Python 2 raw_input idiom;
  # input() on Python 3 does not raise it for empty input.
  try:
    input("Change State and press Enter to continue...")
  except SyntaxError:
    pass
  start = sec_since_boot()
  msgs2 = defaultdict(list)
  # Capture 2: changed state.
  while sec_since_boot()-start < 5.0:
    can_recv = messaging.drain_sock(logcan, wait_for_one=True)
    for x in can_recv:
      for y in x.can:
        if y.src == canbus:
          msgs2[y.address].append(y.dat)
  try:
    input("Change State back and press Enter to continue...")
  except SyntaxError:
    pass
  start = sec_since_boot()
  msgs3 = defaultdict(list)
  # Capture 3: back to the original state.
  while sec_since_boot()-start < 5.0:
    can_recv = messaging.drain_sock(logcan, wait_for_one=True)
    for x in can_recv:
      for y in x.can:
        if y.src == canbus:
          msgs3[y.address].append(y.dat)
  # ANSI clear-screen, then elapsed time header.
  dd = chr(27) + "[2J"
  dd += "%5.2f\n" % (sec_since_boot() - start)
  # For each baseline address (v = hex of its last payload), print addresses
  # whose payload changed in capture 2 but returned to the baseline value in
  # capture 3.  The list(...).index(k) dance looks up k's position in msgs2/
  # msgs3 to fetch the matching value; ValueError (address absent from a
  # capture) skips the address.  NOTE(review): these lookups are O(n) per
  # address — fine for a debug tool, but msgs2[k]/msgs3[k] would be simpler.
  for k,v in sorted(zip(msgs.keys(), map(lambda x: binascii.hexlify(x[-1]), msgs.values()))):
    try:
      if binascii.hexlify(list(msgs2.values())[list(msgs2).index(k)][-1]) != binascii.hexlify(list(msgs3.values())[list(msgs3).index(k)][-1]) and v == binascii.hexlify(list(msgs3.values())[list(msgs3).index(k)][-1]):
        # First line: baseline payload; second line: the changed-state payload.
        dd += "%s(%6d) %s\n" % ("%04X(%4d)" % (k,k),len(msgs[k]), v.decode('ascii'))
        w = binascii.hexlify(list(msgs2.values())[list(msgs2).index(k)][-1])
        dd +="%s(%6d) %s\n" % ("%04X(%4d)" % (k,k),len(msgs[k]), w.decode('ascii'))
    except ValueError:
      pass
  print(dd)
# Script entry point: up to three positional args — bus (int), max_msg (int)
# and the publisher address (str) — are forwarded to can_compare().
if __name__ == "__main__":
  cli_args = sys.argv[1:4]
  parsed = [int(a) for a in cli_args[:2]] + cli_args[2:]
  can_compare(*parsed)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,465
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika/downloader.py
|
import gzip
import os
import urllib.request
from datetime import datetime
from urllib.parse import urlparse
from .constants import SECS_IN_DAY, SECS_IN_WEEK
from .gps_time import GPSTime
from .unlzw import unlzw
def ftpcache_path(url):
  """Rewrite an FTP url into its comma ftpcache HTTP mirror equivalent
  (dots in the hostname become dashes in the mirror path)."""
  parsed = urlparse(url)
  mirror_host = parsed.netloc.replace(".", "-")
  return 'http://ftpcache.comma.life/' + mirror_host + parsed.path
def download_file(url_base, folder_path, cacheDir, filename, compression='', overwrite=False):
  """Fetch url_base+folder_path+filename(+compression) into the local cache.

  Tries the comma ftpcache HTTP mirror first (5s timeout), then the origin
  url.  The raw download is written next to the cache path; compressed
  downloads ('.gz' or '.Z') are then decompressed to the final path.

  Returns the path of the (uncompressed) cached file.
  Raises IOError when both downloads fail, NotImplementedError for an
  unsupported compression suffix.

  Fix vs. original: file handles and the HTTP response are now closed via
  context managers even when a read/decompress step raises (the original
  leaked them on error paths).
  """
  folder_path_abs = os.path.join(cacheDir, folder_path)
  filename_zipped = filename + compression
  filepath = os.path.join(folder_path_abs, filename)
  filepath_zipped = os.path.join(folder_path_abs, filename_zipped)

  url = url_base + folder_path + filename_zipped
  url_cache = ftpcache_path(url)
  if not os.path.isfile(filepath) or overwrite:
    # exist_ok avoids a race if two processes create the cache dir at once.
    os.makedirs(folder_path_abs, exist_ok=True)
    # Try the cache mirror first, falling back to the origin server.
    try:
      print("pulling from", url_cache, "to", filepath)
      with urllib.request.urlopen(url_cache, timeout=5) as urlf:
        data_zipped = urlf.read()
    except IOError:
      print("cache download failed, pulling from", url, "to", filepath)
      try:
        with urllib.request.urlopen(url) as urlf:
          data_zipped = urlf.read()
      except IOError:
        raise IOError("Could not download file from: " + url)
    with open(filepath_zipped, 'wb') as wf:
      wf.write(data_zipped)

  if compression == '':
    return filepath_zipped
  elif compression == '.gz':
    with gzip.open(filepath_zipped, 'rb') as f:
      uncompressed_data = f.read()
  elif compression == '.Z':
    with open(filepath_zipped, 'rb') as f:
      uncompressed_data = unlzw(f.read())
  else:
    raise NotImplementedError('unknown compression: ', compression)
  with open(filepath, 'wb') as f:
    f.write(uncompressed_data)
  return filepath
def download_nav(time, cache_dir, constellation='GPS'):
  """Download a broadcast navigation (ephemeris) file for `time`.

  Times older than a day come from the CDDIS daily archive; recent times use
  the hourly archive (GPS only), overwriting any cached copy since the hourly
  file grows through the day.  Returns the local file path, or None when the
  download fails or the constellation isn't handled.
  """
  t = time.as_datetime()
  try:
    if GPSTime.from_datetime(datetime.utcnow()) - time > SECS_IN_DAY:
      url_base = 'ftp://cddis.gsfc.nasa.gov/gnss/data/daily/'
      cache_subdir = cache_dir + 'daily_nav/'
      if constellation =='GPS':
        filename = t.strftime("brdc%j0.%yn")
        folder_path = t.strftime('%Y/%j/%yn/')
      elif constellation =='GLONASS':
        filename = t.strftime("brdc%j0.%yg")
        folder_path = t.strftime('%Y/%j/%yg/')
      # NOTE(review): for any other constellation, filename/folder_path are
      # unbound here and this raises UnboundLocalError, which the
      # `except IOError` below does NOT catch — confirm callers only pass
      # 'GPS' or 'GLONASS'.
      return download_file(url_base, folder_path, cache_subdir, filename, compression='.Z')
    else:
      url_base = 'ftp://cddis.gsfc.nasa.gov/gnss/data/hourly/'
      cache_subdir = cache_dir + 'hourly_nav/'
      if constellation =='GPS':
        filename = t.strftime("hour%j0.%yn")
        folder_path = t.strftime('%Y/%j/')
        return download_file(url_base, folder_path, cache_subdir, filename, compression='.Z', overwrite=True)
  except IOError:
    # Best effort: a failed download simply yields None.
    pass
def download_orbits(time, cache_dir):
  """Download IGS precise orbit (sp3) files for `time` and its neighbouring days.

  For each day, candidate products are tried best-quality first — final
  ('igs', only once older than 3 weeks), rapid ('igr'), then ultra-rapid
  ('igu') at successively earlier epochs — and the first one that downloads
  wins.  Returns the list of downloaded file paths (possibly empty).

  Refactor vs. original: six copy-pasted try/except blocks collapsed into a
  single loop over the candidate filenames; attempt order and behavior are
  unchanged.
  """
  cache_subdir = cache_dir + 'cddis_products/'
  url_base = 'ftp://cddis.gsfc.nasa.gov/gnss/products/'
  downloaded_files = []
  for time in [time - SECS_IN_DAY, time, time + SECS_IN_DAY]:
    folder_path = "%i/" % (time.week)
    candidates = []
    # Final orbits only exist once the data is about 3 weeks old.
    if GPSTime.from_datetime(datetime.utcnow()) - time > 3*SECS_IN_WEEK:
      candidates.append("igs%i%i.sp3" % (time.week, time.day))
    candidates += [
      "igr%i%i.sp3" % (time.week, time.day),
      "igu%i%i_18.sp3" % (time.week, time.day),
      "igu%i%i_12.sp3" % (time.week, time.day),
      "igu%i%i_06.sp3" % (time.week, time.day),
      "igu%i%i_00.sp3" % (time.week, time.day),
    ]
    for filename in candidates:
      try:
        downloaded_files.append(download_file(url_base, folder_path, cache_subdir, filename, compression='.Z'))
        break  # best available product found for this day
      except IOError:
        pass  # try the next-best product
  return downloaded_files
def download_orbits_russia(time, cache_dir):
  """Download GLONASS precise orbit (sp3) files from the Russian IAC for
  `time` and its neighbouring days.

  Products are tried best-quality first: final (only once older than 2
  weeks), then rapid, then ultra-rapid; the first successful download per
  day is kept.  Returns the list of downloaded file paths (possibly empty).

  Fix vs. original: the rapid branch was missing a `continue`, so a
  successful rapid download still fell through and also fetched the ultra
  product for the same day (unlike the final branch, which skipped ahead).
  Each day now stops at the first product that downloads.
  """
  cache_subdir = cache_dir + 'russian_products/'
  url_base = 'ftp://ftp.glonass-iac.ru/MCC/PRODUCTS/'
  downloaded_files = []
  for time in [time - SECS_IN_DAY, time, time + SECS_IN_DAY]:
    t = time.as_datetime()
    subfolders = []
    # Final products only exist once the data is about 2 weeks old.
    if GPSTime.from_datetime(datetime.utcnow()) - time > 2*SECS_IN_WEEK:
      subfolders.append(t.strftime('%y%j/final/'))
    subfolders += [t.strftime('%y%j/rapid/'), t.strftime('%y%j/ultra/')]
    filename = "Sta%i%i.sp3" % (time.week, time.day)
    for folder_path in subfolders:
      try:
        downloaded_files.append(download_file(url_base, folder_path, cache_subdir, filename))
        break  # best available product found for this day
      except IOError:
        pass  # try the next-best product
  return downloaded_files
def download_ionex(time, cache_dir):
  """Download an ionosphere TEC map (ionex) for `time`.

  Tries the final ('codg') product first, then the one- and two-day
  predicted products ('c1pg', 'c2pg').  Returns the local path of the first
  file that downloads; re-raises the last IOError when all attempts fail.
  """
  cache_subdir = cache_dir + 'ionex/'
  t = time.as_datetime()
  url_base = 'ftp://cddis.gsfc.nasa.gov/gnss/products/ionex/'
  folder_path = t.strftime('%Y/%j/')
  product_names = (t.strftime("codg%j0.%yi"), t.strftime("c1pg%j0.%yi"), t.strftime("c2pg%j0.%yi"))
  last_err = None
  for filename in product_names:
    try:
      return download_file(url_base, folder_path, cache_subdir, filename, compression='.Z')
    except IOError as e:
      last_err = e
  raise last_err
def download_dcb(time, cache_dir):
  """Download a multi-GNSS differential code bias (DCB) file near `time`.

  DCB products have frequent gaps, so the requested day and up to 13
  preceding days are tried in order.  Returns the local path of the first
  file that downloads; re-raises the last IOError when every day fails.
  """
  cache_subdir = cache_dir + 'dcb/'
  url_base = 'ftp://cddis.nasa.gov/gnss/products/bias/'
  last_err = None
  # seem to be a lot of data missing, so try many days
  for day in [time - i*SECS_IN_DAY for i in range(14)]:
    try:
      t = day.as_datetime()
      folder_path = t.strftime('%Y/')
      filename = t.strftime("CAS0MGXRAP_%Y%j0000_01D_01D_DCB.BSX")
      return download_file(url_base, folder_path, cache_subdir, filename, compression='.gz')
    except IOError as e:
      last_err = e
  raise last_err
def download_cors_coords(cache_dir):
  """Download every CORS station coordinate file from the NOAA FTP listing.

  Scrapes the directory listing for '*coord.txt' entries and downloads each
  one into the cache.  Returns the list of local file paths.
  """
  cache_subdir = cache_dir + 'cors_coord/'
  url_base = 'ftp://geodesy.noaa.gov/cors/coord/coord_08/'
  url_path = urllib.request.urlopen(url_base)
  # NOTE(review): str(bytes) yields "b'...'" with literal backslash escapes,
  # which is why the listing is split on '\\r\\n' rather than a real CRLF —
  # fragile; decoding the response would be the cleaner fix. Each listing row
  # ends with the file name, hence split()[-1].
  file_names = [file_string.split()[-1] for file_string in str(url_path.read()).split('\\r\\n') if len(file_string) > 5]
  # Keep only the per-station coordinate files.
  file_names = [file_name for file_name in file_names if file_name[-9:] == 'coord.txt']
  filepaths = []
  for file_name in file_names:
    filepaths.append(download_file(url_base, '', cache_subdir, file_name))
  return filepaths
def download_cors_station(time, station_name, cache_dir):
  """Download a CORS RINEX observation file for one station and day.

  Returns the local file path, or None (after printing a notice) when the
  file could not be downloaded.
  """
  cache_subdir = cache_dir + 'cors_obs/'
  t = time.as_datetime()
  folder_path = t.strftime('%Y/%j/') + station_name + '/'
  filename = station_name + t.strftime("%j0.%yo")
  url_base = 'ftp://geodesy.noaa.gov/cors/rinex/'
  try:
    return download_file(url_base, folder_path, cache_subdir, filename, compression='.gz')
  except IOError:
    print("File not downloaded, check availability on server.")
    return None
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,466
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika/__init__.py
|
#from .astro_dog import AstroDog
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,467
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/op_params.py
|
#!/usr/bin/env python3
import os
import json
from common.travis_checker import travis
from common.colors import opParams_error as error
from common.colors import opParams_warning as warning
try:
from common.realtime import sec_since_boot
except ImportError:
import time
sec_since_boot = time.time
warning("Using python time.time() instead of faster sec_since_boot")
class ValueTypes:
number = [float, int]
none_or_number = [type(None), float, int]
class Param:
def __init__(self, default=None, allowed_types=[], description=None, live=False, hidden=False, depends_on=None): # pylint: disable=dangerous-default-value
self.default = default
if not isinstance(allowed_types, list):
allowed_types = [allowed_types]
self.allowed_types = allowed_types
self.description = description
self.hidden = hidden
self.live = live
self.depends_on = depends_on
self.children = []
self._create_attrs()
def is_valid(self, value):
if not self.has_allowed_types:
return True
if self.is_list and isinstance(value, list):
for v in value:
if type(v) not in self.allowed_types:
return False
return True
else:
return type(value) in self.allowed_types or value in self.allowed_types
def _create_attrs(self): # Create attributes and check Param is valid
self.has_allowed_types = isinstance(self.allowed_types, list) and len(self.allowed_types) > 0
self.has_description = self.description is not None
self.is_list = list in self.allowed_types
self.is_bool = bool in self.allowed_types
if self.has_allowed_types:
assert type(self.default) in self.allowed_types or self.default in self.allowed_types, 'Default value type must be in specified allowed_types!'
if self.is_list and self.default:
for v in self.default:
assert type(v) in self.allowed_types, 'Default value type must be in specified allowed_types!'
class opParams:
def __init__(self):
"""
To add your own parameter to opParams in your fork, simply add a new entry in self.fork_params, instancing a new Param class with at minimum a default value.
The allowed_types and description args are not required but highly recommended to help users edit their parameters with opEdit safely.
- The description value will be shown to users when they use opEdit to change the value of the parameter.
- The allowed_types arg is used to restrict what kinds of values can be entered with opEdit so that users can't crash openpilot with unintended behavior.
(setting a param intended to be a number with a boolean, or viceversa for example)
Limiting the range of floats or integers is still recommended when `.get`ting the parameter.
When a None value is allowed, use `type(None)` instead of None, as opEdit checks the type against the values in the arg with `isinstance()`.
- Finally, the live arg tells both opParams and opEdit that it's a live parameter that will change. Therefore, you must place the `op_params.get()` call in the update function so that it can update.
Here's an example of a good fork_param entry:
self.fork_params = {'camera_offset': Param(default=0.06, allowed_types=VT.number)} # VT.number allows both floats and ints
"""
VT = ValueTypes()
self.fork_params = {'awareness_factor': Param(10., VT.number, 'Multiplier for the awareness times'),
#'alca_min_speed': Param(20, VT.number, 'Speed limit to start ALC in MPH'),
#'alca_nudge_required': Param(False, bool, "Require nudge to start ALC"),
#'autoUpdate': Param(True, bool, 'Whether to auto-update'),
#'camera_offset': Param(0.06, VT.number, 'Your camera offset to use in lane_planner.py', live=True),
'curvature_factor': Param(1.2, VT.number, 'Multiplier for the curvature slowdown. Increase for less braking.'),
'cloak': Param(True, bool, "make comma believe you are on their fork"),
#'corolla_tss2_d_tuning': Param(False, bool, 'lateral tuning using PID w/ true derivative'),
'default_brake_distance': Param(250.0, VT.number, 'Distance in m to start braking for mapped speeds.'),
#'enable_long_derivative': Param(False, bool, 'If you have longitudinal overshooting, enable this! This enables derivative-based\n'
# 'integral wind-down to help reduce overshooting within the long PID loop'),
#'dynamic_follow': Param('normal', str, "Can be: ('close', 'normal', 'far'): Left to right increases in following distance.\n"
#"All profiles support dynamic follow so you'll get your preferred distance while\n"
#"retaining the smoothness and safety of dynamic follow!", live=True),
#'eco_mode': Param(False, bool, "Default to eco instead of normal."),
#'force_pedal': Param(False, bool, "If openpilot isn't recognizing your comma pedal, set this to True"),
#'global_df_mod': Param(None, VT.none_or_number, 'The multiplier for the current distance used by dynamic follow. The range is limited from 0.85 to 1.2\n'
#'Smaller values will get you closer, larger will get you farther\n'
#'This is multiplied by any profile that\'s active. Set to None to disable', live=True),
#'hide_auto_df_alerts': Param(True, bool, 'Hides the alert that shows what profile the model has chosen'),
#'hotspot_on_boot': Param(False, bool, 'Enable Hotspot On Boot'),
'keep_openpilot_engaged': Param(True, bool, 'True is stock behavior in this fork. False lets you use the brake and cruise control stalk to disengage as usual'),
#'lat_d': Param(9.0, VT.number, 'The lateral derivative gain, default is 9.0 for TSS2 Corolla. This is active at all speeds', live=True),
'limit_rsa': Param(False, bool, "Switch off RSA above rsa_max_speed"),
'interbridged': Param(False, bool, "ONLY USE IT FOR TESTING PURPOSE. You are responsible for your own action. we do not recommend using it if you don't know what youre doing"),
#'ludicrous_mode': Param(False, bool, 'Double overall acceleration!'),
'mpc_offset': Param(0.0, VT.number, 'Offset model braking by how many m/s. Lower numbers equals more model braking', live=True),
#'NoctuaMode': Param(False, bool, 'Noctua Fan are super quite and they run at full speed at all time.'),
'offset_limit': Param(0, VT.number, 'Speed at which apk percent offset will work in m/s'),
'osm': Param(True, bool, 'Whether to use OSM for drives'),
'prius_pid': Param(False, bool, 'This enables the PID lateral controller with new a experimental derivative tune\nFalse: stock INDI, True: TSS2-tuned PID'),
'rolling_stop': Param(False, bool, 'If you do not want stop signs to go down to 0 kph enable this for 9kph slow down'),
'rsa_max_speed': Param(24.5, VT.number, 'Speed limit to ignore RSA in m/s'),
'smart_speed': Param(True, bool, 'Whether to use Smart Speed for drives above smart_speed_max_vego'),
'smart_speed_max_vego': Param(26.8, VT.number, 'Speed limit to ignore Smartspeed in m/s'),
#'spairrowtuning': Param(False, bool, 'INDI Tuning for Corolla Tss2, set steer_up_15 param to True and flash panda'),
'speed_offset': Param(0, VT.number, 'Speed limit offset in m/s', live=True),
'steer_actuator_delay': Param(0.5, VT.number, 'The steer actuator delay', live=True),
#'steer_up_15': Param(False, bool, 'Increase rate of steering up to 15, may fault on some cars'),
#'traffic_light_alerts': Param(False, bool, "Switch off the traffic light alerts"),
'traffic_lights': Param(False, bool, "Should Openpilot stop for traffic lights"),
'traffic_lights_without_direction': Param(False, bool, "Should Openpilot stop for traffic lights without a direction specified"),
#'use_car_caching': Param(True, bool, 'Whether to use fingerprint caching'),
#'min_TR': Param(None, VT.none_or_number, 'The minimum allowed following distance in seconds. Default is 0.9 seconds.\n'
#'The range is limited from 0.85 to 1.3. Set to None to disable', live=True),
#'use_virtual_middle_line': Param(False, bool, 'For roads over 4m wide, hug right. For roads under 2m wide, hug left.'),
'uniqueID': Param(None, [type(None), str], 'User\'s unique ID'),
'update_behavior': Param('auto', str, 'Can be: (\'off\', \'alert\', \'auto\') without quotes\n'
'off will never update, alert shows an alert on-screen\n'
'auto will reboot the device when an update is seen'),
'enable_indi_live': Param(False, bool, live=True),
'indi_inner_gain_bp': Param([18, 22, 26], [list, float, int], live=True, depends_on='enable_indi_live'),
'indi_inner_gain_v': Param([9, 12, 15], [list, float, int], live=True, depends_on='enable_indi_live'),
'indi_outer_gain_bp': Param([18, 22, 26], [list, float, int], live=True, depends_on='enable_indi_live'),
'indi_outer_gain_v': Param([8, 11, 14.99], [list, float, int], live=True, depends_on='enable_indi_live'),
'indi_time_constant_bp': Param([18, 22, 26], [list, float, int], live=True, depends_on='enable_indi_live'),
'indi_time_constant_v': Param([1, 3, 4.5], [list, float, int], live=True, depends_on='enable_indi_live'),
'indi_actuator_effectiveness_bp': Param([18, 22, 26], [list, float, int], live=True, depends_on='enable_indi_live'),
'indi_actuator_effectiveness_v': Param([9, 12, 15], [list, float, int], live=True, depends_on='enable_indi_live'),
'steer_limit_timer': Param(0.4, VT.number, live=True, depends_on='enable_indi_live')
}
self._params_file = '/data/op_params.json'
self._backup_file = '/data/op_params_corrupt.json'
self._last_read_time = sec_since_boot()
self.read_frequency = 2.5 # max frequency to read with self.get(...) (sec)
self._to_delete = ['reset_integral', 'log_data'] # a list of params you want to delete (unused)
self._last_mod_time = 0.
self._run_init() # restores, reads, and updates params
def _run_init(self): # does first time initializing of default params
# Two required parameters for opEdit
self.fork_params['username'] = Param(None, [type(None), str, bool], 'Your identifier provided with any crash logs sent to Sentry.\nHelps the developer reach out to you if anything goes wrong')
self.fork_params['op_edit_live_mode'] = Param(False, bool, 'This parameter controls which mode opEdit starts in', hidden=True)
self.params = self._get_all_params(default=True) # in case file is corrupted
for k, p in self.fork_params.items():
d = p.depends_on
while d:
fp = self.fork_params[d]
fp.children.append(k)
d = fp.depends_on
if travis:
return
if os.path.isfile(self._params_file):
if self._read():
to_write = self._add_default_params() # if new default data has been added
to_write |= self._delete_old() # or if old params have been deleted
else: # backup and re-create params file
error("Can't read op_params.json file, backing up to /data/op_params_corrupt.json and re-creating file!")
to_write = True
if os.path.isfile(self._backup_file):
os.remove(self._backup_file)
os.rename(self._params_file, self._backup_file)
else:
to_write = True # user's first time running a fork with op_params, write default params
if to_write:
self._write()
os.chmod(self._params_file, 0o764)
def get(self, key=None, force_live=False): # any params you try to get MUST be in fork_params
param_info = self.param_info(key)
self._update_params(param_info, force_live)
if key is None:
return self._get_all_params()
self._check_key_exists(key, 'get')
value = self.params[key]
if param_info.is_valid(value): # always valid if no allowed types, otherwise checks to make sure
return value # all good, returning user's value
warning('User\'s value type is not valid! Returning default') # somehow... it should always be valid
return param_info.default # return default value because user's value of key is not in allowed_types to avoid crashing openpilot
def put(self, key, value):
self._check_key_exists(key, 'put')
if not self.param_info(key).is_valid(value):
raise Exception('opParams: Tried to put a value of invalid type!')
self.params.update({key: value})
self._write()
def delete(self, key): # todo: might be obsolete. remove?
if key in self.params:
del self.params[key]
self._write()
def param_info(self, key):
if key in self.fork_params:
return self.fork_params[key]
return Param()
def _check_key_exists(self, key, met):
if key not in self.fork_params or key not in self.params:
raise Exception('opParams: Tried to {} an unknown parameter! Key not in fork_params: {}'.format(met, key))
def _add_default_params(self):
added = False
for key, param in self.fork_params.items():
if key not in self.params:
self.params[key] = param.default
added = True
elif not param.is_valid(self.params[key]):
warning('Value type of user\'s {} param not in allowed types, replacing with default!'.format(key))
self.params[key] = param.default
added = True
return added
def _delete_old(self):
deleted = False
for param in self._to_delete:
if param in self.params:
del self.params[param]
deleted = True
return deleted
def _get_all_params(self, default=False, return_hidden=False):
if default:
return {k: p.default for k, p in self.fork_params.items()}
return {k: self.params[k] for k, p in self.fork_params.items() if k in self.params and (not p.hidden or return_hidden)}
def _update_params(self, param_info, force_live):
if force_live or param_info.live: # if is a live param, we want to get updates while openpilot is running
if not travis and sec_since_boot() - self._last_read_time >= self.read_frequency: # make sure we aren't reading file too often
if self._read():
self._last_read_time = sec_since_boot()
def _read(self):
if os.path.isfile(self._params_file):
try:
mod_time = os.path.getmtime(self._params_file)
if mod_time > self._last_mod_time:
with open(self._params_file, "r") as f:
self.params = json.loads(f.read())
self._last_mod_time = mod_time
return True
else:
return False
except Exception as e:
print("Unable to read file: " + str(e))
return False
def _write(self):
if not travis:
with open(self._params_file, "w") as f:
f.write(json.dumps(self.params, indent=2)) # can further speed it up by remove indentation but makes file hard to read
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,468
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika_repo/laika/astro_dog.py
|
from collections import defaultdict
from .helpers import get_constellation, get_closest, get_el_az, TimeRangeHolder
from .ephemeris import parse_sp3_orbits, parse_rinex_nav_msg_gps, \
parse_rinex_nav_msg_glonass
from .downloader import download_orbits, download_orbits_russia, download_nav, download_ionex, download_dcb
from .downloader import download_cors_station
from .trop import saast
from .iono import parse_ionex
from .dcb import parse_dcbs
from .dgps import get_closest_station_names, parse_dgps
from . import constants
MAX_DGPS_DISTANCE = 100000 # in meters, because we're not barbarians
class AstroDog(object):
'''
auto_update: flag indicating whether laika should fetch files from web
cache_dir: directory where data files are downloaded to and cached
pull_orbit: flag indicating whether laika should fetch sp3 orbits
instead of nav files (orbits are more accurate)
dgps: flag indicating whether laika should use dgps (CORS)
data to calculate pseudorange corrections
valid_const: list of constellation identifiers laika will try process
'''
def __init__(self, auto_update=True,
cache_dir='/tmp/gnss/',
pull_orbit=True, dgps=False,
valid_const=['GPS', 'GLONASS']):
self.auto_update = auto_update
self.cache_dir = cache_dir
self.dgps = dgps
self.dgps_delays = []
self.ionex_maps = []
self.pull_orbit = pull_orbit
self.valid_const = valid_const
self.cached_ionex = None
self.cached_dgps = None
self.orbit_fetched_times = TimeRangeHolder()
self.nav_fetched_times = TimeRangeHolder()
self.dcbs_fetched_times = TimeRangeHolder()
self.orbits = defaultdict(lambda: [])
self.nav = defaultdict(lambda: [])
self.dcbs = defaultdict(lambda: [])
self.cached_orbit = defaultdict(lambda: None)
self.cached_nav = defaultdict(lambda: None)
self.cached_dcb = defaultdict(lambda: None)
def get_ionex(self, time):
if self.cached_ionex is not None and self.cached_ionex.valid(time):
return self.cached_ionex
self.cached_ionex = get_closest(time, self.ionex_maps)
if self.cached_ionex is not None and self.cached_ionex.valid(time):
return self.cached_ionex
self.get_ionex_data(time)
self.cached_ionex = get_closest(time, self.ionex_maps)
if self.cached_ionex is not None and self.cached_ionex.valid(time):
return self.cached_ionex
elif self.auto_update:
raise RuntimeError("Pulled ionex, but still can't get valid for time " + str(time))
else:
return None
def get_nav(self, prn, time):
if self.cached_nav[prn] is not None and self.cached_nav[prn].valid(time):
return self.cached_nav[prn]
self.cached_nav[prn] = get_closest(time, self.nav[prn])
if self.cached_nav[prn] is not None and self.cached_nav[prn].valid(time):
return self.cached_nav[prn]
# Already fetched, but no data found
if time in self.nav_fetched_times:
return None
self.get_nav_data(time)
self.cached_nav[prn] = get_closest(time, self.nav[prn])
if self.cached_nav[prn] is not None and self.cached_nav[prn].valid(time):
return self.cached_nav[prn]
else:
return None
@staticmethod
def _select_valid_temporal_items(item_dict, time, cache):
'''Returns only valid temporal item for specific time from currently fetched
data.'''
result = {}
for prn, temporal_objects in item_dict.items():
cached = cache[prn]
if cached is not None and cached.valid(time):
obj = cached
else:
obj = get_closest(time, temporal_objects)
if obj is None or not obj.valid(time):
continue
cache[prn] = obj
result[prn] = obj
return result
def get_navs(self, time):
if time in self.nav_fetched_times:
valid_navs = AstroDog._select_valid_temporal_items(self.nav, time,
self.cached_nav)
else:
self.get_nav_data(time)
valid_navs = AstroDog._select_valid_temporal_items(self.nav, time,
self.cached_nav)
return valid_navs
def get_orbit(self, prn, time):
if self.cached_orbit[prn] is not None and self.cached_orbit[prn].valid(time):
return self.cached_orbit[prn]
self.cached_orbit[prn] = get_closest(time, self.orbits[prn])
if self.cached_orbit[prn] is not None and self.cached_orbit[prn].valid(time):
return self.cached_orbit[prn]
# Already fetched, but no data found
if time in self.orbit_fetched_times:
return None
self.get_orbit_data(time)
self.cached_orbit[prn] = get_closest(time, self.orbits[prn])
if self.cached_orbit[prn] is not None and self.cached_orbit[prn].valid(time):
return self.cached_orbit[prn]
else:
return None
def get_orbits(self, time):
if time in self.orbit_fetched_times:
valid_orbits = AstroDog._select_valid_temporal_items(self.orbits, time,
self.cached_orbit)
else:
self.get_orbit_data(time)
valid_orbits = AstroDog._select_valid_temporal_items(self.orbits, time,
self.cached_orbit)
return valid_orbits
def get_dcb(self, prn, time):
if self.cached_dcb[prn] is not None and self.cached_dcb[prn].valid(time):
return self.cached_dcb[prn]
self.cached_dcb[prn] = get_closest(time, self.dcbs[prn])
if self.cached_dcb[prn] is not None and self.cached_dcb[prn].valid(time):
return self.cached_dcb[prn]
# Already fetched, but no data found
if time in self.dcbs_fetched_times:
return None
self.get_dcb_data(time)
self.cached_dcb[prn] = get_closest(time, self.dcbs[prn])
if self.cached_dcb[prn] is not None and self.cached_dcb[prn].valid(time):
return self.cached_dcb[prn]
else:
return None
def get_dgps_corrections(self, time, recv_pos):
if self.cached_dgps is not None and self.cached_dgps.valid(time, recv_pos):
return self.cached_dgps
self.cached_dgps = get_closest(time, self.dgps_delays, recv_pos=recv_pos)
if self.cached_dgps is not None and self.cached_dgps.valid(time, recv_pos):
return self.cached_dgps
self.get_dgps_data(time, recv_pos)
self.cached_dgps = get_closest(time, self.dgps_delays, recv_pos=recv_pos)
if self.cached_dgps is not None and self.cached_dgps.valid(time, recv_pos):
return self.cached_dgps
elif self.auto_update:
raise RuntimeError("Pulled dgps, but still can't get valid for time " + str(time))
else:
return None
def add_ephem(self, new_ephem, ephems):
prn = new_ephem.prn
# TODO make this check work
# for eph in ephems[prn]:
# if eph.type == new_ephem.type and eph.epoch == new_ephem.epoch:
# raise RuntimeError('Trying to add an ephemeris that is already there, something is wrong')
ephems[prn].append(new_ephem)
def get_nav_data(self, time):
ephems_gps, ephems_glonass = [], []
if 'GPS' in self.valid_const:
file_path_gps = download_nav(time, cache_dir=self.cache_dir, constellation='GPS')
if file_path_gps:
ephems_gps = parse_rinex_nav_msg_gps(file_path_gps)
if 'GLONASS' in self.valid_const:
file_path_glonass = download_nav(time, cache_dir=self.cache_dir, constellation='GLONASS')
if file_path_glonass:
ephems_glonass = parse_rinex_nav_msg_glonass(file_path_glonass)
fetched_ephems = (ephems_gps + ephems_glonass)
for ephem in fetched_ephems:
self.add_ephem(ephem, self.nav)
if len(fetched_ephems) != 0:
min_ephem = min(fetched_ephems, key=lambda e: e.epoch)
max_ephem = max(fetched_ephems, key=lambda e: e.epoch)
min_epoch = min_ephem.epoch - min_ephem.max_time_diff
max_epoch = max_ephem.epoch + max_ephem.max_time_diff
self.nav_fetched_times.add(min_epoch, max_epoch)
def get_orbit_data(self, time):
file_paths_sp3_ru = download_orbits_russia(time, cache_dir=self.cache_dir)
ephems_sp3_ru = parse_sp3_orbits(file_paths_sp3_ru, self.valid_const)
file_paths_sp3_us = download_orbits(time, cache_dir=self.cache_dir)
ephems_sp3_us = parse_sp3_orbits(file_paths_sp3_us, self.valid_const)
ephems_sp3 = ephems_sp3_ru + ephems_sp3_us
if len(ephems_sp3) < 5:
raise RuntimeError('No orbit data found on either servers')
for ephem in ephems_sp3:
self.add_ephem(ephem, self.orbits)
if len(ephems_sp3) != 0:
min_ephem = min(ephems_sp3, key=lambda e: e.epoch)
max_ephem = max(ephems_sp3, key=lambda e: e.epoch)
min_epoch = min_ephem.epoch - min_ephem.max_time_diff
max_epoch = max_ephem.epoch + max_ephem.max_time_diff
self.orbit_fetched_times.add(min_epoch, max_epoch)
def get_dcb_data(self, time):
file_path_dcb = download_dcb(time, cache_dir=self.cache_dir)
dcbs = parse_dcbs(file_path_dcb, self.valid_const)
for dcb in dcbs:
self.dcbs[dcb.prn].append(dcb)
if len(dcbs) != 0:
min_dcb = min(dcbs, key=lambda e: e.epoch)
max_dcb = max(dcbs, key=lambda e: e.epoch)
min_epoch = min_dcb.epoch - min_dcb.max_time_diff
max_epoch = max_dcb.epoch + max_dcb.max_time_diff
self.dcbs_fetched_times.add(min_epoch, max_epoch)
def get_ionex_data(self, time):
file_path_ionex = download_ionex(time, cache_dir=self.cache_dir)
ionex_maps = parse_ionex(file_path_ionex)
for im in ionex_maps:
self.ionex_maps.append(im)
def get_dgps_data(self, time, recv_pos):
station_names = get_closest_station_names(recv_pos, k=8, max_distance=MAX_DGPS_DISTANCE, cache_dir=self.cache_dir)
for station_name in station_names:
file_path_station = download_cors_station(time, station_name, cache_dir=self.cache_dir)
if file_path_station:
dgps = parse_dgps(station_name, file_path_station,
self, max_distance=MAX_DGPS_DISTANCE,
required_constellations=self.valid_const)
if dgps is not None:
self.dgps_delays.append(dgps)
break
def get_tgd_from_nav(self, prn, time):
if get_constellation(prn) not in self.valid_const:
return None
eph = self.get_nav(prn, time)
if eph:
return eph.get_tgd()
else:
return None
def get_sat_info(self, prn, time):
if get_constellation(prn) not in self.valid_const:
return None
if self.pull_orbit:
eph = self.get_orbit(prn, time)
else:
eph = self.get_nav(prn, time)
if eph:
return eph.get_sat_info(time)
else:
return None
def get_all_sat_info(self, time):
if self.pull_orbit:
ephs = self.get_orbits(time)
else:
ephs = self.get_navs(time)
return {prn: eph.get_sat_info(time) for prn, eph in ephs.items()}
def get_glonass_channel(self, prn, time):
nav = self.get_nav(prn, time)
if nav:
return nav.channel
else:
return None
def get_frequency(self, prn, time, signal='C1C'):
if get_constellation(prn) == 'GPS':
if signal[1] == '1':
return constants.GPS_L1
elif signal[1] == '2':
return constants.GPS_L2
elif signal[1] == '5':
return constants.GPS_L5
elif signal[1] == '6':
return constants.GALILEO_E6
elif signal[1] == '7':
return constants.GALILEO_E5B
elif signal[1] == '8':
return constants.GALILEO_E5AB
else:
raise NotImplementedError('Dont know this GPS frequency: ', signal, prn)
elif get_constellation(prn) == 'GLONASS':
n = self.get_glonass_channel(prn, time)
if n is None:
return None
if signal[1] == '1':
return constants.GLONASS_L1 + n * constants.GLONASS_L1_DELTA
if signal[1] == '2':
return constants.GLONASS_L2 + n * constants.GLONASS_L2_DELTA
if signal[1] == '5':
return constants.GLONASS_L5 + n * constants.GLONASS_L5_DELTA
if signal[1] == '6':
return constants.GALILEO_E6
if signal[1] == '7':
return constants.GALILEO_E5B
if signal[1] == '8':
return constants.GALILEO_E5AB
else:
raise NotImplementedError('Dont know this GLONASS frequency: ', signal, prn)
def get_delay(self, prn, time, rcv_pos, no_dgps=False, signal='C1C', freq=None):
sat_info = self.get_sat_info(prn, time)
if sat_info is None:
return None
sat_pos = sat_info[0]
el, az = get_el_az(rcv_pos, sat_pos)
if el < 0.2:
return None
if self.dgps and not no_dgps:
dgps_corrections = self.get_dgps_corrections(time, rcv_pos)
if dgps_corrections is None:
return None
dgps_delay = dgps_corrections.get_delay(prn, time)
if dgps_delay is None:
return None
return dgps_corrections.get_delay(prn, time)
else:
if not freq:
freq = self.get_frequency(prn, time, signal)
ionex = self.get_ionex(time)
dcb = self.get_dcb(prn, time)
if ionex is None or dcb is None or freq is None:
return None
iono_delay = ionex.get_delay(rcv_pos, az, el, sat_pos, time, freq)
trop_delay = saast(rcv_pos, el)
code_bias = dcb.get_delay(signal)
return iono_delay + trop_delay + code_bias
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,469
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika_repo/tests/test_positioning.py
|
import numpy as np
from tqdm import tqdm
import unittest
from laika import AstroDog
from shutil import copyfile
import os
from datetime import datetime
from laika.gps_time import GPSTime
from laika.downloader import download_cors_station
from laika.rinex_file import RINEXFile
from laika.dgps import get_station_position
import laika.raw_gnss as raw
class TestPositioning(unittest.TestCase):
  """End-to-end check: WLS position fixes from a CORS station's RINEX obs
  should average out to the station's known surveyed position."""

  @unittest.skip("Takes way too long to download for ci")
  def test_station_position(self):
    print('WARNING THIS TAKE CAN TAKE A VERY LONG TIME THE FIRST RUN TO DOWNLOAD')
    dog = AstroDog()
    # Building the station-position cache takes forever; seed it from the
    # copy checked into the examples directory instead.
    cache_directory = '/tmp/gnss/cors_coord/'
    for directory in ('/tmp/gnss/', cache_directory):
      try:
        os.mkdir(directory)
      except OSError:
        pass
    examples_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../examples')
    copyfile(os.path.join(examples_directory, 'cors_station_positions'),
             os.path.join(cache_directory, 'cors_station_positions'))
    station_name = 'sc01'
    time = GPSTime.from_datetime(datetime(2020, 1, 11))
    obs_data = RINEXFile(download_cors_station(time, station_name, dog.cache_dir))
    sc01_exact_position = get_station_position('sc01')
    # Correct every epoch's raw measurements against the known position.
    corrected_groups = []
    for measurements in tqdm(raw.read_rinex_obs(obs_data)):
      corrected_groups.append(raw.correct_measurements(measurements, sc01_exact_position, dog=dog))
    # Using laika's WLS solver we can now calculate position
    # fixes for every epoch (every 30s) over 24h.
    fixes = np.array([raw.calc_pos_fix(group)[0] for group in tqdm(corrected_groups)])
    mean_fix = np.mean(fixes[:, :3], axis=0)
    np.testing.assert_allclose(mean_fix, sc01_exact_position, rtol=0, atol=1)
# Allow running this test module directly: `python test_positioning.py`.
if __name__ == "__main__":
  unittest.main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,470
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/test/test_openpilot.py
|
# flake8: noqa
import os
# Must be set before the selfdrive imports below so the uploader runs in
# fake-upload mode during these tests.
os.environ['FAKEUPLOAD'] = "1"
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.manager import manager_init, manager_prepare, start_daemon_process
from selfdrive.test.helpers import phone_only, with_processes, set_params_enabled
import json
import requests
import signal
import subprocess
import time
# must run first
@phone_only
def test_manager_prepare():
  """Initialize params/manager state and prepare all managed processes."""
  set_params_enabled()
  manager_init()
  manager_prepare()
@phone_only
@with_processes(['loggerd', 'logmessaged', 'tombstoned', 'proclogd', 'logcatd'])
def test_logging():
  """Smoke test: logging daemons start and run for one second."""
  print("LOGGING IS SET UP")
  time.sleep(1.0)
@phone_only
@with_processes(['camerad', 'modeld', 'dmonitoringmodeld'])
def test_visiond():
  """Smoke test: camera and model daemons start and run for five seconds."""
  print("VISIOND IS SET UP")
  time.sleep(5.0)
@phone_only
@with_processes(['sensord'])
def test_sensord():
  """Smoke test: sensor daemon starts and runs for one second."""
  print("SENSORS ARE SET UP")
  time.sleep(1.0)
@phone_only
@with_processes(['ui'])
def test_ui():
  """Smoke test: UI process starts and runs for one second."""
  print("RUNNING UI")
  time.sleep(1.0)
# will have one thing to upload if loggerd ran
# TODO: assert it actually uploaded
@phone_only
@with_processes(['uploader'])
def test_uploader():
  """Smoke test: uploader runs for ten seconds (upload itself unverified)."""
  print("UPLOADER")
  time.sleep(10.0)
@phone_only
def test_athena():
  """Exercise the athena daemon lifecycle and (optionally) its RPC API.

  Starts manage_athenad, verifies athenad comes up and is restarted after
  being killed, then — only if COMMA_JWT is set — issues echo/getSimInfo/
  takeSnapshot/getMessage RPCs against athena.comma.ai. Always tears the
  daemons down in the finally block.
  """
  print("ATHENA")
  start = sec_since_boot()
  start_daemon_process("manage_athenad")
  params = Params()
  manage_athenad_pid = params.get("AthenadPid")
  assert manage_athenad_pid is not None
  try:
    # Signal 0 probes liveness without actually signalling the process.
    os.kill(int(manage_athenad_pid), 0)
    # process is running
  except OSError:
    assert False, "manage_athenad is dead"
  def expect_athena_starts(timeout=30):
    # Poll for a child of manage_athenad (the athenad worker) until it
    # appears or the timeout expires.
    now = time.time()
    athenad_pid = None
    while athenad_pid is None:
      try:
        athenad_pid = subprocess.check_output(["pgrep", "-P", manage_athenad_pid], encoding="utf-8").strip()
        return athenad_pid
      except subprocess.CalledProcessError:
        if time.time() - now > timeout:
          assert False, f"Athena did not start within {timeout} seconds"
        time.sleep(0.5)
  def athena_post(payload, max_retries=5, wait=5):
    # JSON-RPC POST to the athena backend for this dongle, retried on any
    # failure (network error or an 'error' field in the response).
    tries = 0
    while 1:
      try:
        resp = requests.post(
          "https://athena.comma.ai/" + params.get("DongleId", encoding="utf-8"),
          headers={
            "Authorization": "JWT " + os.getenv("COMMA_JWT"),
            "Content-Type": "application/json"
          },
          data=json.dumps(payload),
          timeout=30
        )
        resp_json = resp.json()
        if resp_json.get('error'):
          raise Exception(resp_json['error'])
        return resp_json
      except Exception as e:
        time.sleep(wait)
        tries += 1
        if tries == max_retries:
          raise
        else:
          print(f'athena_post failed {e}. retrying...')
  def expect_athena_registers(test_t0):
    # An echo round-trip proves the device registered with the backend;
    # also check the last ping timestamp moved forward since test start.
    resp = athena_post({
      "method": "echo",
      "params": ["hello"],
      "id": 0,
      "jsonrpc": "2.0"
    }, max_retries=12, wait=5)
    assert resp.get('result') == "hello", f'Athena failed to register ({resp})'
    last_pingtime = params.get("LastAthenaPingTime", encoding='utf8')
    assert last_pingtime, last_pingtime
    assert ((int(last_pingtime)/1e9) - test_t0) < (sec_since_boot() - test_t0)
  try:
    athenad_pid = expect_athena_starts()
    # kill athenad and ensure it is restarted (check_output will throw if it is not)
    os.kill(int(athenad_pid), signal.SIGINT)
    expect_athena_starts()
    if not os.getenv('COMMA_JWT'):
      print('WARNING: COMMA_JWT env not set, will not test requests to athena.comma.ai')
      return
    expect_athena_registers(start)
    print("ATHENA: getSimInfo")
    resp = athena_post({
      "method": "getSimInfo",
      "id": 0,
      "jsonrpc": "2.0"
    })
    assert resp.get('result'), resp
    assert 'sim_id' in resp['result'], resp['result']
    print("ATHENA: takeSnapshot")
    resp = athena_post({
      "method": "takeSnapshot",
      "id": 0,
      "jsonrpc": "2.0"
    })
    assert resp.get('result'), resp
    assert resp['result']['jpegBack'], resp['result']
    @with_processes(["thermald"])
    def test_athena_thermal():
      # thermald must be running for getMessage('thermal') to succeed.
      print("ATHENA: getMessage(thermal)")
      resp = athena_post({
        "method": "getMessage",
        "params": {"service": "thermal", "timeout": 5000},
        "id": 0,
        "jsonrpc": "2.0"
      })
      assert resp.get('result'), resp
      assert resp['result']['thermal'], resp['result']
    test_athena_thermal()
  finally:
    # Best-effort teardown of both daemons. athenad_pid may be None here
    # (pgrep failed), in which case int(None) raises TypeError — caught below.
    try:
      athenad_pid = subprocess.check_output(["pgrep", "-P", manage_athenad_pid], encoding="utf-8").strip()
    except subprocess.CalledProcessError:
      athenad_pid = None
    try:
      os.kill(int(manage_athenad_pid), signal.SIGINT)
      os.kill(int(athenad_pid), signal.SIGINT)
    except (OSError, TypeError):
      pass
# TODO: re-enable when jenkins test has /data/pythonpath -> /data/openpilot
# @phone_only
# @with_apks()
# def test_apks():
# print("APKS")
# time.sleep(14.0)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,471
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/replay/camera.py
|
#!/usr/bin/env python3
import os
from common.basedir import BASEDIR
os.environ['BASEDIR'] = BASEDIR
SCALE = float(os.getenv("SCALE", "1"))
import argparse
import pygame # pylint: disable=import-error
import numpy as np
import cv2 # pylint: disable=import-error
import sys
import cereal.messaging as messaging
# Pixel offset of the "big box" UI viewport within the full camera frame.
_BB_OFFSET = 0, 0
# Homogeneous 2D transform mapping big-box coordinates into the full frame
# (currently a pure translation by _BB_OFFSET), and its inverse.
_BB_TO_FULL_FRAME = np.asarray([[1., 0., _BB_OFFSET[0]], [0., 1., _BB_OFFSET[1]],
                                [0., 0., 1.]])
_FULL_FRAME_TO_BB = np.linalg.inv(_BB_TO_FULL_FRAME)
# Full camera frame resolution (width, height).
_FULL_FRAME_SIZE = 1164, 874
def pygame_modules_have_loaded():
  """Return True once both the pygame display and font modules are initialized."""
  display_ready = pygame.display.get_init()
  return display_ready and pygame.font.get_init()
def ui_thread(addr, frame_address):
  """Render the 'frame' message stream in a pygame window, forever.

  NOTE(review): addr and frame_address are accepted but never used in this
  body — the sub socket is created with messaging defaults; confirm intended.
  """
  pygame.init()
  pygame.font.init()
  assert pygame_modules_have_loaded()
  size = (int(_FULL_FRAME_SIZE[0] * SCALE), int(_FULL_FRAME_SIZE[1] * SCALE))
  print(size)
  pygame.display.set_caption("comma one debug UI")
  screen = pygame.display.set_mode(size, pygame.DOUBLEBUF)
  camera_surface = pygame.surface.Surface((_FULL_FRAME_SIZE[0] * SCALE, _FULL_FRAME_SIZE[1] * SCALE), 0, 24).convert()
  frame = messaging.sub_sock('frame', conflate=True)
  # Reused destination buffers: display image and full-frame RGB scratch.
  img = np.zeros((_FULL_FRAME_SIZE[1], _FULL_FRAME_SIZE[0], 3), dtype='uint8')
  imgff = np.zeros((_FULL_FRAME_SIZE[1], _FULL_FRAME_SIZE[0], 3), dtype=np.uint8)
  while 1:
    # Drain the event queue so the window stays responsive.
    list(pygame.event.get())
    screen.fill((64, 64, 64))
    # ***** frame *****
    fpkt = messaging.recv_one(frame)
    yuv_img = fpkt.frame.image
    if fpkt.frame.transform:
      yuv_transform = np.array(fpkt.frame.transform).reshape(3, 3)
    else:
      # assume frame is flipped
      yuv_transform = np.array([[-1.0, 0.0, _FULL_FRAME_SIZE[0] - 1],
                                [0.0, -1.0, _FULL_FRAME_SIZE[1] - 1], [0.0, 0.0, 1.0]])
    # A buffer of exactly w*h*3/2 bytes is I420 YUV; anything else is
    # treated as packed RGB below.
    if yuv_img and len(yuv_img) == _FULL_FRAME_SIZE[0] * _FULL_FRAME_SIZE[1] * 3 // 2:
      yuv_np = np.frombuffer(
        yuv_img, dtype=np.uint8).reshape(_FULL_FRAME_SIZE[1] * 3 // 2, -1)
      cv2.cvtColor(yuv_np, cv2.COLOR_YUV2RGB_I420, dst=imgff)
      # Warp from full-frame space into the display buffer in place.
      cv2.warpAffine(
        imgff,
        np.dot(yuv_transform, _BB_TO_FULL_FRAME)[:2], (img.shape[1], img.shape[0]),
        dst=img,
        flags=cv2.WARP_INVERSE_MAP)
    else:
      # actually RGB
      img = np.frombuffer(yuv_img, dtype=np.uint8).reshape((_FULL_FRAME_SIZE[1], _FULL_FRAME_SIZE[0], 3))
      img = img[:, :, ::-1]  # Convert BGR to RGB
    height, width = img.shape[:2]
    img_resized = cv2.resize(
      img, (int(SCALE * width), int(SCALE * height)), interpolation=cv2.INTER_CUBIC)
    # *** blits ***
    # pygame surfarrays are (x, y) ordered, hence the axis swap.
    pygame.surfarray.blit_array(camera_surface, img_resized.swapaxes(0, 1))
    screen.blit(camera_surface, (0, 0))
    # this takes time...vsync or something
    pygame.display.flip()
def get_arg_parser():
  """Build the command-line parser for the replay UI.

  Returns:
    argparse.ArgumentParser with a positional `ip_address` (default
    127.0.0.1) and an optional `--frame-address`.
  """
  parser = argparse.ArgumentParser(
    description="Show replay data in a UI.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
    "ip_address",
    nargs="?",
    default="127.0.0.1",
    help="The ip address on which to receive zmq messages.")
  parser.add_argument(
    "--frame-address",
    default=None,
    # BUG FIX: help text was a copy-paste of ip_address's help.
    help="Separate address on which to receive camera frame messages.")
  return parser
# Entry point: parse CLI args and run the render loop (never returns).
if __name__ == "__main__":
  args = get_arg_parser().parse_args(sys.argv[1:])
  ui_thread(args.ip_address, args.frame_address)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,472
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/livedm/livedm.py
|
#!/usr/bin/env python3
import os
import argparse
import pygame # pylint: disable=import-error
import numpy as np
import cv2 # pylint: disable=import-error
from cereal import log
import cereal.messaging as messaging
from helpers import draw_pose
# Live driver-monitoring visualizer: subscribes to the driverMonitoring
# stream over ZMQ from a remote device and draws a stylized face (eyes,
# blink state, pose) into a 160x320 image, shown scaled 2x in pygame.
if __name__ == "__main__":
  os.environ["ZMQ"] = "1"
  parser = argparse.ArgumentParser(description='Sniff a communcation socket')
  parser.add_argument('--addr', default='192.168.5.11')
  args = parser.parse_args()
  messaging.context = messaging.Context()
  poller = messaging.Poller()
  m = 'driverMonitoring'
  messaging.sub_sock(m, poller, addr=args.addr)
  pygame.init()
  pygame.display.set_caption('livedm')
  # Window is 2x the rendered 160x320 surface.
  screen = pygame.display.set_mode((320, 640), pygame.DOUBLEBUF)
  camera_surface = pygame.surface.Surface((160, 320), 0, 24).convert()
  while 1:
    polld = poller.poll(1000)
    for sock in polld:
      msg = sock.receive()
      evt = log.Event.from_bytes(msg)
      faceProb = np.array(evt.driverMonitoring.faceProb)
      faceOrientation = np.array(evt.driverMonitoring.faceOrientation)
      facePosition = np.array(evt.driverMonitoring.facePosition)
      print(faceProb)
      # print(faceOrientation)
      # print(facePosition)
      # Mirror yaw and horizontal position to match the rendered view.
      faceOrientation[1] *= -1
      facePosition[0] *= -1
      img = np.zeros((320, 160, 3))
      if faceProb > 0.4:
        # Face detected: draw label, bounding box and (if eyes are open
        # and not blinking) simple eye/pupil marks at face-relative coords.
        cv2.putText(img, 'you', (int(facePosition[0]*160+40), int(facePosition[1]*320+110)), cv2.FONT_ITALIC, 0.5, (255, 255, 0))
        cv2.rectangle(img, (int(facePosition[0]*160+40), int(facePosition[1]*320+120)),
                      (int(facePosition[0]*160+120), int(facePosition[1]*320+200)), (255, 255, 0), 1)
        not_blink = evt.driverMonitoring.leftBlinkProb + evt.driverMonitoring.rightBlinkProb < 1
        if evt.driverMonitoring.leftEyeProb > 0.6:
          cv2.line(img, (int(facePosition[0]*160+95), int(facePosition[1]*320+140)),
                   (int(facePosition[0]*160+105), int(facePosition[1]*320+140)), (255, 255, 0), 2)
          if not_blink:
            cv2.line(img, (int(facePosition[0]*160+99), int(facePosition[1]*320+143)),
                     (int(facePosition[0]*160+101), int(facePosition[1]*320+143)), (255, 255, 0), 2)
        if evt.driverMonitoring.rightEyeProb > 0.6:
          cv2.line(img, (int(facePosition[0]*160+55), int(facePosition[1]*320+140)),
                   (int(facePosition[0]*160+65), int(facePosition[1]*320+140)), (255, 255, 0), 2)
          if not_blink:
            cv2.line(img, (int(facePosition[0]*160+59), int(facePosition[1]*320+143)),
                     (int(facePosition[0]*160+61), int(facePosition[1]*320+143)), (255, 255, 0), 2)
      else:
        cv2.putText(img, 'you not found', (int(facePosition[0]*160+40), int(facePosition[1]*320+110)), cv2.FONT_ITALIC, 0.5, (64, 64, 64))
      draw_pose(img, faceOrientation, facePosition,
                W=160, H=320, xyoffset=(0, 0), faceprob=faceProb)
      # pygame surfarrays are (x, y) ordered, hence the axis swap.
      pygame.surfarray.blit_array(camera_surface, img.swapaxes(0, 1))
      camera_surface_2x = pygame.transform.scale2x(camera_surface)
      screen.blit(camera_surface_2x, (0, 0))
      pygame.display.flip()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,473
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/hardware_tici.py
|
import serial
from common.hardware_base import HardwareBase
from cereal import log
import subprocess
# Shorthand aliases for the cereal capnp enums used by the Tici class below.
NetworkType = log.ThermalData.NetworkType
NetworkStrength = log.ThermalData.NetworkStrength
def run_at_command(cmd, timeout=0.1):
  """Send one AT command (bytes, without CRLF) to the modem on /dev/ttyUSB2
  and return its response line as a stripped string."""
  with serial.Serial("/dev/ttyUSB2", timeout=timeout) as modem:
    modem.write(cmd + b"\r\n")
    # The modem echoes the request back before answering; discard the echo.
    modem.readline()
    return modem.readline().decode().rstrip()
class Tici(HardwareBase):
  """HardwareBase implementation for the tici platform.

  Most queries return fixed values; the IMEI is read from the LTE modem
  over its serial AT interface.
  """

  def get_sound_card_online(self):
    # Sound card is always present on this hardware.
    return True

  def get_imei(self, slot):
    """Query the modem for its IMEI; only slot 0 exists on tici."""
    if slot != 0:
      return ""
    # The serial link is flaky right after boot; retry a handful of times.
    for _attempt in range(10):
      try:
        imei = run_at_command(b"AT+CGSN")
        if len(imei) == 15:
          return imei
      except serial.SerialException:
        pass
    raise RuntimeError("Error getting IMEI")

  def get_serial(self):
    # Serial number is exposed via the kernel command line.
    return self.get_cmdline()['androidboot.serialno']

  def get_subscriber_info(self):
    return ""

  def reboot(self, reason=None):
    # `reason` is accepted for interface compatibility but unused.
    subprocess.check_output(["sudo", "reboot"])

  def get_network_type(self):
    return NetworkType.wifi

  def get_sim_info(self):
    # No SIM handling implemented; always report an absent SIM.
    return dict(sim_id='',
                mcc_mnc=None,
                network_type=["Unknown"],
                sim_state=["ABSENT"],
                data_connected=False)

  def get_network_strength(self, network_type):
    return NetworkStrength.unknown
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,474
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika_repo/laika/downloader.py
|
import ftplib
import gzip
import os
import urllib.request
from datetime import datetime
from urllib.parse import urlparse
from .constants import SECS_IN_DAY, SECS_IN_WEEK
from .gps_time import GPSTime
from .unlzw import unlzw
USE_COMMA_CACHE = True
def ftpcache_path(url):
  """Map an FTP URL onto the equivalent comma FTP-cache HTTP mirror URL."""
  parsed = urlparse(url)
  host_key = parsed.netloc.replace(".", "-")
  return 'http://ftpcache.comma.life/' + host_key + parsed.path
def retryable(f):
  """
  Decorator to allow us to pass multiple URLs from which to download.
  Automatically retry the request with the next URL on failure
  """
  def wrapped(url_bases, *args, **kwargs):
    # A single URL string: call straight through, no retry logic.
    if isinstance(url_bases, str):
      return f(url_bases, *args, **kwargs)
    # Otherwise treat it as a sequence of candidate bases and return the
    # first successful result.
    for base in url_bases:
      try:
        return f(base, *args, **kwargs)
      except IOError as err:
        print(err)
    raise IOError("Multiple URL failures attempting to pull file(s)")
  return wrapped
def ftp_connect(url):
  """Open an anonymous FTP connection and cd into the URL's directory.

  Raises IOError when the connection/login fails or the directory cannot
  be entered; returns the connected ftplib.FTP object.
  """
  parsed = urlparse(url)
  assert parsed.scheme == 'ftp'
  domain = parsed.netloc
  try:
    ftp = ftplib.FTP(domain)
    ftp.login()
  except (OSError, ftplib.error_perm):
    raise IOError("Could not connect/auth to: " + domain)
  try:
    ftp.cwd(parsed.path)
  except ftplib.error_perm:
    raise IOError("Permission failure with folder: " + url)
  return ftp
@retryable
def list_dir(url):
  """Return the list of filenames in the FTP directory at `url`.

  Raises IOError on a permission failure while listing.
  """
  try:
    return ftp_connect(url).nlst()
  except ftplib.error_perm:
    raise IOError("Permission failure listing folder: " + url)
def decompress(filepath_zipped, filepath, compression=''):
  """Decompress `filepath_zipped` into `filepath` according to `compression`.

  compression: '' (no-op: returns filepath_zipped unchanged), '.gz', or '.Z'.
  Returns the path of the usable (decompressed) file.
  Raises NotImplementedError for an unknown compression suffix.
  """
  if compression == '':
    return filepath_zipped
  elif compression == '.gz':
    # Context managers ensure handles are closed even if reading fails;
    # the original opened/closed files by hand and leaked on exception.
    with gzip.open(filepath_zipped, 'rb') as f:
      uncompressed_data = f.read()
  elif compression == '.Z':
    with open(filepath_zipped, 'rb') as f:
      uncompressed_data = unlzw(f.read())
  else:
    raise NotImplementedError('unknown compression: ', compression)
  with open(filepath, 'wb') as f:
    f.write(uncompressed_data)
  return filepath
def ftp_download_files(url_base, folder_path, cacheDir, filenames, compression='', overwrite=False):
  """
  Like download file, but more of them. Keeps a persistent FTP connection open
  to be more efficient. Not "ftpcache.comma.life" aware

  Returns the list of local (decompressed) file paths; cached files are
  reused unless `overwrite` is set.
  """
  folder_path_abs = os.path.join(cacheDir, folder_path)
  ftp = ftp_connect(url_base + folder_path)
  filepaths = []
  for filename in filenames:
    filename_zipped = filename + compression
    filepath = os.path.join(folder_path_abs, filename)
    filepath_zipped = os.path.join(folder_path_abs, filename_zipped)
    print("pulling from", url_base, "to", filepath)
    if not os.path.isfile(filepath) or overwrite:
      if not os.path.exists(folder_path_abs):
        os.makedirs(folder_path_abs)
      try:
        # Write through a context manager: the original passed
        # open(...).write directly to retrbinary and leaked the handle.
        with open(filepath_zipped, 'wb') as wf:
          ftp.retrbinary('RETR ' + filename_zipped, wf.write)
      except ftplib.error_perm:
        raise IOError("Could not download file from: " + url_base + folder_path + filename_zipped)
      filepaths.append(decompress(filepath_zipped, filepath, compression=compression))
    else:
      filepaths.append(filepath)
  return filepaths
@retryable
def download_files(url_base, folder_path, cacheDir, filenames, compression='', overwrite=False):
  """Download several files, going through the comma HTTP cache when enabled,
  otherwise over a single persistent FTP connection."""
  if not USE_COMMA_CACHE:
    return ftp_download_files(
      url_base, folder_path, cacheDir, filenames, compression=compression, overwrite=overwrite
    )
  return [
    download_file(url_base, folder_path, cacheDir, fn, compression=compression, overwrite=overwrite)
    for fn in filenames
  ]
@retryable
def download_file(url_base, folder_path, cacheDir, filename, compression='', overwrite=False):
  """Download one (possibly compressed) file into the local cache.

  Tries the comma HTTP cache mirror first when USE_COMMA_CACHE is set,
  falling back to the original URL. Returns the local decompressed file
  path; cached files are reused unless `overwrite` is set. Raises IOError
  if the file cannot be fetched from either source.
  """
  folder_path_abs = os.path.join(cacheDir, folder_path)
  filename_zipped = filename + compression
  filepath = os.path.join(folder_path_abs, filename)
  filepath_zipped = os.path.join(folder_path_abs, filename_zipped)
  url = url_base + folder_path + filename_zipped
  url_cache = ftpcache_path(url)
  if not os.path.isfile(filepath) or overwrite:
    if not os.path.exists(folder_path_abs):
      os.makedirs(folder_path_abs)
    downloaded = False
    # try to download via the HTTP cache mirror first
    global USE_COMMA_CACHE
    if USE_COMMA_CACHE:
      try:
        print("pulling from", url_cache, "to", filepath)
        urlf = urllib.request.urlopen(url_cache, timeout=5)
        downloaded = True
      except IOError as e:
        print("cache download failed, pulling from", url, "to", filepath)
        # comma cache not accessible (not just 404 or perms issue): don't keep trying it
        if str(e.reason) == "timed out": # pylint: disable=no-member
          print("disabling ftpcache.comma.life")
          USE_COMMA_CACHE = False
    if not downloaded:
      # Fall back to the original (typically FTP) URL.
      print("cache download failed, pulling from", url, "to", filepath)
      try:
        urlf = urllib.request.urlopen(url)
      except IOError:
        raise IOError("Could not download file from: " + url)
    data_zipped = urlf.read()
    urlf.close()
    with open(filepath_zipped, 'wb') as wf:
      wf.write(data_zipped)
    # Decompress in place; returns the path to the usable file.
    filepath = decompress(filepath_zipped, filepath, compression=compression)
  return filepath
def download_nav(time, cache_dir, constellation='GPS'):
  """Download a broadcast navigation (ephemeris) RINEX file for `time`.

  Data older than one day is pulled from the daily CDDIS archive (GPS or
  GLONASS); recent data from the hourly archive (GPS only). Returns the
  local file path, or None when the constellation is unsupported or the
  download fails.
  """
  t = time.as_datetime()
  try:
    if GPSTime.from_datetime(datetime.utcnow()) - time > SECS_IN_DAY:
      url_base = 'ftp://cddis.gsfc.nasa.gov/gnss/data/daily/'
      cache_subdir = cache_dir + 'daily_nav/'
      if constellation == 'GPS':
        filename = t.strftime("brdc%j0.%yn")
        folder_path = t.strftime('%Y/%j/%yn/')
      elif constellation == 'GLONASS':
        filename = t.strftime("brdc%j0.%yg")
        folder_path = t.strftime('%Y/%j/%yg/')
      else:
        # Previously an unsupported constellation fell through to a
        # NameError on `filename` that escaped the IOError handler;
        # fail soft instead.
        return None
      return download_file(url_base, folder_path, cache_subdir, filename, compression='.Z')
    else:
      url_base = 'ftp://cddis.gsfc.nasa.gov/gnss/data/hourly/'
      cache_subdir = cache_dir + 'hourly_nav/'
      if constellation == 'GPS':
        filename = t.strftime("hour%j0.%yn")
        folder_path = t.strftime('%Y/%j/')
        return download_file(url_base, folder_path, cache_subdir, filename, compression='.Z', overwrite=True)
      # Only GPS is available hourly.
      return None
  except IOError:
    pass
def download_orbits(time, cache_dir):
  """Download precise orbit (sp3) files for time-1day, time, time+1day.

  For each day, candidate products are tried from most to least precise:
  final 'igs' (only for data older than 3 weeks, matching the original
  guard), rapid 'igr', then ultra-rapid 'igu' at 18/12/06/00h. The first
  product that downloads wins for that day. Returns the list of local
  file paths.
  """
  cache_subdir = cache_dir + 'cddis_products/'
  url_bases = (
    'ftp://cddis.gsfc.nasa.gov/gnss/products/',
    'ftp://igs.ign.fr/pub/igs/products/'
  )
  downloaded_files = []
  now = GPSTime.from_datetime(datetime.utcnow())
  for time in [time - SECS_IN_DAY, time, time + SECS_IN_DAY]:
    folder_path = "%i/" % (time.week)
    # Filename templates in decreasing precision; the original spelled
    # these out as six copy-pasted try blocks.
    candidates = []
    if now - time > 3*SECS_IN_WEEK:
      candidates.append("igs%i%i.sp3")
    candidates += ["igr%i%i.sp3", "igu%i%i_18.sp3", "igu%i%i_12.sp3",
                   "igu%i%i_06.sp3", "igu%i%i_00.sp3"]
    for template in candidates:
      try:
        filename = template % (time.week, time.day)
        downloaded_files.append(download_file(url_bases, folder_path, cache_subdir, filename, compression='.Z'))
        break
      except IOError:
        pass
  return downloaded_files
def download_orbits_russia(time, cache_dir):
  """Download GLONASS sp3 orbit files from the Russian IAC for time +/- 1 day.

  For data older than two weeks the 'final' product is tried first, then
  'rapid' and 'ultra'; only the first success per day is kept. Returns
  the list of local file paths.
  """
  cache_subdir = cache_dir + 'russian_products/'
  url_base = 'ftp://ftp.glonass-iac.ru/MCC/PRODUCTS/'
  downloaded_files = []
  for time in [time - SECS_IN_DAY, time, time + SECS_IN_DAY]:
    t = time.as_datetime()
    products = []
    if GPSTime.from_datetime(datetime.utcnow()) - time > 2*SECS_IN_WEEK:
      products.append('final')
    products += ['rapid', 'ultra']
    for product in products:
      try:
        folder_path = t.strftime('%y%j/') + product + '/'
        filename = "Sta%i%i.sp3" % (time.week, time.day)
        downloaded_files.append(download_file(url_base, folder_path, cache_subdir, filename))
        # BUGFIX: the original was missing a `continue` after a successful
        # 'rapid' download, so it also fetched and appended the 'ultra'
        # product for the same day; stop at the first success, matching
        # the 'final' branch.
        break
      except IOError:
        pass
  return downloaded_files
def download_ionex(time, cache_dir):
  """Download an ionosphere map (IONEX) file for `time`.

  Tries the final 'codg' product first, then the 'c1pg' and 'c2pg'
  predicted products; re-raises the last IOError when all fail.
  """
  cache_subdir = cache_dir + 'ionex/'
  t = time.as_datetime()
  url_bases = (
    'ftp://cddis.gsfc.nasa.gov/gnss/products/ionex/',
    'ftp://igs.ign.fr/pub/igs/products/ionosphere'
  )
  folder_path = t.strftime('%Y/%j/')
  candidates = (t.strftime("codg%j0.%yi"), t.strftime("c1pg%j0.%yi"), t.strftime("c2pg%j0.%yi"))
  last_err = None
  for filename in candidates:
    try:
      return download_file(url_bases, folder_path, cache_subdir, filename, compression='.Z')
    except IOError as e:
      last_err = e
  raise last_err
def download_dcb(time, cache_dir):
  """Download a differential code bias (DCB) file at or before `time`.

  These products are often missing, so up to 14 days back are tried;
  re-raises the last IOError when none is available.
  """
  cache_subdir = cache_dir + 'dcb/'
  url_bases = (
    'ftp://cddis.nasa.gov/gnss/products/bias/',
    'ftp://igs.ign.fr/pub/igs/products/mgex/dcb/'
  )
  last_err = None
  for day_time in [time - i*SECS_IN_DAY for i in range(14)]:
    try:
      t = day_time.as_datetime()
      folder_path = t.strftime('%Y/')
      filename = t.strftime("CAS0MGXRAP_%Y%j0000_01D_01D_DCB.BSX")
      return download_file(url_bases, folder_path, cache_subdir, filename, compression='.gz')
    except IOError as e:
      last_err = e
  raise last_err
def download_cors_coords(cache_dir):
  """Download every CORS station coordinate file ('*coord.txt') into the cache."""
  cache_subdir = cache_dir + 'cors_coord/'
  url_bases = (
    'ftp://geodesy.noaa.gov/cors/coord/coord_14/',
    'ftp://alt.ngs.noaa.gov/cors/coord/coord_14/'
  )
  coord_names = [name for name in list_dir(url_bases) if name.endswith('coord.txt')]
  return download_files(url_bases, '', cache_subdir, coord_names)
def download_cors_station(time, station_name, cache_dir):
  """Download a CORS station RINEX observation file for `time`.

  Returns the local path, or None (with a message) if unavailable.
  """
  cache_subdir = cache_dir + 'cors_obs/'
  t = time.as_datetime()
  folder_path = t.strftime('%Y/%j/') + station_name + '/'
  filename = station_name + t.strftime("%j0.%yo")
  url_bases = (
    'ftp://geodesy.noaa.gov/cors/rinex/',
    'ftp://alt.ngs.noaa.gov/cors/rinex/'
  )
  try:
    return download_file(url_bases, folder_path, cache_subdir, filename, compression='.gz')
  except IOError:
    print("File not downloaded, check availability on server.")
    return None
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,475
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/carcontrols/joystickd.py
|
#!/usr/bin/env python
# This process publishes joystick events. Such events can be suscribed by
# mocked car controller scripts.
### this process needs pygame and can't run on the EON ###
import pygame # pylint: disable=import-error
import cereal.messaging as messaging
def joystick_thread():
  """Poll the first attached joystick at ~100 Hz and publish its axes and
  buttons on the 'testJoystick' messaging socket.

  Raises ValueError unless exactly one joystick is attached. Runs forever.
  """
  joystick_sock = messaging.pub_sock('testJoystick')
  pygame.init()
  # Used to manage how fast the screen updates
  clock = pygame.time.Clock()
  # Initialize the joysticks
  pygame.joystick.init()
  # Get count of joysticks
  joystick_count = pygame.joystick.get_count()
  if joystick_count > 1:
    raise ValueError("More than one joystick attached")
  elif joystick_count < 1:
    raise ValueError("No joystick found")
  # -------- Main Program Loop -----------
  while True:
    # EVENT PROCESSING STEP
    for event in pygame.event.get(): # User did something
      if event.type == pygame.QUIT: # If user clicked close
        pass
      # Available joystick events: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
      if event.type == pygame.JOYBUTTONDOWN:
        print("Joystick button pressed.")
      if event.type == pygame.JOYBUTTONUP:
        print("Joystick button released.")
    # NOTE(review): the joystick object is re-created and re-initialized on
    # every loop iteration; presumably intentional to tolerate reconnects —
    # confirm before hoisting out of the loop.
    joystick = pygame.joystick.Joystick(0)
    joystick.init()
    # Usually axis run in pairs, up/down for one, and left/right for
    # the other.
    axes = []
    buttons = []
    for a in range(joystick.get_numaxes()):
      axes.append(joystick.get_axis(a))
    for b in range(joystick.get_numbuttons()):
      buttons.append(bool(joystick.get_button(b)))
    dat = messaging.new_message('testJoystick')
    dat.testJoystick.axes = axes
    dat.testJoystick.buttons = buttons
    joystick_sock.send(dat.to_bytes())
    # Limit to 100 frames per second
    clock.tick(100)
# Entry point: publish joystick events until interrupted.
if __name__ == "__main__":
  joystick_thread()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,476
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika_repo/laika/raw_gnss.py
|
import scipy.optimize as opt
import numpy as np
import datetime
from . import constants
from .lib.coordinates import LocalCoord
from .gps_time import GPSTime
from .helpers import rinex3_obs_from_rinex2_obs, \
get_nmea_id_from_prn, \
get_prn_from_nmea_id, \
get_constellation
def array_from_normal_meas(meas):
  """Serialize a GNSSMeasurement into a flat numpy array.

  Inverse of normal_meas_from_array; field order must stay in sync with it.
  """
  obs, obs_std = meas.observables, meas.observables_std
  fields = [get_nmea_id_from_prn(meas.prn),
            meas.recv_time_week,
            meas.recv_time_sec,
            meas.glonass_freq,
            obs['C1C'],
            obs_std['C1C'],
            obs['D1C'],
            obs_std['D1C'],
            obs['S1C'],
            obs['L1C']]
  return np.concatenate([[value] for value in fields])
def normal_meas_from_array(arr):
  """Deserialize an array made by array_from_normal_meas back into a
  GNSSMeasurement."""
  observables = {'C1C': arr[4], 'D1C': arr[6], 'S1C': arr[8], 'L1C': arr[9]}
  observables_std = {'C1C': arr[5], 'D1C': arr[7]}
  return GNSSMeasurement(get_prn_from_nmea_id(arr[0]), arr[1], arr[2],
                         observables, observables_std, arr[3])
class GNSSMeasurement(object):
  """One raw GNSS observation for a single satellite at a receive epoch.

  Holds the raw observables plus (after process()) satellite ephemeris
  info and (after correct()) atmosphere/clock-corrected observables.
  """
  # Column indices/slices for the flat array layout used by as_array().
  PRN = 0
  RECV_TIME_WEEK = 1
  RECV_TIME_SEC = 2
  GLONASS_FREQ = 3
  PR = 4
  PR_STD = 5
  PRR = 6
  PRR_STD = 7
  SAT_POS = slice(8, 11)
  SAT_VEL = slice(11, 14)
  def __init__(self, prn, recv_time_week, recv_time_sec,
               observables, observables_std, glonass_freq=np.nan):
    # Metadata
    self.prn = prn # satellite ID in RINEX convention
    self.recv_time_week = recv_time_week
    self.recv_time_sec = recv_time_sec
    self.recv_time = GPSTime(recv_time_week, recv_time_sec)
    self.glonass_freq = glonass_freq # glonass channel
    # Measurements
    self.observables = observables
    self.observables_std = observables_std
    # flags
    self.processed = False
    self.corrected = False
    # sat info (populated by process()/correct(); NaN until then)
    self.sat_pos = np.nan * np.ones(3)
    self.sat_vel = np.nan * np.ones(3)
    self.sat_clock_err = np.nan
    self.sat_pos_final = np.nan * np.ones(3) # sat_pos in receiver time's ECEF frame instead of satellite time's ECEF frame
    self.observables_final = {}
  def process(self, dog):
    """Fill in satellite position/velocity/clock error at transmit time.

    Transmit time is receive time minus pseudorange travel time; `dog`
    supplies the ephemeris lookup. Returns True on success.
    """
    sat_time = self.recv_time - self.observables['C1C']/constants.SPEED_OF_LIGHT
    sat_info = dog.get_sat_info(self.prn, sat_time)
    if sat_info is None:
      return False
    else:
      self.sat_pos = sat_info[0]
      self.sat_vel = sat_info[1]
      self.sat_clock_err = sat_info[2]
      self.processed = True
      return True
  def correct(self, est_pos, dog):
    """Apply satellite clock and atmospheric delay corrections.

    Builds observables_final for code observations, an iono-free 'IOF'
    combination when both C1C and C2P are present, and sat_pos_final
    (satellite position rotated by the Earth rotation accumulated during
    signal flight). Returns True when a finite corrected C1C results.
    """
    for obs in self.observables:
      if obs[0] == 'C': # or obs[0] == 'L':
        delay = dog.get_delay(self.prn, self.recv_time, est_pos, signal=obs)
        if delay:
          self.observables_final[obs] = (self.observables[obs] +
                                         self.sat_clock_err*constants.SPEED_OF_LIGHT -
                                         delay)
        else:
          # No delay available: keep the raw observable.
          self.observables_final[obs] = self.observables[obs]
    if 'C1C' in self.observables_final and 'C2P' in self.observables_final:
      # Dual-frequency ionosphere-free combination.
      self.observables_final['IOF'] = (((constants.GPS_L1**2)*self.observables_final['C1C'] -
                                        (constants.GPS_L2**2)*self.observables_final['C2P'])/
                                       (constants.GPS_L1**2 - constants.GPS_L2**2))
    geometric_range = np.linalg.norm(self.sat_pos - est_pos)
    theta_1 = constants.EARTH_ROTATION_RATE*(geometric_range)/constants.SPEED_OF_LIGHT
    self.sat_pos_final = [self.sat_pos[0]*np.cos(theta_1) + self.sat_pos[1]*np.sin(theta_1),
                          self.sat_pos[1]*np.cos(theta_1) - self.sat_pos[0]*np.sin(theta_1),
                          self.sat_pos[2]]
    if 'C1C' in self.observables_final and np.isfinite(self.observables_final['C1C']):
      self.corrected = True
      return True
    else:
      return False
  def as_array(self):
    """Flatten a corrected measurement into a numpy array (see the class
    index constants for the layout). Raises if not yet corrected."""
    if not self.corrected:
      raise NotImplementedError('Only corrected measurements can be put into arrays')
    else:
      ret = np.array([get_nmea_id_from_prn(self.prn), self.recv_time_week, self.recv_time_sec, self.glonass_freq,
                      self.observables_final['C1C'], self.observables_std['C1C'],
                      self.observables_final['D1C'], self.observables_std['D1C']])
      ret = np.concatenate((ret, self.sat_pos_final, self.sat_vel))
      return ret
def process_measurements(measurements, dog=None):
  """Run .process(dog) on each measurement, keeping only those that succeed."""
  return [meas for meas in measurements if meas.process(dog)]
def correct_measurements(measurements, est_pos, dog=None):
  """Run .correct(est_pos, dog) on each measurement, keeping successes."""
  return [meas for meas in measurements if meas.correct(est_pos, dog)]
def group_measurements_by_epoch(measurements):
  """Split a time-ordered measurement list into sublists per receive epoch.

  A new group starts whenever consecutive recv_time values differ by more
  than 1e-9. Assumes at least one measurement.
  """
  groups = [[measurements[0]]]
  for meas in measurements[1:]:
    if abs(meas.recv_time - groups[-1][-1].recv_time) > 1e-9:
      groups.append([meas])
    else:
      groups[-1].append(meas)
  return groups
def group_measurements_by_sat(measurements):
  """Return a dict mapping each prn to its measurements, in input order."""
  by_sat = {}
  for meas in measurements:
    by_sat.setdefault(meas.prn, []).append(meas)
  return by_sat
def read_raw_qcom(report):
  """Convert a qcom GNSS measurement report into GNSSMeasurement objects.

  Unusable measurements and those without known satellite time are
  skipped. S1C and L1C are not provided by this source and are set NaN.
  """
  recv_tow = (report.gpsMilliseconds) * 1.0 / 1000.0 # seconds
  recv_week = report.gpsWeek
  recv_time = GPSTime(recv_week, recv_tow)
  measurements = []
  for i in report.sv:
    svId = i.svId
    if not i.measurementStatus.measurementNotUsable and i.measurementStatus.satelliteTimeIsKnown:
      # Satellite transmit time-of-week, reported in milliseconds.
      sat_tow = (
        i.unfilteredMeasurementIntegral + i.unfilteredMeasurementFraction) / 1000
      sat_time = GPSTime(recv_week, sat_tow)
      observables, observables_std = {}, {}
      # Pseudorange is the receive/transmit time difference times c.
      observables['C1C'] = (recv_time - sat_time)*constants.SPEED_OF_LIGHT
      observables_std['C1C'] = i.unfilteredTimeUncertainty * 1e-3 * constants.SPEED_OF_LIGHT
      observables['D1C'] = i.unfilteredSpeed
      observables_std['D1C'] = i.unfilteredSpeedUncertainty
      observables['S1C'] = np.nan
      observables['L1C'] = np.nan
      measurements.append(GNSSMeasurement(get_prn_from_nmea_id(svId),
                                          recv_time.week,
                                          recv_time.tow,
                                          observables,
                                          observables_std))
  return measurements
def read_raw_ublox(report):
  """Convert a ublox raw measurement report into GNSSMeasurement objects.

  Only GPS (gnssId 0) and GLONASS (gnssId 6) signals are kept; entries
  with implausible svId or pseudorange values are skipped.
  """
  recv_time = GPSTime(report.gpsWeek, report.rcvTow)
  measurements = []
  for m in report.measurements:
    # only add gps and glonass fixes
    if m.gnssId != 0 and m.gnssId != 6:
      continue
    if m.svId > 32 or m.pseudorange > 2**32:
      continue
    prn = ('G%02i' % m.svId) if m.gnssId == 0 else ('R%02i' % m.svId)
    observables = {}
    observables_std = {}
    if m.trackingStatus.pseudorangeValid and m.sigId == 0:
      observables['C1C'] = m.pseudorange
      # Empirically it seems obvious ublox's std is
      # actually a variation
      observables_std['C1C'] = np.sqrt(m.pseudorangeStdev)*10
    if m.gnssId == 6:
      glonass_freq = m.glonassFrequencyIndex - 7
      observables['D1C'] = -(constants.SPEED_OF_LIGHT / (constants.GLONASS_L1 + glonass_freq*constants.GLONASS_L1_DELTA)) * (m.doppler)
    else:
      glonass_freq = np.nan
      observables['D1C'] = -(constants.SPEED_OF_LIGHT / constants.GPS_L1) * (m.doppler)
    observables_std['D1C'] = (constants.SPEED_OF_LIGHT / constants.GPS_L1) * m.dopplerStdev * 1
    observables['S1C'] = m.cno
    observables['L1C'] = m.carrierCycles if m.trackingStatus.carrierPhaseValid else np.nan
    measurements.append(GNSSMeasurement(prn,
                                        recv_time.week,
                                        recv_time.tow,
                                        observables,
                                        observables_std,
                                        glonass_freq))
  return measurements
def read_rinex_obs(obsdata):
  """Convert parsed RINEX observation data into per-epoch lists of
  GNSSMeasurement objects.

  Epoch timestamps are taken from the first satellite's 'Epochs' record;
  satellites with NaN C1 at an epoch are skipped. All observable stds are
  set to 1 (RINEX files carry no uncertainty information).
  """
  measurements = []
  first_sat = list(obsdata.data.keys())[0]
  n = len(obsdata.data[first_sat]['Epochs'])
  for i in range(0, n):
    recv_time_datetime = obsdata.data[first_sat]['Epochs'][i]
    # Epochs are numpy datetime64; convert to a python datetime.
    recv_time_datetime = recv_time_datetime.astype(datetime.datetime)
    recv_time = GPSTime.from_datetime(recv_time_datetime)
    measurements.append([])
    for sat_str in list(obsdata.data.keys()):
      if np.isnan(obsdata.data[sat_str]['C1'][i]):
        continue
      observables, observables_std = {}, {}
      for obs in obsdata.data[sat_str]:
        if obs == 'Epochs':
          continue
        # Observation codes are translated from RINEX2 to RINEX3 naming.
        observables[rinex3_obs_from_rinex2_obs(obs)] = obsdata.data[sat_str][obs][i]
        observables_std[rinex3_obs_from_rinex2_obs(obs)] = 1
      measurements[-1].append(GNSSMeasurement(get_prn_from_nmea_id(int(sat_str)),
                                              recv_time.week,
                                              recv_time.tow,
                                              observables,
                                              observables_std))
  return measurements
def calc_pos_fix(measurements, x0=None, no_weight=False, signal='C1C'):
  '''
  Calculates gps fix with WLS optimizer

  returns:
    0 -> list with positions
    1 -> pseudorange errs

  Returns [] when fewer than 6 measurements are available (not enough to
  solve for position plus the two clock-bias terms).
  '''
  # Replace the original mutable default argument ([0, 0, 0, 0, 0]) with a
  # None sentinel; behavior for callers is unchanged.
  if x0 is None:
    x0 = [0, 0, 0, 0, 0]
  n = len(measurements)
  if n < 6:
    return []
  Fx_pos = pr_residual(measurements, signal=signal, no_weight=no_weight, no_nans=True)
  opt_pos = opt.least_squares(Fx_pos, x0).x
  # Return the solution and its unweighted residuals.
  return opt_pos, Fx_pos(opt_pos, no_weight=True)
def calc_vel_fix(measurements, est_pos, v0=None, no_weight=False, signal='D1C'):
  '''
  Calculates gps velocity fix with WLS optimizer

  returns:
    0 -> list with velocities
    1 -> pseudorange_rate errs

  Returns [] when fewer than 6 measurements are available.
  '''
  # Replace the original mutable default argument ([0, 0, 0, 0]) with a
  # None sentinel; behavior for callers is unchanged.
  if v0 is None:
    v0 = [0, 0, 0, 0]
  n = len(measurements)
  if n < 6:
    return []
  Fx_vel = prr_residual(measurements, est_pos, no_weight=no_weight, no_nans=True)
  opt_vel = opt.least_squares(Fx_vel, v0).x
  # Return the solution and its unweighted residuals.
  return opt_vel, Fx_vel(opt_vel, no_weight=True)
def pr_residual(measurements, signal='C1C', no_weight=False, no_nans=False):
  """Build a pseudorange residual function for WLS position solving.

  The returned closure maps (x, y, z, bc, bg) — receiver ECEF position,
  receiver clock bias and an extra GLONASS bias — to a list of weighted
  pseudorange residuals. Measurements without a usable observable yield
  NaN rows unless no_nans is set.
  """
  # solve for pos
  def Fx_pos(xxx_todo_changeme, no_weight=no_weight):
    (x, y, z, bc, bg) = xxx_todo_changeme
    rows = []
    for meas in measurements:
      # Prefer fully corrected observables; fall back to processed raw
      # observables with the satellite clock error applied inline.
      if signal in meas.observables_final and np.isfinite(meas.observables_final[signal]):
        pr = meas.observables_final[signal]
        sat_pos = meas.sat_pos_final
        theta = 0
      elif signal in meas.observables and np.isfinite(meas.observables[signal]) and meas.processed:
        pr = meas.observables[signal]
        pr += meas.sat_clock_err * constants.SPEED_OF_LIGHT
        sat_pos = meas.sat_pos
        # Earth rotation angle accumulated during signal flight time.
        theta = constants.EARTH_ROTATION_RATE * (pr - bc) / constants.SPEED_OF_LIGHT
      else:
        if not no_nans:
          rows.append(np.nan)
        continue
      if no_weight:
        weight = 1
      else:
        # Weight by inverse observable standard deviation.
        weight = (1 / meas.observables_std[signal])
      # GLONASS rows include the extra inter-constellation bias bg.
      if get_constellation(meas.prn) == 'GLONASS':
        rows.append(weight * (np.sqrt(
          (sat_pos[0] * np.cos(theta) + sat_pos[1] * np.sin(theta) - x)**2 +
          (sat_pos[1] * np.cos(theta) - sat_pos[0] * np.sin(theta) - y)**2 +
          (sat_pos[2] - z)**2) - (pr - bc - bg)))
      elif get_constellation(meas.prn) == 'GPS':
        rows.append(weight * (np.sqrt(
          (sat_pos[0] * np.cos(theta) + sat_pos[1] * np.sin(theta) - x)**2 +
          (sat_pos[1] * np.cos(theta) - sat_pos[0] * np.sin(theta) - y)**2 +
          (sat_pos[2] - z)**2) - (pr - bc)))
    return rows
  return Fx_pos
def prr_residual(measurements, est_pos, signal='D1C', no_weight=False, no_nans=False):
  '''
  Builds the residual function for a WLS velocity solve.

  The returned callable maps vel = (vx, vy, vz, clock_drift) to one
  pseudorange-rate residual per measurement. Measurements lacking a finite
  `signal` observable contribute np.nan, or are skipped when no_nans is set.
  '''
  def Fx_vel(vel, no_weight=no_weight):
    rows = []
    for meas in measurements:
      # Skip (or nan-fill) measurements without a usable pseudorange rate.
      if signal not in meas.observables or not np.isfinite(meas.observables[signal]):
        if not no_nans:
          rows.append(np.nan)
        continue
      prr = meas.observables[signal]
      sat_pos = meas.sat_pos_final if meas.corrected else meas.sat_pos
      weight = 1 if no_weight else 1 / prr
      # Project the satellite velocity relative to the receiver onto the
      # line of sight from the estimated position.
      delta = sat_pos - est_pos[0:3]
      los_vector = delta / np.linalg.norm(delta)
      rows.append(weight * ((meas.sat_vel - vel[0:3]).dot(los_vector) - (prr - vel[3])))
    return rows
  return Fx_vel
def get_Q(recv_pos, sat_positions):
  '''
  Cofactor matrix inv(A^T A) of the NED unit-vector geometry matrix, used
  by the DOP helpers. Returns a 4x4 matrix of inf when the geometry is
  degenerate (fewer than 4 satellites, or rank-deficient A).
  '''
  local = LocalCoord.from_ecef(recv_pos)
  ned = local.ecef2ned(sat_positions)
  dists = np.linalg.norm(ned, axis=1)
  # Unit line-of-sight vectors, one row per satellite.
  unit = ned / dists[:, np.newaxis]
  A = np.column_stack((unit, -np.ones(len(dists))))
  if A.shape[0] < 4 or np.linalg.matrix_rank(A) < 4:
    return np.inf * np.ones((4, 4))
  return np.linalg.inv(A.T.dot(A))
def get_DOP(recv_pos, sat_positions):
  '''Geometric DOP: sqrt of the full trace of the cofactor matrix.'''
  return np.sqrt(np.trace(get_Q(recv_pos, sat_positions)))
def get_HDOP(recv_pos, sat_positions):
  '''Horizontal DOP: sqrt of the trace of the 2x2 N/E sub-block of Q.'''
  return np.sqrt(np.trace(get_Q(recv_pos, sat_positions)[:2, :2]))
def get_VDOP(recv_pos, sat_positions):
  '''Vertical DOP: sqrt of the down-axis diagonal element of Q.'''
  return np.sqrt(get_Q(recv_pos, sat_positions)[2, 2])
def get_TDOP(recv_pos, sat_positions):
  '''Time DOP: sqrt of the clock-axis diagonal element of Q.'''
  return np.sqrt(get_Q(recv_pos, sat_positions)[3, 3])
def get_PDOP(recv_pos, sat_positions):
  '''Position DOP: sqrt of the trace of the 3x3 position sub-block of Q.'''
  return np.sqrt(np.trace(get_Q(recv_pos, sat_positions)[:3, :3]))
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,477
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika/helpers.py
|
import numpy as np
from .lib.coordinates import LocalCoord
# NMEA satellite-id layout: nmea_id = PRN number + constellation offset
# (see get_prn_from_nmea_id / get_nmea_id_from_prn below).
GPS_OFFSET = 0
GLONASS_OFFSET = 64
GALILEO_OFFSET = 96
QZNSS_OFFSET = 192
BEIDOU_OFFSET = 200
# Number of PRN slots allocated to each constellation.
GPS_SIZE = 32
GLONASS_SIZE = 28
GALILEO_SIZE = 36
QZNSS_SIZE = 4
BEIDOU_SIZE = 14
def get_el_az(pos, sat_pos):
  '''Return (elevation, azimuth) in radians of sat_pos seen from ECEF pos.'''
  ned = LocalCoord.from_ecef(pos).ecef2ned(sat_pos)
  dist = np.linalg.norm(ned)
  # In NED the third component points down, hence the sign flip.
  elevation = np.arcsin(-ned[2] / dist)
  azimuth = np.arctan2(ned[1], ned[0])
  return elevation, azimuth
def get_closest(time, candidates, recv_pos=None):
  '''
  Without recv_pos: return the candidate (each having an `epoch` GPSTime)
  whose epoch is nearest to `time`. With recv_pos: return the candidate
  that is valid for (time, recv_pos) and whose `pos` is nearest to
  recv_pos. Returns None when nothing qualifies.
  '''
  closest = None
  best = np.inf
  if recv_pos is None:
    for cand in candidates:
      dt = abs(time - cand.epoch)
      if dt < best:
        best = dt
        closest = cand
  else:
    for cand in candidates:
      dist = np.linalg.norm(recv_pos - cand.pos)
      # Distance check first: validity is only evaluated for improvements.
      if dist < best and cand.valid(time, recv_pos):
        best = dist
        closest = cand
  return closest
def get_constellation(prn):
  '''Map a RINEX3 PRN string (e.g. 'G01') to its constellation name.'''
  identifiers = {'G': 'GPS',
                 'R': 'GLONASS',
                 'E': 'GALILEO',
                 'J': 'QZNSS',
                 'C': 'BEIDOU'}
  if prn[0] not in identifiers:
    raise NotImplementedError('The constellation of RINEX3 constellation identifier: %s not known' % prn[0])
  return identifiers[prn[0]]
def get_prn_from_nmea_id(nmea_id):
  '''Map an NMEA satellite id to its RINEX3 PRN string (e.g. 1 -> 'G01').'''
  # (prefix, offset, size) per constellation; membership is tested against
  # the same shifted np.arange the original used, preserving its semantics.
  ranges = (('G', GPS_OFFSET, GPS_SIZE),
            ('R', GLONASS_OFFSET, GLONASS_SIZE),
            ('E', GALILEO_OFFSET, GALILEO_SIZE),
            ('J', QZNSS_OFFSET, QZNSS_SIZE),
            ('C', BEIDOU_OFFSET, BEIDOU_SIZE))
  for prefix, offset, size in ranges:
    if nmea_id in np.arange(1, size + 1) + offset:
      return '%s%02i' % (prefix, nmea_id - offset)
  raise NotImplementedError("RINEX PRN for nmea id %i not known" % nmea_id)
def get_nmea_id_from_prn(prn):
  '''Map a RINEX3 PRN string (e.g. 'G01') to its NMEA satellite id.'''
  offsets = {'G': GPS_OFFSET,      # GPS record
             'R': GLONASS_OFFSET,  # glonass record
             'E': GALILEO_OFFSET,  # galileo record
             'J': QZNSS_OFFSET,    # QZNSS record
             'C': BEIDOU_OFFSET}   # Beidou record
  if prn[0] not in offsets:
    raise NotImplementedError("RINEX constelletion identifier %s not supported by laika" % prn[0])
  return int(prn[1:]) + offsets[prn[0]]
def get_prns_from_constellation(constellation):
  '''
  Return every PRN string belonging to a constellation name, e.g.
  'GPS' -> ['G01', ..., 'G32'].
  '''
  tables = {'GPS': ('G', GPS_SIZE),
            'GLONASS': ('R', GLONASS_SIZE),
            'GALILEO': ('E', GALILEO_SIZE),
            'QZNSS': ('J', QZNSS_SIZE),
            'BEIDOU': ('C', BEIDOU_SIZE)}
  if constellation in tables:
    prefix, size = tables[constellation]
    return [prefix + str(n).zfill(2) for n in range(1, size + 1)]
  # NOTE(review): unknown constellation names fall through and return None,
  # matching the original's implicit behavior.
def rinex3_obs_from_rinex2_obs(observable):
  '''
  Translate a RINEX2 observable code to its RINEX3 equivalent.

  'P2' maps to 'C2P'; other two-character codes get a 'C' tracking-mode
  suffix appended. Anything else raises NotImplementedError.
  '''
  if observable == 'P2':
    return 'C2P'
  if len(observable) != 2:
    raise NotImplementedError("Don't know this: " + observable)
  return observable + 'C'
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,478
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/test/process_replay/update_model.py
|
#!/usr/bin/env python3
import os
import sys
from selfdrive.test.openpilotci import upload_file
from selfdrive.test.process_replay.compare_logs import save_log
from selfdrive.test.process_replay.test_processes import segments, get_segment
from selfdrive.version import get_git_commit
from tools.lib.logreader import LogReader
from selfdrive.test.process_replay.inject_model import inject_model
if __name__ == "__main__":
  # Regenerate model reference logs for every test segment, pinned to the
  # current git commit, and (optionally) upload them to CI storage.
  no_upload = "--no-upload" in sys.argv

  # Record the commit the new reference logs belong to.
  process_replay_dir = os.path.dirname(os.path.abspath(__file__))
  ref_commit_fn = os.path.join(process_replay_dir, "model_ref_commit")
  ref_commit = get_git_commit()
  if ref_commit is None:
    raise Exception("couldn't get ref commit")
  with open(ref_commit_fn, "w") as f:
    f.write(ref_commit)

  for car_brand, segment in segments:
    rlog_fn = get_segment(segment, original=True)
    if rlog_fn is None:
      print("failed to get segment %s" % segment)
      sys.exit(1)

    lr = LogReader(rlog_fn)
    print('injecting model into % s' % segment)
    lr = inject_model(lr, segment)

    route_name, segment_num = segment.rsplit("--", 1)
    log_fn = "%s/%s/rlog_%s.bz2" % (route_name.replace("|", "/"), segment_num, ref_commit)
    tmp_name = 'tmp_%s_%s' % (route_name, segment_num)
    save_log(tmp_name, lr)
    if not no_upload:
      upload_file(tmp_name, log_fn)
      # BUG FIX: was print('uploaded %s', log_fn) -- logging-style args,
      # which printed the literal "%s"; format the string instead.
      print('uploaded %s' % log_fn)
      os.remove(tmp_name)

    os.remove(rlog_fn)

  print("done")
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,479
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/car/volkswagen/carstate.py
|
import numpy as np
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.car.interfaces import CarStateBase
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from selfdrive.car.volkswagen.values import DBC, CANBUS, NWL, TRANS, GEAR, BUTTON_STATES, CarControllerParams
class CarState(CarStateBase):
  def __init__(self, CP):
    """Wire up platform-specific parsing for this car.

    Binds self.update / self.get_can_parser / self.get_cam_can_parser to
    either the PQ or MQB variants based on the fingerprinted safety model,
    and resolves the gear-shifter value table for the transmission type.
    """
    super().__init__(CP)
    # DBC signal-value definitions for this car's powertrain bus.
    can_define = CANDefine(DBC[CP.carFingerprint]['pt'])
    if CP.safetyModel == car.CarParams.SafetyModel.volkswagenPq:
      # Configure for PQ35/PQ46/NMS network messaging
      self.get_can_parser = self.get_pq_can_parser
      self.get_cam_can_parser = self.get_pq_cam_can_parser
      self.update = self.update_pq
      if CP.transmissionType == TRANS.automatic:
        self.shifter_values = can_define.dv["Getriebe_1"]['Waehlhebelposition__Getriebe_1_']
    else:
      # Configure for MQB network messaging (default)
      self.get_can_parser = self.get_mqb_can_parser
      self.get_cam_can_parser = self.get_mqb_cam_can_parser
      self.update = self.update_mqb
      if CP.transmissionType == TRANS.automatic:
        self.shifter_values = can_define.dv["Getriebe_11"]['GE_Fahrstufe']
      elif CP.transmissionType == TRANS.direct:
        self.shifter_values = can_define.dv["EV_Gearshift"]['GearPosition']
    # NOTE(review): manual transmissions leave self.shifter_values unset;
    # the update functions only read it for automatic/direct gearboxes.
    # Per-instance copy so button states can be mutated during updates.
    self.buttonStates = BUTTON_STATES.copy()
  def update_mqb(self, pt_cp, cam_cp, acc_cp, trans_type):
    """Parse MQB-platform CAN data into a CarState message.

    Args:
      pt_cp: powertrain-bus CAN parser
      cam_cp: camera-bus CAN parser (LDW messages)
      acc_cp: CAN parser carrying ACC/radar messages (ACC_02/ACC_10/SWA_01)
      trans_type: transmission type (TRANS enum) chosen at fingerprint time

    Returns:
      A populated car.CarState message; also updates per-instance state
      (buttonStates, LDW fields, GRA passthru fields, steeringFault, ...).
    """
    ret = car.CarState.new_message()
    # Update vehicle speed and acceleration from ABS wheel speeds.
    ret.wheelSpeeds.fl = pt_cp.vl["ESP_19"]['ESP_VL_Radgeschw_02'] * CV.KPH_TO_MS
    ret.wheelSpeeds.fr = pt_cp.vl["ESP_19"]['ESP_VR_Radgeschw_02'] * CV.KPH_TO_MS
    ret.wheelSpeeds.rl = pt_cp.vl["ESP_19"]['ESP_HL_Radgeschw_02'] * CV.KPH_TO_MS
    ret.wheelSpeeds.rr = pt_cp.vl["ESP_19"]['ESP_HR_Radgeschw_02'] * CV.KPH_TO_MS
    ret.vEgoRaw = float(np.mean([ret.wheelSpeeds.fl, ret.wheelSpeeds.fr, ret.wheelSpeeds.rl, ret.wheelSpeeds.rr]))
    ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
    ret.standstill = ret.vEgoRaw < 0.1
    # Update steering angle, rate, yaw rate, and driver input torque. VW send
    # the sign/direction in a separate signal so they must be recombined.
    ret.steeringAngle = pt_cp.vl["LH_EPS_03"]['EPS_Berechneter_LW'] * (1, -1)[int(pt_cp.vl["LH_EPS_03"]['EPS_VZ_BLW'])]
    ret.steeringRate = pt_cp.vl["LWI_01"]['LWI_Lenkradw_Geschw'] * (1, -1)[int(pt_cp.vl["LWI_01"]['LWI_VZ_Lenkradw_Geschw'])]
    ret.steeringTorque = pt_cp.vl["LH_EPS_03"]['EPS_Lenkmoment'] * (1, -1)[int(pt_cp.vl["LH_EPS_03"]['EPS_VZ_Lenkmoment'])] * 100.0
    ret.steeringPressed = abs(ret.steeringTorque) > CarControllerParams.STEER_DRIVER_ALLOWANCE
    ret.yawRate = pt_cp.vl["ESP_02"]['ESP_Gierrate'] * (1, -1)[int(pt_cp.vl["ESP_02"]['ESP_VZ_Gierrate'])] * CV.DEG_TO_RAD
    # Update gas, brakes, and gearshift.
    ret.gas = pt_cp.vl["Motor_20"]['MO_Fahrpedalrohwert_01'] / 100.0
    ret.gasPressed = ret.gas > 0
    ret.brake = pt_cp.vl["ESP_05"]['ESP_Bremsdruck'] / 250.0  # FIXME: this is pressure in Bar, not sure what OP expects
    ret.brakePressed = bool(pt_cp.vl["ESP_05"]['ESP_Fahrer_bremst'])
    ret.brakeLights = bool(pt_cp.vl["ESP_05"]['ESP_Status_Bremsdruck'])
    # Additional safety checks performed in CarInterface.
    self.parkingBrakeSet = bool(pt_cp.vl["Kombi_01"]['KBI_Handbremse'])  # FIXME: need to include an EPB check as well
    ret.espDisabled = pt_cp.vl["ESP_21"]['ESP_Tastung_passiv'] != 0
    # Update gear and/or clutch position data.
    if trans_type == TRANS.automatic:
      ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl["Getriebe_11"]['GE_Fahrstufe'], None))
    elif trans_type == TRANS.direct:
      ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl["EV_Gearshift"]['GearPosition'], None))
    elif trans_type == TRANS.manual:
      ret.clutchPressed = not pt_cp.vl["Motor_14"]['MO_Kuppl_schalter']
      # Manual gearboxes have no shifter position signal; infer reverse
      # from the reverse light and otherwise report drive.
      reverse_light = bool(pt_cp.vl["Gateway_72"]['BCM1_Rueckfahrlicht_Schalter'])
      if reverse_light:
        ret.gearShifter = GEAR.reverse
      else:
        ret.gearShifter = GEAR.drive
    # Update door and trunk/hatch lid open status.
    ret.doorOpen = any([pt_cp.vl["Gateway_72"]['ZV_FT_offen'],
                        pt_cp.vl["Gateway_72"]['ZV_BT_offen'],
                        pt_cp.vl["Gateway_72"]['ZV_HFS_offen'],
                        pt_cp.vl["Gateway_72"]['ZV_HBFS_offen'],
                        pt_cp.vl["Gateway_72"]['ZV_HD_offen']])
    # Update seatbelt fastened status.
    ret.seatbeltUnlatched = pt_cp.vl["Airbag_02"]["AB_Gurtschloss_FA"] != 3
    # Update driver preference for metric. VW stores many different unit
    # preferences, including separate units for for distance vs. speed.
    # We use the speed preference for OP.
    self.displayMetricUnits = not pt_cp.vl["Einheiten_01"]["KBI_MFA_v_Einheit_02"]
    # Stock FCW is considered active if a warning is displayed to the driver
    # or the release bit for brake-jerk warning is set. Stock AEB considered
    # active if the partial braking or target braking release bits are set.
    # Ref: VW SSP 890253 "Volkswagen Driver Assistance Systems V2", "Front
    # Assist with Braking: Golf Family" (applies to all MQB)
    ret.stockFcw = bool(acc_cp.vl["ACC_10"]["AWV2_Freigabe"])
    ret.stockAeb = any([bool(acc_cp.vl["ACC_10"]["ANB_Teilbremsung_Freigabe"]),
                        bool(acc_cp.vl["ACC_10"]["ANB_Zielbremsung_Freigabe"])])
    # Consume blind-spot radar info/warning LED states, if available
    ret.leftBlindspot = any([bool(acc_cp.vl["SWA_01"]["SWA_Infostufe_SWA_li"]),
                             bool(acc_cp.vl["SWA_01"]["SWA_Warnung_SWA_li"])])
    ret.rightBlindspot = any([bool(acc_cp.vl["SWA_01"]["SWA_Infostufe_SWA_re"]),
                              bool(acc_cp.vl["SWA_01"]["SWA_Warnung_SWA_re"])])
    # Consume SWA (Lane Change Assist) relevant info from factory LDW message
    # to pass along to the blind spot radar controller
    self.ldw_lane_warning_left = bool(cam_cp.vl["LDW_02"]["LDW_SW_Warnung_links"])
    self.ldw_lane_warning_right = bool(cam_cp.vl["LDW_02"]["LDW_SW_Warnung_rechts"])
    self.ldw_side_dlc_tlc = bool(cam_cp.vl["LDW_02"]["LDW_Seite_DLCTLC"])
    self.ldw_dlc = cam_cp.vl["LDW_02"]["LDW_DLC"]
    self.ldw_tlc = cam_cp.vl["LDW_02"]["LDW_TLC"]
    # Update ACC radar status.
    accStatus = pt_cp.vl["TSK_06"]['TSK_Status']
    if accStatus == 2:
      # ACC okay and enabled, but not currently engaged
      ret.cruiseState.available = True
      ret.cruiseState.enabled = False
    elif accStatus in [3, 4, 5]:
      # ACC okay and enabled, currently engaged and regulating speed (3) or engaged with driver accelerating (4) or overrun (5)
      ret.cruiseState.available = True
      ret.cruiseState.enabled = True
    else:
      # ACC okay but disabled (1), or a radar visibility or other fault/disruption (6 or 7)
      ret.cruiseState.available = False
      ret.cruiseState.enabled = False
    # Update ACC setpoint. When the setpoint is zero or there's an error, the
    # radar sends a set-speed of ~90.69 m/s / 203mph.
    ret.cruiseState.speed = acc_cp.vl["ACC_02"]["ACC_Wunschgeschw"] * CV.KPH_TO_MS
    if ret.cruiseState.speed > 90:
      # Treat the radar's no-setpoint/error sentinel as "no set speed".
      ret.cruiseState.speed = 0
    # Update control button states for turn signals and ACC controls.
    self.buttonStates["accelCruise"] = bool(pt_cp.vl["GRA_ACC_01"]['GRA_Tip_Hoch'])
    self.buttonStates["decelCruise"] = bool(pt_cp.vl["GRA_ACC_01"]['GRA_Tip_Runter'])
    self.buttonStates["cancel"] = bool(pt_cp.vl["GRA_ACC_01"]['GRA_Abbrechen'])
    self.buttonStates["setCruise"] = bool(pt_cp.vl["GRA_ACC_01"]['GRA_Tip_Setzen'])
    self.buttonStates["resumeCruise"] = bool(pt_cp.vl["GRA_ACC_01"]['GRA_Tip_Wiederaufnahme'])
    self.buttonStates["gapAdjustCruise"] = bool(pt_cp.vl["GRA_ACC_01"]['GRA_Verstellung_Zeitluecke'])
    ret.leftBlinker = bool(pt_cp.vl["Gateway_72"]['BH_Blinker_li'])
    ret.rightBlinker = bool(pt_cp.vl["Gateway_72"]['BH_Blinker_re'])
    # Read ACC hardware button type configuration info that has to pass thru
    # to the radar. Ends up being different for steering wheel buttons vs
    # third stalk type controls.
    self.graHauptschalter = pt_cp.vl["GRA_ACC_01"]['GRA_Hauptschalter']
    self.graTypHauptschalter = pt_cp.vl["GRA_ACC_01"]['GRA_Typ_Hauptschalter']
    self.graButtonTypeInfo = pt_cp.vl["GRA_ACC_01"]['GRA_ButtonTypeInfo']
    self.graTipStufe2 = pt_cp.vl["GRA_ACC_01"]['GRA_Tip_Stufe_2']
    self.graTyp468 = pt_cp.vl["GRA_ACC_01"]['GRA_Typ468']
    # Pick up the GRA_ACC_01 CAN message counter so we can sync to it for
    # later cruise-control button spamming.
    self.graMsgBusCounter = pt_cp.vl["GRA_ACC_01"]['COUNTER']
    # Check to make sure the electric power steering rack is configured to
    # accept and respond to HCA_01 messages and has not encountered a fault.
    self.steeringFault = not pt_cp.vl["LH_EPS_03"]["EPS_HCA_Status"]
    return ret
  def update_pq(self, pt_cp, cam_cp, acc_cp, trans_type):
    """Parse PQ35/PQ46/NMS-platform CAN data into a CarState message.

    Args:
      pt_cp: powertrain-bus CAN parser
      cam_cp: camera-bus CAN parser (unused on PQ; LDW fields are zeroed)
      acc_cp: CAN parser carrying the ACC display message
      trans_type: transmission type (TRANS enum) chosen at fingerprint time

    Returns:
      A populated car.CarState message; also updates per-instance state
      (buttonStates, GRA passthru fields, steeringFault, ...).
    """
    ret = car.CarState.new_message()
    # Update vehicle speed and acceleration from ABS wheel speeds.
    ret.wheelSpeeds.fl = pt_cp.vl["Bremse_3"]['Radgeschw__VL_4_1'] * CV.KPH_TO_MS
    ret.wheelSpeeds.fr = pt_cp.vl["Bremse_3"]['Radgeschw__VR_4_1'] * CV.KPH_TO_MS
    ret.wheelSpeeds.rl = pt_cp.vl["Bremse_3"]['Radgeschw__HL_4_1'] * CV.KPH_TO_MS
    ret.wheelSpeeds.rr = pt_cp.vl["Bremse_3"]['Radgeschw__HR_4_1'] * CV.KPH_TO_MS
    ret.vEgoRaw = float(np.mean([ret.wheelSpeeds.fl, ret.wheelSpeeds.fr, ret.wheelSpeeds.rl, ret.wheelSpeeds.rr]))
    ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
    ret.standstill = ret.vEgoRaw < 0.1
    # Update steering angle, rate, yaw rate, and driver input torque. VW send
    # the sign/direction in a separate signal so they must be recombined.
    ret.steeringAngle = pt_cp.vl["Lenkhilfe_3"]['LH3_BLW'] * (1, -1)[int(pt_cp.vl["Lenkhilfe_3"]['LH3_BLWSign'])]
    ret.steeringRate = pt_cp.vl["Lenkwinkel_1"]['Lenkradwinkel_Geschwindigkeit'] * (1, -1)[int(pt_cp.vl["Lenkwinkel_1"]['Lenkradwinkel_Geschwindigkeit_S'])]
    ret.steeringTorque = pt_cp.vl["Lenkhilfe_3"]['LH3_LM'] * (1, -1)[int(pt_cp.vl["Lenkhilfe_3"]['LH3_LMSign'])]
    ret.steeringPressed = abs(ret.steeringTorque) > CarControllerParams.STEER_DRIVER_ALLOWANCE
    ret.yawRate = pt_cp.vl["Bremse_5"]['Giergeschwindigkeit'] * (1, -1)[int(pt_cp.vl["Bremse_5"]['Vorzeichen_der_Giergeschwindigk'])] * CV.DEG_TO_RAD
    # Update gas, brakes, and gearshift.
    ret.gas = pt_cp.vl["Motor_3"]['Fahrpedal_Rohsignal'] / 100.0
    ret.gasPressed = ret.gas > 0
    ret.brake = pt_cp.vl["Bremse_5"]['Bremsdruck'] / 250.0  # FIXME: this is pressure in Bar, not sure what OP expects
    ret.brakePressed = bool(pt_cp.vl["Motor_2"]['Bremstestschalter'])
    ret.brakeLights = bool(pt_cp.vl["Motor_2"]['Bremslichtschalter'])
    # Additional safety checks performed in CarInterface.
    self.parkingBrakeSet = bool(pt_cp.vl["Kombi_1"]['Bremsinfo'])  # FIXME: need to include an EPB check as well
    ret.espDisabled = bool(pt_cp.vl["Bremse_1"]['ESP_Passiv_getastet'])
    # Update gear and/or clutch position data.
    if trans_type == TRANS.automatic:
      ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl["Getriebe_1"]['Waehlhebelposition__Getriebe_1_'], None))
    elif trans_type == TRANS.manual:
      ret.clutchPressed = not pt_cp.vl["Motor_1"]['Kupplungsschalter']
      # Manual gearboxes have no shifter position signal; infer reverse
      # from the reverse light and otherwise report drive.
      reverse_light = bool(pt_cp.vl["Gate_Komf_1"]['GK1_Rueckfahr'])
      if reverse_light:
        ret.gearShifter = GEAR.reverse
      else:
        ret.gearShifter = GEAR.drive
    # Update door and trunk/hatch lid open status.
    # TODO: need to locate signals for other three doors if possible
    ret.doorOpen = bool(pt_cp.vl["Gate_Komf_1"]['GK1_Fa_Tuerkont'])
    # Update seatbelt fastened status.
    ret.seatbeltUnlatched = not bool(pt_cp.vl["Airbag_1"]["Gurtschalter_Fahrer"])
    # Update driver preference for metric. VW stores many different unit
    # preferences, including separate units for for distance vs. speed.
    # We use the speed preference for OP.
    self.displayMetricUnits = not pt_cp.vl["Einheiten_1"]["MFA_v_Einheit_02"]
    # Stock FCW is considered active if a warning is displayed to the driver
    # or the release bit for brake-jerk warning is set. Stock AEB considered
    # active if the partial braking or target braking release bits are set.
    # Ref: VW SSP 890253 "Volkswagen Driver Assistance Systems V2", "Front
    # Assist with Braking: Golf Family" (applies to all MQB)
    ret.stockFcw = False
    ret.stockAeb = False
    # Consume blind-spot radar info/warning LED states, if available
    ret.leftBlindspot = False
    ret.rightBlindspot = False
    # Consume SWA (Lane Change Assist) relevant info from factory LDW message
    # to pass along to the blind spot radar controller
    self.ldw_lane_warning_left = False
    self.ldw_lane_warning_right = False
    self.ldw_side_dlc_tlc = False
    self.ldw_dlc = 0
    self.ldw_tlc = 0
    # Update ACC radar status.
    # FIXME: This is unfinished and not fully correct, need to improve further
    ret.cruiseState.available = bool(pt_cp.vl["GRA_neu"]['Hauptschalter'])
    ret.cruiseState.enabled = True if pt_cp.vl["Motor_2"]['GRA_Status'] in [1, 2] else False
    # Update ACC setpoint. When the setpoint reads as 255, the driver has not
    # yet established an ACC setpoint, so treat it as zero.
    ret.cruiseState.speed = acc_cp.vl["ACC_GRA_Anziege"]['ACA_V_Wunsch'] * CV.KPH_TO_MS
    if ret.cruiseState.speed > 70:  # 255 kph in m/s == no current setpoint
      ret.cruiseState.speed = 0
    # Update control button states for turn signals and ACC controls.
    self.buttonStates["accelCruise"] = bool(pt_cp.vl["GRA_neu"]['Kurz_Tip_up']) or bool(pt_cp.vl["GRA_neu"]['Lang_Tip_up'])
    self.buttonStates["decelCruise"] = bool(pt_cp.vl["GRA_neu"]['Kurz_Tip_down']) or bool(pt_cp.vl["GRA_neu"]['Lang_Tip_down'])
    self.buttonStates["cancel"] = bool(pt_cp.vl["GRA_neu"]['Abbrechen'])
    self.buttonStates["setCruise"] = bool(pt_cp.vl["GRA_neu"]['Setzen'])
    self.buttonStates["resumeCruise"] = bool(pt_cp.vl["GRA_neu"]['Wiederaufnahme'])
    self.buttonStates["gapAdjustCruise"] = bool(pt_cp.vl["GRA_neu"]['Zeitlueckenverstellung'])
    ret.leftBlinker = bool(pt_cp.vl["Gate_Komf_1"]['GK1_Blinker_li'])
    ret.rightBlinker = bool(pt_cp.vl["Gate_Komf_1"]['GK1_Blinker_re'])
    # Read ACC hardware button type configuration info that has to pass thru
    # to the radar. Ends up being different for steering wheel buttons vs
    # third stalk type controls.
    # TODO: Check to see what info we need to passthru and spoof on PQ
    self.graHauptschalter = pt_cp.vl["GRA_neu"]['Hauptschalter']
    self.graTypHauptschalter = False
    self.graButtonTypeInfo = False
    self.graTipStufe2 = False
    # NOTE(review): update_mqb also sets self.graTyp468, but this PQ path
    # does not -- confirm downstream consumers don't read it on PQ cars.
    # Pick up the GRA_ACC_01 CAN message counter so we can sync to it for
    # later cruise-control button spamming.
    # FIXME: will need msg counter and checksum algo to spoof GRA_neu
    self.graMsgBusCounter = 0
    # Check to make sure the electric power steering rack is configured to
    # accept and respond to HCA_01 messages and has not encountered a fault.
    self.steeringFault = pt_cp.vl["Lenkhilfe_2"]['LH2_Sta_HCA'] not in [3, 5]
    return ret
@staticmethod
def get_mqb_can_parser(CP):
  """Build the powertrain-bus CAN parser for MQB platform cars.

  Assembles (signal_name, message_name, default) triples and
  (message_name, expected_frequency) check entries, extends both lists
  according to the car's transmission type and camera network location,
  and returns a CANParser bound to CANBUS.pt.
  """
  # this function generates lists for signal, messages and initial values
  signals = [
    # sig_name, sig_address, default
    ("EPS_Berechneter_LW", "LH_EPS_03", 0),       # Absolute steering angle
    ("EPS_VZ_BLW", "LH_EPS_03", 0),               # Steering angle sign
    ("LWI_Lenkradw_Geschw", "LWI_01", 0),         # Absolute steering rate
    ("LWI_VZ_Lenkradw_Geschw", "LWI_01", 0),      # Steering rate sign
    ("ESP_VL_Radgeschw_02", "ESP_19", 0),         # ABS wheel speed, front left
    ("ESP_VR_Radgeschw_02", "ESP_19", 0),         # ABS wheel speed, front right
    ("ESP_HL_Radgeschw_02", "ESP_19", 0),         # ABS wheel speed, rear left
    ("ESP_HR_Radgeschw_02", "ESP_19", 0),         # ABS wheel speed, rear right
    ("ESP_Gierrate", "ESP_02", 0),                # Absolute yaw rate
    ("ESP_VZ_Gierrate", "ESP_02", 0),             # Yaw rate sign
    ("ZV_FT_offen", "Gateway_72", 0),             # Door open, driver
    ("ZV_BT_offen", "Gateway_72", 0),             # Door open, passenger
    ("ZV_HFS_offen", "Gateway_72", 0),            # Door open, rear left
    ("ZV_HBFS_offen", "Gateway_72", 0),           # Door open, rear right
    ("ZV_HD_offen", "Gateway_72", 0),             # Trunk or hatch open
    ("BH_Blinker_li", "Gateway_72", 0),           # Left turn signal on
    ("BH_Blinker_re", "Gateway_72", 0),           # Right turn signal on
    ("AB_Gurtschloss_FA", "Airbag_02", 0),        # Seatbelt status, driver
    ("AB_Gurtschloss_BF", "Airbag_02", 0),        # Seatbelt status, passenger
    ("ESP_Fahrer_bremst", "ESP_05", 0),           # Brake pedal pressed
    ("ESP_Status_Bremsdruck", "ESP_05", 0),       # Brakes applied
    ("ESP_Bremsdruck", "ESP_05", 0),              # Brake pressure applied
    ("MO_Fahrpedalrohwert_01", "Motor_20", 0),    # Accelerator pedal value
    ("EPS_Lenkmoment", "LH_EPS_03", 0),           # Absolute driver torque input
    ("EPS_VZ_Lenkmoment", "LH_EPS_03", 0),        # Driver torque input sign
    ("EPS_HCA_Status", "LH_EPS_03", 0),           # Steering rack ready to process HCA commands
    ("ESP_Tastung_passiv", "ESP_21", 0),          # Stability control disabled
    ("KBI_MFA_v_Einheit_02", "Einheiten_01", 0),  # MPH vs KMH speed display
    ("KBI_Handbremse", "Kombi_01", 0),            # Manual handbrake applied
    ("TSK_Status", "TSK_06", 0),                  # ACC engagement status from drivetrain coordinator
    ("GRA_Hauptschalter", "GRA_ACC_01", 0),       # ACC button, on/off
    ("GRA_Abbrechen", "GRA_ACC_01", 0),           # ACC button, cancel
    ("GRA_Tip_Setzen", "GRA_ACC_01", 0),          # ACC button, set
    ("GRA_Tip_Hoch", "GRA_ACC_01", 0),            # ACC button, increase or accel
    ("GRA_Tip_Runter", "GRA_ACC_01", 0),          # ACC button, decrease or decel
    ("GRA_Tip_Wiederaufnahme", "GRA_ACC_01", 0),  # ACC button, resume
    ("GRA_Verstellung_Zeitluecke", "GRA_ACC_01", 0),  # ACC button, time gap adj
    ("GRA_Typ_Hauptschalter", "GRA_ACC_01", 0),   # ACC main button type
    ("GRA_Tip_Stufe_2", "GRA_ACC_01", 0),         # unknown related to stalk type
    ("GRA_Typ468", "GRA_ACC_01", 0),              # Set/Resume button behavior as overloaded coast/accel??
    ("GRA_ButtonTypeInfo", "GRA_ACC_01", 0),      # unknown related to stalk type
    ("COUNTER", "GRA_ACC_01", 0),                 # GRA_ACC_01 CAN message counter
  ]
  checks = [
    # sig_address, frequency
    ("LWI_01", 100),       # From J500 Steering Assist with integrated sensors
    ("LH_EPS_03", 100),    # From J500 Steering Assist with integrated sensors
    ("ESP_19", 100),       # From J104 ABS/ESP controller
    ("ESP_05", 50),        # From J104 ABS/ESP controller
    ("ESP_21", 50),        # From J104 ABS/ESP controller
    ("Motor_20", 50),      # From J623 Engine control module
    ("TSK_06", 50),        # From J623 Engine control module
    ("GRA_ACC_01", 33),    # From J??? steering wheel control buttons
    ("Gateway_72", 10),    # From J533 CAN gateway (aggregated data)
    ("Airbag_02", 5),      # From J234 Airbag control module
    ("Kombi_01", 2),       # From J285 Instrument cluster
    ("Einheiten_01", 1),   # From J??? not known if gateway, cluster, or BCM
  ]
  # Gear-position source differs by transmission; manual cars also need the
  # clutch switch and a reverse-light signal from the BCM.
  if CP.transmissionType == TRANS.automatic:
    signals += [("GE_Fahrstufe", "Getriebe_11", 0)]  # Auto trans gear selector position
    checks += [("Getriebe_11", 20)]  # From J743 Auto transmission control module
  elif CP.transmissionType == TRANS.direct:
    signals += [("GearPosition", "EV_Gearshift", 0)]  # EV gear selector position
    checks += [("EV_Gearshift", 10)]  # From J??? unknown EV control module
  elif CP.transmissionType == TRANS.manual:
    signals += [("MO_Kuppl_schalter", "Motor_14", 0),  # Clutch switch
                ("BCM1_Rueckfahrlicht_Schalter", "Gateway_72", 0)]  # Reverse light from BCM
    checks += [("Motor_14", 10)]  # From J623 Engine control module
  if CP.networkLocation == NWL.fwdCamera:
    # Extended CAN devices other than the camera are here on CANBUS.pt
    # FIXME: gate SWA_01 checks on module being detected, and reduce duplicate network location code
    signals += [("AWV2_Priowarnung", "ACC_10", 0),      # FCW related
                ("AWV2_Freigabe", "ACC_10", 0),         # FCW related
                ("ANB_Teilbremsung_Freigabe", "ACC_10", 0),  # AEB related
                ("ANB_Zielbremsung_Freigabe", "ACC_10", 0),  # AEB related
                ("SWA_Infostufe_SWA_li", "SWA_01", 0),  # Blindspot object info, left
                ("SWA_Warnung_SWA_li", "SWA_01", 0),    # Blindspot object warning, left
                ("SWA_Infostufe_SWA_re", "SWA_01", 0),  # Blindspot object info, right
                ("SWA_Warnung_SWA_re", "SWA_01", 0),    # Blindspot object warning, right
                ("ACC_Wunschgeschw", "ACC_02", 0)]      # ACC set speed
    checks += [("ACC_10", 50),  # From J428 ACC radar control module
               # FIXME: SWA_01 should be checked when we have better detection of installed hardware
               #("SWA_01", 20),  # From J1086 Lane Change Assist module
               ("ACC_02", 17)]  # From J428 ACC radar control module
  return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, CANBUS.pt)
@staticmethod
def get_pq_can_parser(CP):
  """Build the powertrain-bus CAN parser for PQ35/PQ46/NMS platform cars.

  Same structure as the MQB variant: (signal, message, default) triples
  plus (message, frequency) checks, extended per transmission type and
  network location, returned as a CANParser on CANBUS.pt.
  """
  signals = [
    # sig_name, sig_address, default
    ("LH3_BLW", "Lenkhilfe_3", 0),                  # Absolute steering angle
    ("LH3_BLWSign", "Lenkhilfe_3", 0),              # Steering angle sign
    ("LH3_LM", "Lenkhilfe_3", 0),                   # Absolute driver torque input
    ("LH3_LMSign", "Lenkhilfe_3", 0),               # Driver torque input sign
    ("LH2_Sta_HCA", "Lenkhilfe_2", 0),              # Steering rack HCA status
    ("Lenkradwinkel_Geschwindigkeit", "Lenkwinkel_1", 0),    # Absolute steering rate
    ("Lenkradwinkel_Geschwindigkeit_S", "Lenkwinkel_1", 0),  # Steering rate sign
    ("Radgeschw__VL_4_1", "Bremse_3", 0),           # ABS wheel speed, front left
    ("Radgeschw__VR_4_1", "Bremse_3", 0),           # ABS wheel speed, front right
    ("Radgeschw__HL_4_1", "Bremse_3", 0),           # ABS wheel speed, rear left
    ("Radgeschw__HR_4_1", "Bremse_3", 0),           # ABS wheel speed, rear right
    ("Giergeschwindigkeit", "Bremse_5", 0),         # Absolute yaw rate
    ("Vorzeichen_der_Giergeschwindigk", "Bremse_5", 0),  # Yaw rate sign
    ("GK1_Fa_Tuerkont", "Gate_Komf_1", 0),          # Door open, driver
    # TODO: locate passenger and rear door states
    ("GK1_Blinker_li", "Gate_Komf_1", 0),           # Left turn signal on
    ("GK1_Blinker_re", "Gate_Komf_1", 0),           # Right turn signal on
    ("Gurtschalter_Fahrer", "Airbag_1", 0),         # Seatbelt status, driver
    ("Gurtschalter_Beifahrer", "Airbag_1", 0),      # Seatbelt status, passenger
    ("Bremstestschalter", "Motor_2", 0),            # Brake pedal pressed (brake light test switch)
    ("Bremslichtschalter", "Motor_2", 0),           # Brakes applied (brake light switch)
    ("Bremsdruck", "Bremse_5", 0),                  # Brake pressure applied
    ("Vorzeichen_Bremsdruck", "Bremse_5", 0),       # Brake pressure applied sign (???)
    ("Fahrpedal_Rohsignal", "Motor_3", 0),          # Accelerator pedal value
    ("ESP_Passiv_getastet", "Bremse_1", 0),         # Stability control disabled
    ("MFA_v_Einheit_02", "Einheiten_1", 0),         # MPH vs KMH speed display
    ("Bremsinfo", "Kombi_1", 0),                    # Manual handbrake applied
    ("GRA_Status", "Motor_2", 0),                   # ACC engagement status
    ("Hauptschalter", "GRA_neu", 0),                # ACC button, on/off
    ("Abbrechen", "GRA_neu", 0),                    # ACC button, cancel
    ("Setzen", "GRA_neu", 0),                       # ACC button, set
    ("Lang_Tip_up", "GRA_neu", 0),                  # ACC button, increase or accel, long press
    ("Lang_Tip_down", "GRA_neu", 0),                # ACC button, decrease or decel, long press
    ("Kurz_Tip_up", "GRA_neu", 0),                  # ACC button, increase or accel, short press
    ("Kurz_Tip_down", "GRA_neu", 0),                # ACC button, decrease or decel, short press
    ("Wiederaufnahme", "GRA_neu", 0),               # ACC button, resume
    ("Zeitlueckenverstellung", "GRA_neu", 0),       # ACC button, time gap adj
  ]
  checks = [
    # sig_address, frequency
    ("Bremse_3", 100),      # From J104 ABS/ESP controller
    ("Lenkhilfe_3", 100),   # From J500 Steering Assist with integrated sensors
    ("Lenkwinkel_1", 100),  # From J500 Steering Assist with integrated sensors
    ("Motor_3", 100),       # From J623 Engine control module
    ("Airbag_1", 50),       # From J234 Airbag control module
    ("Bremse_5", 50),       # From J104 ABS/ESP controller
    ("GRA_neu", 50),        # From J??? steering wheel control buttons
    ("Kombi_1", 50),        # From J285 Instrument cluster
    ("Motor_2", 50),        # From J623 Engine control module
    ("Lenkhilfe_2", 20),    # From J500 Steering Assist with integrated sensors
    ("Gate_Komf_1", 10),    # From J533 CAN gateway
    ("Einheiten_1", 1),     # From J??? cluster or gateway
  ]
  # Gear-position source differs by transmission; manual cars also need the
  # clutch switch and a reverse-light signal.
  if CP.transmissionType == TRANS.automatic:
    signals += [("Waehlhebelposition__Getriebe_1_", "Getriebe_1", 0)]  # Auto trans gear selector position
    checks += [("Getriebe_1", 100)]  # From J743 Auto transmission control module
  elif CP.transmissionType == TRANS.manual:
    signals += [("Kupplungsschalter", "Motor_1", 0),  # Clutch switch
                ("GK1_Rueckfahr", "Gate_Komf_1", 0)]  # Reverse light from BCM
    checks += [("Motor_1", 100)]  # From J623 Engine control module
  if CP.networkLocation == NWL.fwdCamera:
    # The ACC radar is here on CANBUS.pt
    signals += [("ACA_V_Wunsch", "ACC_GRA_Anziege", 0)]  # ACC set speed
    checks += [("ACC_GRA_Anziege", 25)]  # From J428 ACC radar control module
  return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, CANBUS.pt)
@staticmethod
def get_mqb_cam_can_parser(CP):
  """Build the camera-bus CAN parser for MQB platform cars.

  Always listens for LDW_02 lane-departure signals from the camera; when
  openpilot is connected at the gateway, the extended-CAN modules (ACC
  radar, blindspot) are also on this bus and their signals are added.
  """
  # FIXME: gate LDW_02 checks on module being detected
  signals = [
    # sig_name, sig_address, default
    ("LDW_SW_Warnung_links", "LDW_02", 0),   # Blind spot in warning mode on left side due to lane departure
    ("LDW_SW_Warnung_rechts", "LDW_02", 0),  # Blind spot in warning mode on right side due to lane departure
    ("LDW_Seite_DLCTLC", "LDW_02", 0),       # Direction of most likely lane departure (left or right)
    ("LDW_DLC", "LDW_02", 0),                # Lane departure, distance to line crossing
    ("LDW_TLC", "LDW_02", 0),                # Lane departure, time to line crossing
  ]
  checks = [
    # sig_address, frequency
    # FIXME: LDW_02 should be checked when we have better detection of installed hardware
    #("LDW_02", 10),  # From R242 Driver assistance camera
  ]
  if CP.networkLocation == NWL.gateway:
    # All Extended CAN devices are here on CANBUS.cam
    # FIXME: gate SWA_01 checks on module being detected, and reduce duplicate network location code
    signals += [("AWV2_Priowarnung", "ACC_10", 0),      # FCW related
                ("AWV2_Freigabe", "ACC_10", 0),         # FCW related
                ("ANB_Teilbremsung_Freigabe", "ACC_10", 0),  # AEB related
                ("ANB_Zielbremsung_Freigabe", "ACC_10", 0),  # AEB related
                ("SWA_Infostufe_SWA_li", "SWA_01", 0),  # Blindspot object info, left
                ("SWA_Warnung_SWA_li", "SWA_01", 0),    # Blindspot object warning, left
                ("SWA_Infostufe_SWA_re", "SWA_01", 0),  # Blindspot object info, right
                ("SWA_Warnung_SWA_re", "SWA_01", 0),    # Blindspot object warning, right
                ("ACC_Wunschgeschw", "ACC_02", 0)]      # ACC set speed
    checks += [("ACC_10", 50),  # From J428 ACC radar control module
               # FIXME: SWA_01 should be checked when we have better detection of installed hardware
               #("SWA_01", 20),  # From J1086 Lane Change Assist module
               ("ACC_02", 17)]  # From J428 ACC radar control module
  return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, CANBUS.cam)
@staticmethod
def get_pq_cam_can_parser(CP):
  """Build the camera-bus CAN parser for PQ platform cars.

  Only the ACC radar's set-speed display message is monitored, and only
  when openpilot is connected at the gateway (radar on CANBUS.cam).
  """
  # TODO: Need to monitor LKAS camera, if present, for TLC/DLC/warning signals for passthru to SWA
  signals, checks = [], []
  if CP.networkLocation == NWL.gateway:
    # The ACC radar is here on CANBUS.cam
    signals.append(("ACA_V_Wunsch", "ACC_GRA_Anziege", 0))  # ACC set speed
    checks.append(("ACC_GRA_Anziege", 25))  # From J428 ACC radar control module
  return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, CANBUS.cam)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,480
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/controls/lib/dynamic_follow/auto_df.py
|
"""
Generated using Konverter: https://github.com/ShaneSmiskol/Konverter
"""
import numpy as np
from common.travis_checker import travis
# Load the pre-trained network weights/biases; the install path differs
# between CI (travis) and a real device.
if travis:
  wb = np.load('/tmp/openpilot/selfdrive/controls/lib/dynamic_follow/auto_df_weights.npz', allow_pickle=True)
else:
  wb = np.load('/data/openpilot/selfdrive/controls/lib/dynamic_follow/auto_df_weights.npz', allow_pickle=True)
# The archive's 'wb' entry holds a (weights, biases) pair; each is a
# per-layer sequence indexed 0..2 by predict().
w, b = wb['wb']
def softmax(x):
  """Return the softmax of `x` along axis 0.

  Numerically stable form: the previous implementation computed
  np.exp(x) directly, which overflows to inf (and yields NaN after the
  division) for logits around 710 and above. Softmax is invariant to
  subtracting a constant from every logit, so shifting by the per-column
  maximum gives identical results for finite outputs while never
  overflowing.
  """
  z = np.exp(x - np.max(x, axis=0))
  return z / np.sum(z, axis=0)
def predict(x):
  """Forward pass of the 3-layer MLP: two ReLU hidden layers followed by
  a softmax output, using the module-level weights `w` and biases `b`."""
  activation = x
  for layer in (0, 1):
    # Dense layer followed by ReLU.
    activation = np.maximum(0, np.dot(activation, w[layer]) + b[layer])
  logits = np.dot(activation, w[2]) + b[2]
  return softmax(logits)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,481
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/test/process_replay/camera_replay.py
|
#!/usr/bin/env python3
import os
import sys
import time
from typing import Any
from tqdm import tqdm
from common.hardware import ANDROID
os.environ['CI'] = "1"
if ANDROID:
os.environ['QCOM_REPLAY'] = "1"
from common.spinner import Spinner
from common.timeout import Timeout
import selfdrive.manager as manager
from cereal import log
import cereal.messaging as messaging
from tools.lib.framereader import FrameReader
from tools.lib.logreader import LogReader
from selfdrive.test.openpilotci import BASE_URL, get_url
from selfdrive.test.process_replay.compare_logs import compare_logs, save_log
from selfdrive.test.process_replay.test_processes import format_diff
from selfdrive.version import get_git_commit
from common.transformations.camera import get_view_frame_from_road_frame
TEST_ROUTE = "99c94dc769b5d96e|2019-08-03--14-19-59"
def replace_calib(msg, calib):
  """Return a builder copy of `msg`; when `calib` (calibration angles) is
  provided, overwrite its liveCalibration extrinsic matrix with one derived
  from those angles at a fixed 1.22 m camera height."""
  builder = msg.as_builder()
  if calib is None:
    return builder
  extrinsic = get_view_frame_from_road_frame(*calib, 1.22)
  builder.liveCalibration.extrinsicMatrix = extrinsic.flatten().tolist()
  return builder
def camera_replay(lr, fr, desire=None, calib=None):
  """Replay logged camera frames through camerad/modeld and collect outputs.

  lr: iterable of log messages for the route.
  fr: FrameReader for the matching camera stream.
  desire: optional per-frame desire activation array, injected via pathPlan.
  calib: optional calibration angles used to rewrite liveCalibration.
  Returns the list of 'model'/'modelV2' messages received, two per frame.
  """
  spinner = Spinner()
  spinner.update("starting model replay")
  pm = messaging.PubMaster(['frame', 'liveCalibration', 'pathPlan'])
  sm = messaging.SubMaster(['model', 'modelV2'])
  # TODO: add dmonitoringmodeld
  print("preparing procs")
  manager.prepare_managed_process("camerad")
  manager.prepare_managed_process("modeld")
  try:
    print("starting procs")
    manager.start_managed_process("camerad")
    manager.start_managed_process("modeld")
    # Give the processes time to come up before pushing data.
    time.sleep(5)
    sm.update(1000)
    print("procs started")
    # Reverse map: desire enum index -> enum name, for building pathPlan msgs.
    desires_by_index = {v:k for k,v in log.PathPlan.Desire.schema.enumerants.items()}
    # Prime modeld with a few calibration messages before the frame loop.
    cal = [msg for msg in lr if msg.which() == "liveCalibration"]
    for msg in cal[:5]:
      pm.send(msg.which(), replace_calib(msg, calib))
    log_msgs = []
    frame_idx = 0
    for msg in tqdm(lr):
      if msg.which() == "liveCalibration":
        pm.send(msg.which(), replace_calib(msg, calib))
      elif msg.which() == "frame":
        if desire is not None:
          # Publish a pathPlan for each active desire at this frame.
          for i in desire[frame_idx].nonzero()[0]:
            dat = messaging.new_message('pathPlan')
            dat.pathPlan.desire = desires_by_index[i]
            pm.send('pathPlan', dat)
        f = msg.as_builder()
        # [:,:,::-1] flips RGB -> BGR channel order before flattening.
        img = fr.get(frame_idx, pix_fmt="rgb24")[0][:,:,::-1]
        f.frame.image = img.flatten().tobytes()
        frame_idx += 1
        pm.send(msg.which(), f)
        # Block until modeld produces both outputs for this frame.
        with Timeout(seconds=15):
          log_msgs.append(messaging.recv_one(sm.sock['model']))
          log_msgs.append(messaging.recv_one(sm.sock['modelV2']))
        spinner.update("modeld replay %d/%d" % (frame_idx, fr.frame_count))
        if frame_idx >= fr.frame_count:
          break
  except KeyboardInterrupt:
    pass
  print("replay done")
  spinner.close()
  manager.kill_managed_process('modeld')
  time.sleep(2)
  manager.kill_managed_process('camerad')
  return log_msgs
if __name__ == "__main__":
  # With "--update", upload fresh reference logs instead of only comparing.
  update = "--update" in sys.argv
  replay_dir = os.path.dirname(os.path.abspath(__file__))
  ref_commit_fn = os.path.join(replay_dir, "model_replay_ref_commit")
  lr = LogReader(get_url(TEST_ROUTE, 0))
  fr = FrameReader(get_url(TEST_ROUTE, 0, log_type="fcamera"))
  log_msgs = camera_replay(list(lr), fr)
  failed = False
  if not update:
    # Compare this run against the reference log pinned by ref_commit.
    ref_commit = open(ref_commit_fn).read().strip()
    log_fn = "%s_%s_%s.bz2" % (TEST_ROUTE, "model", ref_commit)
    cmp_log = LogReader(BASE_URL + log_fn)
    # Fields expected to differ between runs (timing, perf counters).
    ignore = ['logMonoTime', 'valid', 'model.frameDropPerc', 'model.modelExecutionTime',
              'modelV2.frameDropPerc', 'modelV2.modelExecutionTime']
    results: Any = {TEST_ROUTE: {}}
    results[TEST_ROUTE]["modeld"] = compare_logs(cmp_log, log_msgs, ignore_fields=ignore)
    diff1, diff2, failed = format_diff(results, ref_commit)
    print(diff1)
    with open("model_diff.txt", "w") as f:
      f.write(diff2)
  if update or failed:
    # Publish this run's output as the new reference for the current commit.
    from selfdrive.test.openpilotci import upload_file
    print("Uploading new refs")
    new_commit = get_git_commit()
    log_fn = "%s_%s_%s.bz2" % (TEST_ROUTE, "model", new_commit)
    save_log(log_fn, log_msgs)
    try:
      upload_file(log_fn, os.path.basename(log_fn))
    except Exception as e:
      # Best-effort upload; still record the new ref commit locally.
      print("failed to upload", e)
    with open(ref_commit_fn, 'w') as f:
      f.write(str(new_commit))
    print("\n\nNew ref commit: ", new_commit)
  sys.exit(int(failed))
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,482
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/external/simpleperf/report_html.py
|
#!/usr/bin/python
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import datetime
import json
import os
import subprocess
import sys
import tempfile
from simpleperf_report_lib import ReportLib
from utils import *
class HtmlWriter(object):
    """Small streaming HTML emitter.

    Writes tags and text straight to a file, tracking open tags on a stack
    so close_tag() can emit the matching end tag. open_tag()/add()/add_file()
    return self for call chaining.
    """

    def __init__(self, output_path):
        self.fh = open(output_path, 'w')
        self.tag_stack = []

    def close(self):
        self.fh.close()

    def open_tag(self, tag, **attrs):
        # Render attributes as ' key="value"' pairs appended to the tag.
        rendered = ''.join(' %s="%s"' % item for item in attrs.items())
        self.fh.write('<%s%s>' % (tag, rendered))
        self.tag_stack.append(tag)
        return self

    def close_tag(self, tag=None):
        # Optionally verify we are closing the tag the caller expects.
        if tag:
            assert tag == self.tag_stack[-1]
        self.fh.write('</%s>\n' % self.tag_stack.pop())

    def add(self, text):
        self.fh.write(text)
        return self

    def add_file(self, file_path):
        # Resolve relative to the script's own directory.
        full_path = os.path.join(get_script_dir(), file_path)
        with open(full_path, 'r') as f:
            self.add(f.read())
        return self
def modify_text_for_html(text):
    """Escape markup-significant characters so `text` renders literally in HTML.

    The previous body replaced '>' with '>' and '<' with '<' — a no-op
    (the entity names were evidently lost to de-escaping), leaving raw
    markup characters in the generated report. '&' is escaped first so the
    '&' inside the inserted entities is not itself re-escaped.
    """
    return text.replace('&', '&amp;').replace('>', '&gt;').replace('<', '&lt;')
class EventScope(object):
    """Aggregates profiling samples for a single perf event type.

    Holds per-process sub-scopes keyed by pid, plus running sample and
    event-count totals.
    """

    def __init__(self, name):
        self.name = name
        self.processes = {}  # pid -> ProcessScope
        self.sample_count = 0
        self.event_count = 0

    def get_process(self, pid):
        # Create the per-process scope lazily on first sighting of this pid.
        if pid not in self.processes:
            self.processes[pid] = ProcessScope(pid)
        return self.processes[pid]

    def get_sample_info(self, gen_addr_hit_map):
        return {
            'eventName': self.name,
            'eventCount': self.event_count,
            'processes': [proc.get_sample_info(gen_addr_hit_map)
                          for proc in self.processes.values()],
        }
class ProcessScope(object):
    """Aggregates one process's samples, split into per-thread sub-scopes."""

    def __init__(self, pid):
        self.pid = pid
        self.name = ''
        self.event_count = 0
        self.threads = {}  # tid -> ThreadScope

    def get_thread(self, tid, thread_name):
        # Create the thread scope lazily; refresh its name on every call.
        if tid not in self.threads:
            self.threads[tid] = ThreadScope(tid)
        thread = self.threads[tid]
        thread.name = thread_name
        # The main thread (tid == pid) also names the process.
        if self.pid == tid:
            self.name = thread_name
        return thread

    def get_sample_info(self, gen_addr_hit_map):
        return {
            'pid': self.pid,
            'eventCount': self.event_count,
            'threads': [thread.get_sample_info(gen_addr_hit_map)
                        for thread in self.threads.values()],
        }
class ThreadScope(object):
    """Aggregates one thread's samples, split into per-library sub-scopes."""

    def __init__(self, tid):
        self.tid = tid
        self.name = ''
        self.event_count = 0
        self.libs = {}  # map from lib_id to LibScope

    def add_callstack(self, event_count, callstack, build_addr_hit_map):
        """ callstack is a list of tuple (lib_id, func_id, addr).
        For each i > 0, callstack[i] calls callstack[i-1]."""
        # First pass, leaf-to-root: credit each distinct function with the
        # sample's event count and record its caller (reverse) chains.
        hit_func_ids = set()
        for i in range(len(callstack)):
            lib_id, func_id, addr = callstack[i]
            # When a callstack contains recursive function, only add for each function once.
            if func_id in hit_func_ids:
                continue
            hit_func_ids.add(func_id)
            lib = self.libs.get(lib_id)
            if not lib:
                lib = self.libs[lib_id] = LibScope(lib_id)
            function = lib.get_function(func_id)
            if i == 0:
                # Leaf frame: the sample landed here, so the lib/function own it.
                lib.event_count += event_count
                function.sample_count += 1
            function.add_reverse_callchain(callstack, i + 1, len(callstack), event_count)
            if build_addr_hit_map:
                # Self-count goes only to the leaf frame; every frame gets
                # the subtree count.
                function.build_addr_hit_map(addr, event_count if i == 0 else 0, event_count)
        # Second pass, root-to-leaf: record the callee-direction chains,
        # again deduplicating recursive frames per function.
        hit_func_ids.clear()
        for i in range(len(callstack) - 1, -1, -1):
            lib_id, func_id, _ = callstack[i]
            # When a callstack contains recursive function, only add for each function once.
            if func_id in hit_func_ids:
                continue
            hit_func_ids.add(func_id)
            # LibScope is guaranteed to exist: the first pass created it.
            lib = self.libs.get(lib_id)
            lib.get_function(func_id).add_callchain(callstack, i - 1, -1, event_count)

    def get_sample_info(self, gen_addr_hit_map):
        result = {}
        result['tid'] = self.tid
        result['eventCount'] = self.event_count
        result['libs'] = [lib.gen_sample_info(gen_addr_hit_map)
                          for lib in self.libs.values()]
        return result
class LibScope(object):
    """Aggregates one library's samples, split into per-function scopes."""

    def __init__(self, lib_id):
        self.lib_id = lib_id
        self.event_count = 0
        self.functions = {}  # func_id -> FunctionScope

    def get_function(self, func_id):
        # Create the function scope lazily on first sighting.
        if func_id not in self.functions:
            self.functions[func_id] = FunctionScope(func_id)
        return self.functions[func_id]

    def gen_sample_info(self, gen_addr_hit_map):
        return {
            'libId': self.lib_id,
            'eventCount': self.event_count,
            'functions': [fn.gen_sample_info(gen_addr_hit_map)
                          for fn in self.functions.values()],
        }
class FunctionScope(object):
    """ Aggregates sample data for one function within a library/thread. """

    def __init__(self, func_id):
        self.sample_count = 0
        # Forward graph: how this function calls other functions.
        self.call_graph = CallNode(func_id)
        # Reverse graph: how this function is called by other functions.
        self.reverse_call_graph = CallNode(func_id)
        # addr -> [event_count, subtree_event_count]; built lazily.
        self.addr_hit_map = None
        # (source_file_id, line) -> [event_count, subtree_event_count]; built lazily.
        self.line_hit_map = None

    def add_callchain(self, callchain, start, end, event_count):
        """ Add a callee chain (walking callchain backwards from start to end). """
        node = self.call_graph
        for pos in range(start, end, -1):
            node = node.get_child(callchain[pos][1])
        node.event_count += event_count

    def add_reverse_callchain(self, callchain, start, end, event_count):
        """ Add a caller chain (walking callchain forwards from start to end). """
        node = self.reverse_call_graph
        for pos in range(start, end):
            node = node.get_child(callchain[pos][1])
        node.event_count += event_count

    def build_addr_hit_map(self, addr, event_count, subtree_event_count):
        """ Accumulate per-address event counts for this function. """
        if self.addr_hit_map is None:
            self.addr_hit_map = {}
        counts = self.addr_hit_map.setdefault(addr, [0, 0])
        counts[0] += event_count
        counts[1] += subtree_event_count

    def build_line_hit_map(self, source_file_id, line, event_count, subtree_event_count):
        """ Accumulate per-source-line event counts for this function. """
        if self.line_hit_map is None:
            self.line_hit_map = {}
        counts = self.line_hit_map.setdefault((source_file_id, line), [0, 0])
        counts[0] += event_count
        counts[1] += subtree_event_count

    def update_subtree_event_count(self):
        """ Refresh subtree counts in both graphs; return the larger total. """
        return max(self.call_graph.update_subtree_event_count(),
                   self.reverse_call_graph.update_subtree_event_count())

    def limit_callchain_percent(self, min_callchain_percent, hit_func_ids):
        """ Prune call-graph edges below min_callchain_percent of this function's total. """
        threshold = min_callchain_percent * 0.01 * self.call_graph.subtree_event_count
        self.call_graph.cut_edge(threshold, hit_func_ids)
        self.reverse_call_graph.cut_edge(threshold, hit_func_ids)

    def gen_sample_info(self, gen_addr_hit_map):
        """ Return a JSON-friendly dict for this function's samples. """
        result = {
            'c': self.sample_count,
            'g': self.call_graph.gen_sample_info(),
            'rg': self.reverse_call_graph.gen_sample_info(),
        }
        if self.line_hit_map:
            result['s'] = [
                {'f': file_id, 'l': line, 'e': counts[0], 's': counts[1]}
                for (file_id, line), counts in self.line_hit_map.items()
            ]
        if gen_addr_hit_map and self.addr_hit_map:
            result['a'] = [
                {'a': addr,
                 'e': self.addr_hit_map[addr][0],
                 's': self.addr_hit_map[addr][1]}
                for addr in sorted(self.addr_hit_map)
            ]
        return result
class CallNode(object):
    """ A node in a (forward or reverse) call graph. """

    def __init__(self, func_id):
        self.event_count = 0          # events attributed to this node itself
        self.subtree_event_count = 0  # events in this node plus all descendants
        self.func_id = func_id
        self.children = {}            # func_id -> CallNode

    def get_child(self, func_id):
        """ Return the child node for func_id, creating it on first use. """
        if func_id not in self.children:
            self.children[func_id] = CallNode(func_id)
        return self.children[func_id]

    def update_subtree_event_count(self):
        """ Recompute subtree_event_count recursively and return it. """
        total = self.event_count
        for child in self.children.values():
            total += child.update_subtree_event_count()
        self.subtree_event_count = total
        return total

    def cut_edge(self, min_limit, hit_func_ids):
        """ Drop children whose subtree counts fall below min_limit.

            Every func_id that survives the cut is recorded in hit_func_ids.
        """
        hit_func_ids.add(self.func_id)
        doomed = []
        for child_id, child in self.children.items():
            if child.subtree_event_count < min_limit:
                doomed.append(child_id)
            else:
                child.cut_edge(min_limit, hit_func_ids)
        for child_id in doomed:
            del self.children[child_id]

    def gen_sample_info(self):
        """ Return a JSON-friendly dict for this node and its subtree. """
        return {
            'e': self.event_count,
            's': self.subtree_event_count,
            'f': self.func_id,
            'c': [child.gen_sample_info() for child in self.children.values()],
        }
class LibSet(object):
    """ Collection of shared libraries used in perf.data. """

    def __init__(self):
        self.lib_name_to_id = {}  # name -> id
        self.lib_id_to_name = []  # id -> name (ids are list indexes)

    def get_lib_id(self, lib_name):
        """ Return the id for lib_name, assigning a fresh id on first use. """
        if lib_name not in self.lib_name_to_id:
            self.lib_name_to_id[lib_name] = len(self.lib_id_to_name)
            self.lib_id_to_name.append(lib_name)
        return self.lib_name_to_id[lib_name]

    def get_lib_name(self, lib_id):
        """ Return the name registered for lib_id. """
        return self.lib_id_to_name[lib_id]
class Function(object):
    """ Represent a function in a shared library. """

    def __init__(self, lib_id, func_name, func_id, start_addr, addr_len):
        self.lib_id = lib_id          # id of the containing library (see LibSet)
        self.func_name = func_name
        self.func_id = func_id        # globally unique function id
        self.start_addr = start_addr  # first address of the function
        self.addr_len = addr_len      # number of addresses the function covers
        # Filled in later by source/disassembly passes; None until then.
        self.source_info = None       # (source_file_id, start_line, end_line)
        self.disassembly = None       # list of (disassembled line, addr)
class FunctionSet(object):
    """ Collection of functions used in perf.data. """

    def __init__(self):
        self.name_to_func = {}  # (lib_id, func_name) -> Function
        self.id_to_func = {}    # func_id -> Function

    def get_func_id(self, lib_id, symbol):
        """ Return the func_id for symbol in lib_id, registering it on first use. """
        key = (lib_id, symbol.symbol_name)
        if key not in self.name_to_func:
            func_id = len(self.id_to_func)
            function = Function(lib_id, symbol.symbol_name, func_id,
                                symbol.symbol_addr, symbol.symbol_len)
            self.name_to_func[key] = function
            self.id_to_func[func_id] = function
        return self.name_to_func[key].func_id

    def trim_functions(self, left_func_ids):
        """ Remove functions except those in left_func_ids. """
        for function in self.name_to_func.values():
            if function.func_id not in left_func_ids:
                del self.id_to_func[function.func_id]
        # name_to_func will not be used afterwards.
        self.name_to_func = None
class SourceFile(object):
    """ A source file containing source code hit by samples. """

    def __init__(self, file_id, abstract_path):
        self.file_id = file_id
        self.abstract_path = abstract_path  # path reported by addr2line
        self.real_path = None               # path in the local file system
        self.requested_lines = set()        # line numbers we need code for
        self.line_to_code = {}              # line number -> source text of that line

    def request_lines(self, start_line, end_line):
        """ Mark lines in [start_line, end_line] as needed. """
        self.requested_lines.update(range(start_line, end_line + 1))

    def add_source_code(self, real_path):
        """ Read real_path and keep the text of every requested, in-range line. """
        self.real_path = real_path
        with open(real_path, 'r') as f:
            source_code = f.readlines()
        max_line = len(source_code)
        for line in self.requested_lines:
            if 0 < line <= max_line:
                self.line_to_code[line] = source_code[line - 1]
        # requested_lines is no longer used.
        self.requested_lines = None
class SourceFileSet(object):
    """ Collection of source files. """

    def __init__(self):
        self.path_to_source_files = {}  # abstract file path -> SourceFile

    def get_source_file(self, file_path):
        """ Return the SourceFile for file_path, creating it on first use. """
        if file_path not in self.path_to_source_files:
            # File ids are assigned in insertion order.
            self.path_to_source_files[file_path] = SourceFile(
                len(self.path_to_source_files), file_path)
        return self.path_to_source_files[file_path]

    def load_source_code(self, source_dirs):
        """ Resolve each file's real path under source_dirs and load its code. """
        file_searcher = SourceFileSearcher(source_dirs)
        for source_file in self.path_to_source_files.values():
            real_path = file_searcher.get_real_path(source_file.abstract_path)
            if real_path:
                source_file.add_source_code(real_path)
class SourceFileSearcher(object):
    """ Find source file paths in the file system.

        The file paths reported by addr2line are the paths stored in debug sections
        of shared libraries. And we need to convert them to file paths in the file
        system. It is done in below steps:
        1. Collect all file paths under the provided source_dirs. The suffix of a
           source file should contain one of below:
            h: for C/C++ header files.
            c: for C/C++ source files.
            java: for Java source files.
            kt: for Kotlin source files.
        2. Given an abstract_path reported by addr2line, select the best real path
           as below:
            2.1 Find all real paths with the same file name as the abstract path.
            2.2 Select the real path having the longest common suffix with the
                abstract path.
    """
    # NOTE: this text used to be a bare string expression placed after
    # is_source_filename(), where it was unreachable as the class __doc__ and
    # carried a stray fourth quote; it is now the proper class docstring.

    SOURCE_FILE_EXTS = {'.h', '.hh', '.H', '.hxx', '.hpp', '.h++',
                        '.c', '.cc', '.C', '.cxx', '.cpp', '.c++',
                        '.java', '.kt'}

    @classmethod
    def is_source_filename(cls, filename):
        """ Return True if filename has a recognized source-file extension. """
        ext = os.path.splitext(filename)[1]
        return ext in cls.SOURCE_FILE_EXTS

    def __init__(self, source_dirs):
        # Map from filename to a list of reversed directory paths containing it.
        # Paths are reversed so "longest common suffix" becomes a cheap
        # os.path.commonprefix() call.
        self.filename_to_rparents = {}
        self._collect_paths(source_dirs)

    def _collect_paths(self, source_dirs):
        """ Index every source file under source_dirs by its filename. """
        for source_dir in source_dirs:
            for parent, _, file_names in os.walk(source_dir):
                rparent = None
                for file_name in file_names:
                    if self.is_source_filename(file_name):
                        rparents = self.filename_to_rparents.setdefault(file_name, [])
                        if rparent is None:
                            # Reverse the directory path at most once per directory.
                            rparent = parent[::-1]
                        rparents.append(rparent)

    def get_real_path(self, abstract_path):
        """ Return the best matching real path for abstract_path, or None. """
        abstract_path = abstract_path.replace('/', os.sep)
        abstract_parent, file_name = os.path.split(abstract_path)
        abstract_rparent = abstract_parent[::-1]
        real_rparents = self.filename_to_rparents.get(file_name)
        if real_rparents is None:
            return None
        best_matched_rparent = None
        best_common_length = -1
        for real_rparent in real_rparents:
            length = len(os.path.commonprefix((real_rparent, abstract_rparent)))
            if length > best_common_length:
                best_common_length = length
                best_matched_rparent = real_rparent
        if best_matched_rparent is None:
            return None
        return os.path.join(best_matched_rparent[::-1], file_name)
class RecordData(object):
    """RecordData reads perf.data, and generates data used by report.js in json format.
        All generated items are listed as below:
            1. recordTime: string
            2. machineType: string
            3. androidVersion: string
            4. recordCmdline: string
            5. totalSamples: int
            6. processNames: map from pid to processName.
            7. threadNames: map from tid to threadName.
            8. libList: an array of libNames, indexed by libId.
            9. functionMap: map from functionId to funcData.
                funcData = {
                    l: libId
                    f: functionName
                    s: [sourceFileId, startLine, endLine] [optional]
                    d: [(disassembly, addr)] [optional]
                }

            10. sampleInfo = [eventInfo]
                eventInfo = {
                    eventName
                    eventCount
                    processes: [processInfo]
                }
                processInfo = {
                    pid
                    eventCount
                    threads: [threadInfo]
                }
                threadInfo = {
                    tid
                    eventCount
                    libs: [libInfo],
                }
                libInfo = {
                    libId,
                    eventCount,
                    functions: [funcInfo]
                }
                funcInfo = {
                    c: sampleCount
                    g: callGraph
                    rg: reverseCallgraph
                    s: [sourceCodeInfo] [optional]
                    a: [addrInfo] (sorted by addrInfo.addr) [optional]
                }
                callGraph and reverseCallGraph are both of type CallNode.
                callGraph shows how a function calls other functions.
                reverseCallGraph shows how a function is called by other functions.
                CallNode {
                    e: selfEventCount
                    s: subTreeEventCount
                    f: functionId
                    c: [CallNode] # children
                }

                sourceCodeInfo {
                    f: sourceFileId
                    l: line
                    e: eventCount
                    s: subtreeEventCount
                }

                addrInfo {
                    a: addr
                    e: eventCount
                    s: subtreeEventCount
                }

            11. sourceFiles: an array of sourceFile, indexed by sourceFileId.
                sourceFile {
                    path
                    code:  # a map from line to code for that line.
                }
    """

    def __init__(self, binary_cache_path, ndk_path, build_addr_hit_map):
        # Directory with the profiled binaries, or None when unavailable.
        self.binary_cache_path = binary_cache_path
        self.ndk_path = ndk_path
        # When True, per-address hit maps are built while loading samples
        # (needed by add_source_code()/add_disassembly()).
        self.build_addr_hit_map = build_addr_hit_map
        self.meta_info = None
        self.cmdline = None
        self.arch = None
        self.events = {}  # map from event name to EventScope.
        self.libs = LibSet()
        self.functions = FunctionSet()
        self.total_samples = 0
        self.source_files = SourceFileSet()
        # Set by add_disassembly() so gen_record_info() dumps addr hit maps.
        self.gen_addr_hit_map_in_record_info = False

    def load_record_file(self, record_file, show_art_frames):
        """ Read one perf.data file via ReportLib and merge all of its samples
            into the event/process/thread/lib/function hierarchy. Can be called
            once per record file to aggregate several files.
        """
        lib = ReportLib()
        lib.SetRecordFile(record_file)
        # If not showing ip for unknown symbols, the percent of the unknown symbol may be
        # accumulated to very big, and ranks first in the sample table.
        lib.ShowIpForUnknownSymbol()
        if show_art_frames:
            lib.ShowArtFrames()
        if self.binary_cache_path:
            lib.SetSymfs(self.binary_cache_path)
        self.meta_info = lib.MetaInfo()
        self.cmdline = lib.GetRecordCmd()
        self.arch = lib.GetArch()
        # Iterate every sample in the record file.
        while True:
            raw_sample = lib.GetNextSample()
            if not raw_sample:
                lib.Close()
                break
            raw_event = lib.GetEventOfCurrentSample()
            symbol = lib.GetSymbolOfCurrentSample()
            callchain = lib.GetCallChainOfCurrentSample()

            event = self._get_event(raw_event.name)
            self.total_samples += 1
            event.sample_count += 1
            event.event_count += raw_sample.period
            process = event.get_process(raw_sample.pid)
            process.event_count += raw_sample.period
            thread = process.get_thread(raw_sample.tid, raw_sample.thread_comm)
            thread.event_count += raw_sample.period

            # callstack[0] is the sampled pc; the rest is the callchain
            # towards the stack bottom.
            lib_id = self.libs.get_lib_id(symbol.dso_name)
            func_id = self.functions.get_func_id(lib_id, symbol)
            callstack = [(lib_id, func_id, symbol.vaddr_in_file)]
            for i in range(callchain.nr):
                symbol = callchain.entries[i].symbol
                lib_id = self.libs.get_lib_id(symbol.dso_name)
                func_id = self.functions.get_func_id(lib_id, symbol)
                callstack.append((lib_id, func_id, symbol.vaddr_in_file))
            thread.add_callstack(raw_sample.period, callstack, self.build_addr_hit_map)

        # Refresh subtree counts after all samples are merged.
        for event in self.events.values():
            for process in event.processes.values():
                for thread in process.threads.values():
                    for lib in thread.libs.values():
                        for func_id in lib.functions:
                            function = lib.functions[func_id]
                            function.update_subtree_event_count()

    def limit_percents(self, min_func_percent, min_callchain_percent):
        """ Drop functions below min_func_percent of an event's total, and prune
            callchain edges below min_callchain_percent of their function's total.
            Functions that survive are kept in FunctionSet; the rest are trimmed.
        """
        hit_func_ids = set()
        for event in self.events.values():
            min_limit = event.event_count * min_func_percent * 0.01
            for process in event.processes.values():
                for thread in process.threads.values():
                    for lib in thread.libs.values():
                        to_del_func_ids = []
                        for func_id in lib.functions:
                            function = lib.functions[func_id]
                            if function.call_graph.subtree_event_count < min_limit:
                                to_del_func_ids.append(func_id)
                            else:
                                function.limit_callchain_percent(min_callchain_percent,
                                                                 hit_func_ids)
                        for func_id in to_del_func_ids:
                            del lib.functions[func_id]
        self.functions.trim_functions(hit_func_ids)

    def _get_event(self, event_name):
        """ Return the EventScope for event_name, creating it on first use. """
        if event_name not in self.events:
            self.events[event_name] = EventScope(event_name)
        return self.events[event_name]

    def add_source_code(self, source_dirs):
        """ Collect source code information:
            1. Find line ranges for each function in FunctionSet.
            2. Find line for each addr in FunctionScope.addr_hit_map.
            3. Collect needed source code in SourceFileSet.
        """
        addr2line = Addr2Nearestline(self.ndk_path, self.binary_cache_path)
        # Request line range for each function.
        for function in self.functions.id_to_func.values():
            if function.func_name == 'unknown':
                continue
            lib_name = self.libs.get_lib_name(function.lib_id)
            addr2line.add_addr(lib_name, function.start_addr, function.start_addr)
            addr2line.add_addr(lib_name, function.start_addr,
                               function.start_addr + function.addr_len - 1)
        # Request line for each addr in FunctionScope.addr_hit_map.
        for event in self.events.values():
            for process in event.processes.values():
                for thread in process.threads.values():
                    for lib in thread.libs.values():
                        lib_name = self.libs.get_lib_name(lib.lib_id)
                        for function in lib.functions.values():
                            func_addr = self.functions.id_to_func[
                                function.call_graph.func_id].start_addr
                            for addr in function.addr_hit_map:
                                addr2line.add_addr(lib_name, func_addr, addr)
        addr2line.convert_addrs_to_lines()

        # Set line range for each function.
        for function in self.functions.id_to_func.values():
            if function.func_name == 'unknown':
                continue
            dso = addr2line.get_dso(self.libs.get_lib_name(function.lib_id))
            start_source = addr2line.get_addr_source(dso, function.start_addr)
            end_source = addr2line.get_addr_source(dso,
                                                   function.start_addr + function.addr_len - 1)
            if not start_source or not end_source:
                continue
            # source[-1] is the innermost (non-inlined) frame.
            start_file_path, start_line = start_source[-1]
            end_file_path, end_line = end_source[-1]
            # Skip functions spanning multiple files or with inverted ranges.
            if start_file_path != end_file_path or start_line > end_line:
                continue
            source_file = self.source_files.get_source_file(start_file_path)
            source_file.request_lines(start_line, end_line)
            function.source_info = (source_file.file_id, start_line, end_line)

        # Build FunctionScope.line_hit_map.
        for event in self.events.values():
            for process in event.processes.values():
                for thread in process.threads.values():
                    for lib in thread.libs.values():
                        dso = addr2line.get_dso(self.libs.get_lib_name(lib.lib_id))
                        for function in lib.functions.values():
                            for addr in function.addr_hit_map:
                                source = addr2line.get_addr_source(dso, addr)
                                if not source:
                                    continue
                                for file_path, line in source:
                                    source_file = self.source_files.get_source_file(file_path)
                                    # Show [line - 5, line + 5] of the line hit by a sample.
                                    source_file.request_lines(line - 5, line + 5)
                                    count_info = function.addr_hit_map[addr]
                                    function.build_line_hit_map(source_file.file_id, line,
                                                                count_info[0], count_info[1])

        # Collect needed source code in SourceFileSet.
        self.source_files.load_source_code(source_dirs)

    def add_disassembly(self):
        """ Collect disassembly information:
            1. Use objdump to collect disassembly for each function in FunctionSet.
            2. Set flag to dump addr_hit_map when generating record info.
        """
        objdump = Objdump(self.ndk_path, self.binary_cache_path)
        for function in self.functions.id_to_func.values():
            if function.func_name == 'unknown':
                continue
            lib_name = self.libs.get_lib_name(function.lib_id)
            code = objdump.disassemble_code(lib_name, function.start_addr, function.addr_len)
            function.disassembly = code
        self.gen_addr_hit_map_in_record_info = True

    def gen_record_info(self):
        """ Return the full record dict consumed by report.js (see class docstring). """
        record_info = {}
        timestamp = self.meta_info.get('timestamp')
        if timestamp:
            t = datetime.datetime.fromtimestamp(int(timestamp))
        else:
            # Fall back to the current time when the record has no timestamp.
            t = datetime.datetime.now()
        record_info['recordTime'] = t.strftime('%Y-%m-%d (%A) %H:%M:%S')

        product_props = self.meta_info.get('product_props')
        machine_type = self.arch
        if product_props:
            manufacturer, model, name = product_props.split(':')
            machine_type = '%s (%s) by %s, arch %s' % (model, name, manufacturer, self.arch)
        record_info['machineType'] = machine_type
        record_info['androidVersion'] = self.meta_info.get('android_version', '')
        record_info['recordCmdline'] = self.cmdline
        record_info['totalSamples'] = self.total_samples
        record_info['processNames'] = self._gen_process_names()
        record_info['threadNames'] = self._gen_thread_names()
        record_info['libList'] = self._gen_lib_list()
        record_info['functionMap'] = self._gen_function_map()
        record_info['sampleInfo'] = self._gen_sample_info()
        record_info['sourceFiles'] = self._gen_source_files()
        return record_info

    def _gen_process_names(self):
        """ Return map from pid to process name. """
        process_names = {}
        for event in self.events.values():
            for process in event.processes.values():
                process_names[process.pid] = process.name
        return process_names

    def _gen_thread_names(self):
        """ Return map from tid to thread name. """
        thread_names = {}
        for event in self.events.values():
            for process in event.processes.values():
                for thread in process.threads.values():
                    thread_names[thread.tid] = thread.name
        return thread_names

    def _gen_lib_list(self):
        """ Return html-escaped library names, indexed by lib id. """
        return [modify_text_for_html(x) for x in self.libs.lib_id_to_name]

    def _gen_function_map(self):
        """ Return map from func_id to funcData (see class docstring). """
        func_map = {}
        for func_id in sorted(self.functions.id_to_func):
            function = self.functions.id_to_func[func_id]
            func_data = {}
            func_data['l'] = function.lib_id
            func_data['f'] = modify_text_for_html(function.func_name)
            if function.source_info:
                func_data['s'] = function.source_info
            if function.disassembly:
                disassembly_list = []
                for code, addr in function.disassembly:
                    disassembly_list.append([modify_text_for_html(code), addr])
                func_data['d'] = disassembly_list
            func_map[func_id] = func_data
        return func_map

    def _gen_sample_info(self):
        """ Return the per-event sample hierarchy (see class docstring). """
        return [event.get_sample_info(self.gen_addr_hit_map_in_record_info)
                for event in self.events.values()]

    def _gen_source_files(self):
        """ Return source files (path + requested code lines), indexed by file id. """
        source_files = sorted(self.source_files.path_to_source_files.values(),
                              key=lambda x: x.file_id)
        file_list = []
        for source_file in source_files:
            file_data = {}
            if not source_file.real_path:
                # Source was never found on disk; emit an empty placeholder.
                file_data['path'] = ''
                file_data['code'] = {}
            else:
                file_data['path'] = source_file.real_path
                code_map = {}
                for line in source_file.line_to_code:
                    code_map[line] = modify_text_for_html(source_file.line_to_code[line])
                file_data['code'] = code_map
            file_list.append(file_data)
        return file_list
class ReportGenerator(object):
    """ Write the report html file: head resources, the record data as embedded
        JSON, an optional flamegraph, and the report script.
    """

    def __init__(self, html_path):
        # Opens the output file and writes <html><head>...</head><body>.
        self.hw = HtmlWriter(html_path)
        self.hw.open_tag('html')
        self.hw.open_tag('head')
        # External CSS/JS are loaded from CDNs (jQuery UI, DataTables,
        # Google Charts), so viewing the report requires network access.
        self.hw.open_tag('link', rel='stylesheet', type='text/css',
                         href='https://code.jquery.com/ui/1.12.0/themes/smoothness/jquery-ui.css'
                         ).close_tag()
        self.hw.open_tag('link', rel='stylesheet', type='text/css',
                         href='https://cdn.datatables.net/1.10.16/css/jquery.dataTables.min.css'
                         ).close_tag()
        self.hw.open_tag('script', src='https://www.gstatic.com/charts/loader.js').close_tag()
        self.hw.open_tag('script').add(
            "google.charts.load('current', {'packages': ['corechart', 'table']});").close_tag()
        self.hw.open_tag('script', src='https://code.jquery.com/jquery-3.2.1.js').close_tag()
        self.hw.open_tag('script', src='https://code.jquery.com/ui/1.12.1/jquery-ui.js'
                         ).close_tag()
        self.hw.open_tag('script',
                         src='https://cdn.datatables.net/1.10.16/js/jquery.dataTables.min.js').close_tag()
        self.hw.open_tag('script',
                         src='https://cdn.datatables.net/1.10.16/js/dataTables.jqueryui.min.js').close_tag()
        self.hw.open_tag('style', type='text/css').add("""
            .colForLine { width: 50px; }
            .colForCount { width: 100px; }
            .tableCell { font-size: 17px; }
            .boldTableCell { font-weight: bold; font-size: 17px; }
            """).close_tag()
        self.hw.close_tag('head')
        self.hw.open_tag('body')
        # NOTE(review): record_info appears unused in this class — confirm
        # before removing.
        self.record_info = {}

    def write_content_div(self):
        """ Write the empty div that report_html.js fills with the report UI. """
        self.hw.open_tag('div', id='report_content').close_tag()

    def write_record_data(self, record_data):
        """ Embed record_data as a JSON <script> block read by the report script. """
        self.hw.open_tag('script', id='record_data', type='application/json')
        self.hw.add(json.dumps(record_data))
        self.hw.close_tag()

    def write_flamegraph(self, flamegraph):
        """ Write pre-rendered flamegraph markup verbatim. """
        self.hw.add(flamegraph)

    def write_script(self):
        """ Inline the contents of report_html.js. """
        self.hw.open_tag('script').add_file('report_html.js').close_tag()

    def finish(self):
        """ Close </body></html> and the output file. Call exactly once, last. """
        self.hw.close_tag('body')
        self.hw.close_tag('html')
        self.hw.close()
def gen_flamegraph(record_file, show_art_frames):
    """ Run the inferno script on record_file and return the generated
        embedded-flamegraph markup as a string.

        Raises subprocess.CalledProcessError if inferno fails.
    """
    fd, flamegraph_path = tempfile.mkstemp()
    os.close(fd)
    # Previously the temp file leaked when inferno (or the read) raised;
    # the try/finally guarantees cleanup on every path.
    try:
        inferno_script_path = os.path.join(get_script_dir(), 'inferno', 'inferno.py')
        args = [sys.executable, inferno_script_path, '-sc', '-o', flamegraph_path,
                '--record_file', record_file, '--embedded_flamegraph', '--no_browser']
        if show_art_frames:
            args.append('--show_art_frames')
        subprocess.check_call(args)
        with open(flamegraph_path, 'r') as fh:
            return fh.read()
    finally:
        remove(flamegraph_path)
def main():
    """ Parse command-line args, build the record data, and write report.html. """
    parser = argparse.ArgumentParser(description='report profiling data')
    parser.add_argument('-i', '--record_file', nargs='+', default=['perf.data'], help="""
                        Set profiling data file to report. Default is perf.data.""")
    parser.add_argument('-o', '--report_path', default='report.html', help="""
                        Set output html file. Default is report.html.""")
    parser.add_argument('--min_func_percent', default=0.01, type=float, help="""
                        Set min percentage of functions shown in the report.
                        For example, when set to 0.01, only functions taking >= 0.01%% of total
                        event count are collected in the report. Default is 0.01.""")
    parser.add_argument('--min_callchain_percent', default=0.01, type=float, help="""
                        Set min percentage of callchains shown in the report.
                        It is used to limit nodes shown in the function flamegraph. For example,
                        when set to 0.01, only callchains taking >= 0.01%% of the event count of
                        the starting function are collected in the report. Default is 0.01.""")
    parser.add_argument('--add_source_code', action='store_true', help='Add source code.')
    parser.add_argument('--source_dirs', nargs='+', help='Source code directories.')
    parser.add_argument('--add_disassembly', action='store_true', help='Add disassembled code.')
    parser.add_argument('--ndk_path', nargs=1, help='Find tools in the ndk path.')
    parser.add_argument('--no_browser', action='store_true', help="Don't open report in browser.")
    parser.add_argument('--show_art_frames', action='store_true',
                        help='Show frames of internal methods in the ART Java interpreter.')
    args = parser.parse_args()

    # 1. Process args.
    binary_cache_path = 'binary_cache'
    if not os.path.isdir(binary_cache_path):
        # Source/disassembly views need the profiled binaries from binary_cache/.
        if args.add_source_code or args.add_disassembly:
            log_exit("""binary_cache/ doesn't exist. Can't add source code or disassembled code
                        without collected binaries. Please run binary_cache_builder.py to
                        collect binaries for current profiling data, or run app_profiler.py
                        without -nb option.""")
        binary_cache_path = None
    if args.add_source_code and not args.source_dirs:
        log_exit('--source_dirs is needed to add source code.')
    # Per-address hit maps are only needed for source/disassembly annotation.
    build_addr_hit_map = args.add_source_code or args.add_disassembly
    ndk_path = None if not args.ndk_path else args.ndk_path[0]

    # 2. Produce record data.
    record_data = RecordData(binary_cache_path, ndk_path, build_addr_hit_map)
    for record_file in args.record_file:
        record_data.load_record_file(record_file, args.show_art_frames)
    record_data.limit_percents(args.min_func_percent, args.min_callchain_percent)
    if args.add_source_code:
        record_data.add_source_code(args.source_dirs)
    if args.add_disassembly:
        record_data.add_disassembly()

    # 3. Generate report html.
    report_generator = ReportGenerator(args.report_path)
    report_generator.write_content_div()
    report_generator.write_record_data(record_data.gen_record_info())
    report_generator.write_script()
    # TODO: support multiple perf.data in flamegraph.
    if len(args.record_file) > 1:
        log_warning('flamegraph will only be shown for %s' % args.record_file[0])
    flamegraph = gen_flamegraph(args.record_file[0], args.show_art_frames)
    report_generator.write_flamegraph(flamegraph)
    report_generator.finish()

    if not args.no_browser:
        open_report_in_browser(args.report_path)
    log_info("Report generated at '%s'." % args.report_path)


if __name__ == '__main__':
    main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,483
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/data_collection/gps_uploader.py
|
import os
import json
import gzip
import ftplib
import string
import random
import datetime
from common.params import Params
from common.op_params import opParams
op_params = opParams()
# May be None until upload_data() generates and stores a random id.
uniqueID = op_params.get('uniqueID')
def upload_data():
  """ Compress the collected GPS log and upload it over FTP.

  Returns True on success; False when there is no data file or any step fails.
  """
  filepath = "/data/openpilot/selfdrive/data_collection/gps-data"
  if not os.path.isfile(filepath):
    return False
  if uniqueID is None:
    # First run: generate and persist a random 15-character device id.
    op_params.put('uniqueID', ''.join([random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for i in range(15)]))
  try:
    username = op_params.get('uniqueID')
    try:
      # Prefer the comma account username when available; best effort only.
      with open("/data/data/ai.comma.plus.offroad/files/persistStore/persist-auth", "r") as f:
        auth = json.loads(f.read())
      auth = json.loads(auth['commaUser'])
      if auth and str(auth['username']) != "":
        username = str(auth['username'])
    except Exception:
      pass  # fall back to the generated id
    params = Params()
    car = params.get('CachedFingerprint')
    if car is not None:
      car = json.loads(car)
      username += "-{}".format(car[0])

    filename = "gps-data.{}".format(random.randint(1, 99999))
    gz_path = "/data/" + filename + ".gz"
    # was: fp = open(filepath, "rb") with no close() -- leaked the file handle.
    with open(filepath, "rb") as fp:
      data = fp.read()
    with gzip.open(gz_path, "wb") as f:
      f.write(data)

    ftp = ftplib.FTP("arneschwarck.dyndns.org")
    ftp.login("openpilot", "communitypilot")
    with open(gz_path, "rb") as f:
      try:
        ftp.mkd("/{}".format(username))
      except Exception:
        pass  # directory probably exists already
      ftp.storbinary("STOR /{}/{}".format(username, filename + ".gz"), f)
    ftp.quit()

    os.remove(filepath)
    os.remove(gz_path)
    t = datetime.datetime.utcnow().isoformat()
    params.put("LastUpdateTime", t.encode('utf8'))
    return True
  except Exception:
    # Deliberate best-effort: uploading is optional, report failure quietly.
    return False
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,484
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/travis_checker.py
|
from common.basedir import BASEDIR

# On-device installs live under /data; any other top-level directory
# (e.g. a CI checkout) is treated as a Travis environment.
_top_level_dir = BASEDIR.strip('/').split('/')[0]
travis = _top_level_dir != 'data'
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,485
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/nui/get_files_comma_api.py
|
#!/usr/bin/env python3
"""Dump the camera and log file paths of a route to routes.json.

Usage: get_files_comma_api.py <route_name>
"""
import json
import sys
from tools.lib.route import Route

route_name = sys.argv[1]
route = Route(route_name)
data_dump = {
  "camera": route.camera_paths(),
  "logs": route.log_paths(),
}
# Use a context manager so the file is flushed and closed deterministically;
# the original `json.dump(data_dump, open(...))` left the handle dangling
# until garbage collection.
with open("routes.json", "w") as f:
  json.dump(data_dump, f)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,486
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/dp_time.py
|
#!/usr/bin/env python3.7
# delay of reading last modified
#
# Minimum interval, in seconds, that each consumer waits between re-checks of
# its parameter file's last-modified time (presumably to throttle disk reads —
# confirm against the consumers these constants are named after).
LAST_MODIFIED_DYNAMIC_FOLLOW = 3.
LAST_MODIFIED_THERMALD = 10.
LAST_MODIFIED_SYSTEMD = 1.
LAST_MODIFIED_LANE_PLANNER = 3.
LAST_MODIFIED_UPLOADER = 10.
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,487
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/replay/sensorium.py
|
#!/usr/bin/env python3
# Question: Can a human drive from this data?
#
# Replays raw camera frames from the 'frame' socket, warps each one into the
# model's input space using the live calibration, and displays the result.
import os
import cv2  # pylint: disable=import-error
import numpy as np
import cereal.messaging as messaging
from common.window import Window
if os.getenv("BIG") is not None:
  # BIG=1 swaps in the big-model geometry under the same local names, so the
  # rest of the script is resolution-agnostic.
  from common.transformations.model import BIGMODEL_INPUT_SIZE as MEDMODEL_INPUT_SIZE
  from common.transformations.model import get_camera_frame_from_bigmodel_frame as get_camera_frame_from_medmodel_frame
else:
  from common.transformations.model import MEDMODEL_INPUT_SIZE
  from common.transformations.model import get_camera_frame_from_medmodel_frame
from tools.replay.lib.ui_helpers import CalibrationTransformsForWarpMatrix, _FULL_FRAME_SIZE, _INTRINSICS
if __name__ == "__main__":
  sm = messaging.SubMaster(['liveCalibration'])
  frame = messaging.sub_sock('frame', conflate=True)
  win = Window(MEDMODEL_INPUT_SIZE[0], MEDMODEL_INPUT_SIZE[1], double=True)
  num_px = 0
  calibration = None
  imgff = None
  while 1:
    fpkt = messaging.recv_one(frame)
    if fpkt is None or len(fpkt.frame.image) == 0:
      continue
    sm.update(timeout=1)
    rgb_img_raw = fpkt.frame.image
    # 3 bytes per pixel, so the pixel count identifies the camera resolution.
    num_px = len(rgb_img_raw) // 3
    if rgb_img_raw and num_px in _FULL_FRAME_SIZE.keys():
      FULL_FRAME_SIZE = _FULL_FRAME_SIZE[num_px]
      # Buffer layout is (height, width, 3) per the reshape below.
      imgff = np.frombuffer(rgb_img_raw, dtype=np.uint8).reshape((FULL_FRAME_SIZE[1], FULL_FRAME_SIZE[0], 3))
      imgff = imgff[:, :, ::-1]  # Convert BGR to RGB
    if sm.updated['liveCalibration'] and num_px:
      intrinsic_matrix = _INTRINSICS[num_px]
      img_transform = np.array(fpkt.frame.transform).reshape(3, 3)
      extrinsic_matrix = np.asarray(sm['liveCalibration'].extrinsicMatrix).reshape(3, 4)
      ke = intrinsic_matrix.dot(extrinsic_matrix)
      warp_matrix = get_camera_frame_from_medmodel_frame(ke)
      calibration = CalibrationTransformsForWarpMatrix(num_px, warp_matrix, intrinsic_matrix, extrinsic_matrix)
      # NOTE(review): 'transform' is only refreshed when a calibration message
      # arrives, so later frames reuse the transform computed from an earlier
      # frame's img_transform — confirm this staleness is intended.
      transform = np.dot(img_transform, calibration.model_to_full_frame)
    if calibration is not None and imgff is not None:
      # Warp the full frame into the model input size and show it.
      imgw = cv2.warpAffine(imgff, transform[:2],
                            (MEDMODEL_INPUT_SIZE[0], MEDMODEL_INPUT_SIZE[1]),
                            flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC)
      win.draw(imgw)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,488
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/replay/rqplot.py
|
#!/usr/bin/env python
# type: ignore
import sys
import matplotlib.pyplot as plt
import numpy as np
import cereal.messaging as messaging
import time
# tool to plot one or more signals live. Call ex:
#./rqplot.py log.carState.vEgo log.carState.aEgo
# TODO: can this tool consume 10x less cpu?
def recursive_getattr(x, name):
  """Resolve a dotted attribute path, e.g. 'carState.vEgo', against *x*."""
  obj = x
  for part in name.split('.'):
    obj = getattr(obj, part)
  return obj
if __name__ == "__main__":
  # Live-plot one or more logged signals, one line per dotted signal name
  # given on the command line (e.g. carState.vEgo).
  poller = messaging.Poller()
  services = []
  fields = []   # NOTE(review): built but never read below
  subs = []
  values = []   # NOTE(review): unused
  plt.ion()
  fig, ax = plt.subplots()
  #fig = plt.figure(figsize=(10, 15))
  #ax = fig.add_subplot(111)
  ax.grid(True)
  fig.canvas.draw()
  subs_name = sys.argv[1:]
  lines = []
  x, y = [], []
  LEN = 500  # number of samples kept per signal (sliding window)
  for i, sub in enumerate(subs_name):
    sub_split = sub.split(".")
    services.append(sub_split[0])
    fields.append(".".join(sub_split[1:]))
    subs.append(messaging.sub_sock(sub_split[0], poller))
    # Pre-fill with NaN so un-received samples don't draw.
    x.append(np.ones(LEN)*np.nan)
    y.append(np.ones(LEN)*np.nan)
    lines.append(ax.plot(x[i], y[i])[0])
  for l in lines:
    l.set_marker("*")
  cur_t = 0.
  ax.legend(subs_name)
  ax.set_xlabel('time [s]')
  while 1:
    # Loop-rate print; the first iteration divides by (now - 0) and is bogus.
    print(1./(time.time() - cur_t))
    cur_t = time.time()
    for i, s in enumerate(subs):
      msg = messaging.recv_sock(s)
      #msg = messaging.recv_one_or_none(s)
      if msg is not None:
        # Shift the sliding window: append newest sample, drop oldest.
        x[i] = np.append(x[i], getattr(msg, 'logMonoTime') / float(1e9))
        x[i] = np.delete(x[i], 0)
        y[i] = np.append(y[i], recursive_getattr(msg, subs_name[i]))
        y[i] = np.delete(y[i], 0)
        lines[i].set_xdata(x[i])
        lines[i].set_ydata(y[i])
    ax.relim()
    ax.autoscale_view(True, scaley=True, scalex=True)
    fig.canvas.blit(ax.bbox)
    fig.canvas.flush_events()
    # just a bit of wait to avoid 100% CPU usage
    time.sleep(0.001)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,489
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/replay/unlogger.py
|
#!/usr/bin/env python3
import argparse
import os
import sys
import zmq
import time
import signal
import multiprocessing
from uuid import uuid4
from collections import namedtuple
from collections import deque
from datetime import datetime
# strat 1: script to copy files
# strat 2: build pip packages around these
# could be its own pip package, which we'd need to build and release
from cereal import log as capnp_log
from cereal.services import service_list
from cereal.messaging import pub_sock, MultiplePublishersError
from common import realtime
from tools.lib.kbhit import KBHit
from tools.lib.logreader import MultiLogIterator
from tools.lib.route import Route
from tools.lib.route_framereader import RouteFrameReader
# Commands.
# Messages sent from the UI/keyboard thread to the unlogger worker.
SetRoute = namedtuple("SetRoute", ("name", "start_time", "data_dir"))  # load a route, seek to start_time
SeekAbsoluteTime = namedtuple("SeekAbsoluteTime", ("secs",))           # jump to an absolute route time
SeekRelativeTime = namedtuple("SeekRelativeTime", ("secs",))           # jump by a signed offset
TogglePause = namedtuple("TogglePause", ())                            # pause/resume playback
StopAndQuit = namedtuple("StopAndQuit", ())                            # shut everything down
class UnloggerWorker(object):
  """Reads log messages from a route and pushes them over zmq.

  A deque-based readahead buffer decouples reading the logs from sending
  them: _read_logs fills the buffer (up to 1000 entries), _send_logs drains
  it down to 500.
  """

  def __init__(self):
    self._frame_reader = None
    self._cookie = None
    self._readahead = deque()

  def run(self, commands_address, data_address, pub_types):
    """Main worker loop: consume commands, read logs, push data.

    pub_types maps message type names to be published; it is mutated to
    ensure 'encodeIdx' accompanies 'frame'.
    """
    # Fresh zmq context for this (sub)process.
    zmq.Context._instance = None
    commands_socket = zmq.Context.instance().socket(zmq.PULL)
    commands_socket.connect(commands_address)
    data_socket = zmq.Context.instance().socket(zmq.PUSH)
    data_socket.connect(data_address)
    poller = zmq.Poller()
    poller.register(commands_socket, zmq.POLLIN)
    # We can't publish frames without encodeIdx, so add when it's missing.
    if "frame" in pub_types:
      pub_types["encodeIdx"] = None
    # gc.set_debug(gc.DEBUG_LEAK | gc.DEBUG_OBJECTS | gc.DEBUG_STATS | gc.DEBUG_SAVEALL |
    # gc.DEBUG_UNCOLLECTABLE)
    # TODO: WARNING pycapnp leaks memory all over the place after unlogger runs for a while, gc
    # pauses become huge because there are so many tracked objects solution will be to switch to new
    # cython capnp
    try:
      route = None
      while True:
        # Drain any pending commands; block on commands until a route is set.
        while poller.poll(0.) or route is None:
          cookie, cmd = commands_socket.recv_pyobj()
          route = self._process_commands(cmd, route, pub_types)
        # **** get message ****
        self._read_logs(cookie, pub_types)
        self._send_logs(data_socket)
    finally:
      if self._frame_reader is not None:
        self._frame_reader.close()
      data_socket.close()
      commands_socket.close()

  def _read_logs(self, cookie, pub_types):
    """Refill the readahead buffer from the log iterator (up to 1000 entries)."""
    fullHEVC = capnp_log.EncodeIndex.Type.fullHEVC
    lr = self._lr
    while len(self._readahead) < 1000:
      route_time = lr.tell()
      msg = next(lr)
      typ = msg.which()
      if typ not in pub_types:
        continue
      # **** special case certain message types ****
      if typ == "encodeIdx" and msg.encodeIdx.type == fullHEVC:
        # this assumes the encodeIdx always comes before the frame
        self._frame_id_lookup[
          msg.encodeIdx.frameId] = msg.encodeIdx.segmentNum, msg.encodeIdx.segmentId
        #print "encode", msg.encodeIdx.frameId, len(self._readahead), route_time
      self._readahead.appendleft((typ, msg, route_time, cookie))

  def _send_logs(self, data_socket):
    """Drain the readahead buffer down to 500 entries, pushing each message."""
    while len(self._readahead) > 500:
      typ, msg, route_time, cookie = self._readahead.pop()
      smsg = msg.as_builder()
      if typ == "frame":
        frame_id = msg.frame.frameId
        # Frame exists, make sure we have a framereader.
        # load the frame readers as needed
        s1 = time.time()
        img = self._frame_reader.get(frame_id, pix_fmt="rgb24")
        fr_time = time.time() - s1
        if fr_time > 0.05:
          print("FRAME(%d) LAG -- %.2f ms" % (frame_id, fr_time*1000.0))
        if img is not None:
          img = img[:, :, ::-1]  # Convert RGB to BGR, which is what the camera outputs
          img = img.flatten()
          smsg.frame.image = img.tobytes()
      # Two-part send: pickled metadata, then the raw capnp bytes.
      data_socket.send_pyobj((cookie, typ, msg.logMonoTime, route_time), flags=zmq.SNDMORE)
      data_socket.send(smsg.to_bytes(), copy=False)

  def _process_commands(self, cmd, route, pub_types):
    """Handle one command (route change, seek, quit); return current route."""
    seek_to = None
    if route is None or (isinstance(cmd, SetRoute) and route.name != cmd.name):
      seek_to = cmd.start_time
      route = Route(cmd.name, cmd.data_dir)
      self._lr = MultiLogIterator(route.log_paths(), wraparound=True)
      if self._frame_reader is not None:
        self._frame_reader.close()
      if "frame" in pub_types or "encodeIdx" in pub_types:
        # reset frames for a route
        self._frame_id_lookup = {}
        self._frame_reader = RouteFrameReader(
          route.camera_paths(), None, self._frame_id_lookup, readahead=True)
    # always reset this on a seek
    if isinstance(cmd, SeekRelativeTime):
      seek_to = self._lr.tell() + cmd.secs
    elif isinstance(cmd, SeekAbsoluteTime):
      seek_to = cmd.secs
    elif isinstance(cmd, StopAndQuit):
      exit()
    if seek_to is not None:
      print("seeking", seek_to)
      if not self._lr.seek(seek_to):
        print("Can't seek: time out of bounds")
      else:
        next(self._lr)  # ignore one
    return route
def _get_address_send_func(address):
  """Create a publisher socket bound to *address* and return its send method."""
  return pub_sock(address).send
def unlogger_thread(command_address, forward_commands_address, data_address, run_realtime,
                    address_mapping, publish_time_length, bind_early, no_loop):
  """Republish logged messages received from the worker process.

  Pulls (metadata, bytes) pairs from data_address, paces them to real time
  when run_realtime is set, and publishes each on its service socket.
  Commands from command_address are handled locally (pause) or forwarded to
  the worker with an incremented generation counter so stale data can be
  dropped.
  """
  # Clear context to avoid problems with multiprocessing.
  zmq.Context._instance = None
  context = zmq.Context.instance()
  command_sock = context.socket(zmq.PULL)
  command_sock.bind(command_address)
  forward_commands_socket = context.socket(zmq.PUSH)
  forward_commands_socket.bind(forward_commands_address)
  data_socket = context.socket(zmq.PULL)
  data_socket.bind(data_address)
  # Set readahead to a reasonable number.
  data_socket.setsockopt(zmq.RCVHWM, 10000)
  poller = zmq.Poller()
  poller.register(command_sock, zmq.POLLIN)
  poller.register(data_socket, zmq.POLLIN)
  if bind_early:
    send_funcs = {
      typ: _get_address_send_func(address)
      for typ, address in address_mapping.items()
    }
    # Give subscribers a chance to connect.
    time.sleep(0.1)
  else:
    # Otherwise sockets are bound lazily, on the first message of each type.
    send_funcs = {}
  start_time = float("inf")
  printed_at = 0
  generation = 0
  paused = False
  reset_time = True
  prev_msg_time = None
  while True:
    evts = dict(poller.poll())
    if command_sock in evts:
      cmd = command_sock.recv_pyobj()
      if isinstance(cmd, TogglePause):
        paused = not paused
        # Pausing just stops polling the data socket; the worker keeps queueing.
        if paused:
          poller.modify(data_socket, 0)
        else:
          poller.modify(data_socket, zmq.POLLIN)
      else:
        # Forward the command to the log data thread.
        # TODO: Remove everything on data_socket.
        generation += 1
        forward_commands_socket.send_pyobj((generation, cmd))
        if isinstance(cmd, StopAndQuit):
          return
        reset_time = True
    elif data_socket in evts:
      msg_generation, typ, msg_time, route_time = data_socket.recv_pyobj(flags=zmq.RCVMORE)
      msg_bytes = data_socket.recv()
      if msg_generation < generation:
        # Skip packets.
        continue
      # A >1s backwards jump in log time means the route wrapped around.
      if no_loop and prev_msg_time is not None and prev_msg_time > msg_time + 1e9:
        generation += 1
        forward_commands_socket.send_pyobj((generation, StopAndQuit()))
        return
      prev_msg_time = msg_time
      msg_time_seconds = msg_time * 1e-9  # logMonoTime is in nanoseconds
      if reset_time:
        msg_start_time = msg_time_seconds
        real_start_time = realtime.sec_since_boot()
        start_time = min(start_time, msg_start_time)
        reset_time = False
      if publish_time_length and msg_time_seconds - start_time > publish_time_length:
        generation += 1
        forward_commands_socket.send_pyobj((generation, StopAndQuit()))
        return
      # Print time.
      if abs(printed_at - route_time) > 5.:
        print("at", route_time)
        printed_at = route_time
      if typ not in send_funcs:
        if typ in address_mapping:
          # Remove so we don't keep printing warnings.
          address = address_mapping.pop(typ)
          try:
            print("binding", typ)
            send_funcs[typ] = _get_address_send_func(address)
          except Exception as e:
            print("couldn't replay {}: {}".format(typ, e))
            continue
        else:
          # Skip messages that we are not registered to publish.
          continue
      # Sleep as needed for real time playback.
      if run_realtime:
        msg_time_offset = msg_time_seconds - msg_start_time
        real_time_offset = realtime.sec_since_boot() - real_start_time
        lag = msg_time_offset - real_time_offset
        if lag > 0 and lag < 30:  # a large jump is OK, likely due to an out of order segment
          if lag > 1:
            print("sleeping for", lag)
          time.sleep(lag)
        elif lag < -1:
          # Relax the real time schedule when we slip far behind.
          reset_time = True
      # Send message.
      try:
        send_funcs[typ](msg_bytes)
      except MultiplePublishersError:
        # Someone else took over this service; stop publishing it.
        del send_funcs[typ]
def timestamp_to_s(tss):
  """Convert a 'YYYY-MM-DD--HH-MM-SS' route timestamp to epoch seconds (local time)."""
  parsed = datetime.strptime(tss, '%Y-%m-%d--%H-%M-%S')
  return time.mktime(parsed.timetuple())
def absolute_time_str(s, start_time):
  """Interpret *s* as seconds (plain number) or as a route timestamp.

  A numeric string is returned as a float directly; otherwise *s* is parsed
  as a 'YYYY-MM-DD--HH-MM-SS' timestamp and returned relative to start_time.
  """
  try:
    seconds = float(s)
  except ValueError:
    # Not a plain number -- treat it as an absolute timestamp.
    seconds = timestamp_to_s(s) - start_time
  return seconds
def _get_address_mapping(args):
  """Build {service_name: publish_address} from the parsed CLI arguments.

  --min selects a fixed minimal service set, --enabled an explicit one;
  otherwise every known service is mocked. Explicit pairs from the command
  line override the defaults, and --disabled entries are removed.
  """
  if args.min is not None:
    mocked = [
      'thermal', 'can', 'health', 'sensorEvents', 'gpsNMEA', 'frame', 'encodeIdx',
      'model', 'features', 'liveLocation', 'gpsLocation'
    ]
  elif args.enabled is not None:
    mocked = args.enabled
  else:
    mocked = service_list.keys()
  # By default each service publishes under its own name.
  mapping = {name: name for name in mocked}
  mapping.update(dict(args.address_mapping))
  for disabled_service in args.disabled:
    mapping.pop(disabled_service, None)
  unknown = set(mapping) - set(service_list)
  if unknown:
    print("WARNING: Unknown services {}".format(list(unknown)))
  return mapping
def keyboard_controller_thread(q, route_start_time):
  """Read single keypresses and translate them into seek/pause commands on *q*.

  Blocks forever on keyboard input; meant to run in its own thread/process.
  """
  print("keyboard waiting for input")
  kb = KBHit()
  while 1:
    c = kb.getch()
    if c == 'm':  # Move forward by 1m
      q.send_pyobj(SeekRelativeTime(60))
    elif c == 'M':  # Move backward by 1m
      q.send_pyobj(SeekRelativeTime(-60))
    elif c == 's':  # Move forward by 10s
      q.send_pyobj(SeekRelativeTime(10))
    elif c == 'S':  # Move backward by 10s
      q.send_pyobj(SeekRelativeTime(-10))
    elif c == 'G':  # Seek back to the start of the route
      q.send_pyobj(SeekAbsoluteTime(0.))
    elif c == "\x20":  # Space bar.
      q.send_pyobj(TogglePause())
    elif c == "\n":
      # Enter: prompt for an explicit time (seconds, segment number, or
      # 'YYYY-MM-DD--HH-MM-SS' timestamp).
      try:
        seek_time_input = input('time: ')
        seek_time = absolute_time_str(seek_time_input, route_start_time)
        # If less than 60, assume segment number
        if seek_time < 60:
          seek_time *= 60
        q.send_pyobj(SeekAbsoluteTime(seek_time))
      except Exception as e:
        print("Time not understood: {}".format(e))
def get_arg_parser():
  """Construct the unlogger's command-line argument parser."""
  parser = argparse.ArgumentParser(
    description="Mock openpilot components by publishing logged messages.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)

  def split_pair(value):
    return value.split("=")

  def split_commas(value):
    return value.split(",")

  parser.add_argument("route_name", type=(lambda x: x.replace("#", "|")), nargs="?",
                      help="The route whose messages will be published.")
  parser.add_argument("data_dir", nargs='?', default=os.getenv('UNLOGGER_DATA_DIR'),
                      help="Path to directory in which log and camera files are located.")
  parser.add_argument("--no-loop", action="store_true", help="Stop at the end of the replay.")
  parser.add_argument("address_mapping", nargs="*", type=split_pair,
                      help="Pairs <service>=<zmq_addr> to publish <service> on <zmq_addr>.")

  mock_group = parser.add_mutually_exclusive_group()
  mock_group.add_argument("--min", action="store_true", default=os.getenv("MIN"))
  mock_group.add_argument("--enabled", default=os.getenv("ENABLED"), type=split_commas)

  parser.add_argument("--disabled", type=split_commas, default=os.getenv("DISABLED") or ())
  parser.add_argument(
    "--tl", dest="publish_time_length", type=float, default=None,
    help="Length of interval in event time for which messages should be published.")
  parser.add_argument(
    "--no-realtime", dest="realtime", action="store_false", default=True,
    help="Publish messages as quickly as possible instead of realtime.")
  parser.add_argument(
    "--no-interactive", dest="interactive", action="store_false", default=True,
    help="Disable interactivity.")
  parser.add_argument(
    "--bind-early", action="store_true", default=False,
    help="Bind early to avoid dropping messages.")
  return parser
def main(argv):
  """Entry point: spawn the unlogger data/control subprocesses and drive them.

  argv: command-line arguments excluding the program name.
  Returns 0 on normal exit.
  """
  # Fixed: previously parsed sys.argv[1:] directly, silently ignoring the
  # argv parameter (harmless for the __main__ call site, but wrong for any
  # programmatic caller).
  args = get_arg_parser().parse_args(argv)

  # Fresh ipc endpoints per run so concurrent instances don't collide.
  command_address = "ipc:///tmp/{}".format(uuid4())
  forward_commands_address = "ipc:///tmp/{}".format(uuid4())
  data_address = "ipc:///tmp/{}".format(uuid4())

  address_mapping = _get_address_mapping(args)

  command_sock = zmq.Context.instance().socket(zmq.PUSH)
  command_sock.connect(command_address)

  if args.route_name is not None:
    route_name_split = args.route_name.split("|")
    if len(route_name_split) > 1:
      route_start_time = timestamp_to_s(route_name_split[1])
    else:
      route_start_time = 0
    command_sock.send_pyobj(
      SetRoute(args.route_name, 0, args.data_dir))
  else:
    print("waiting for external command...")
    route_start_time = 0

  subprocesses = {}
  try:
    subprocesses["data"] = multiprocessing.Process(
      target=UnloggerWorker().run,
      args=(forward_commands_address, data_address, address_mapping.copy()))

    subprocesses["control"] = multiprocessing.Process(
      target=unlogger_thread,
      args=(command_address, forward_commands_address, data_address, args.realtime,
            _get_address_mapping(args), args.publish_time_length, args.bind_early, args.no_loop))

    subprocesses["data"].start()
    subprocesses["control"].start()

    # Exit if any of the children die.
    def exit_if_children_dead(*_):
      for _, p in subprocesses.items():
        if not p.is_alive():
          [p.terminate() for p in subprocesses.values()]
          exit()
    signal.signal(signal.SIGCHLD, signal.SIG_IGN)
    signal.signal(signal.SIGCHLD, exit_if_children_dead)

    if args.interactive:
      keyboard_controller_thread(command_sock, route_start_time)
    else:
      # Wait forever for children.
      while True:
        time.sleep(10000.)
  finally:
    for p in subprocesses.values():
      if p.is_alive():
        # Fixed: Process.join(timeout) returns None on timeout instead of
        # raising, so the old `except multiprocessing.TimeoutError` branch
        # could never fire and stuck children were never terminated.
        p.join(3.)
        if p.is_alive():
          p.terminate()
  return 0
if __name__ == "__main__":
  # Script entry point.
  sys.exit(main(sys.argv[1:]))
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,490
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/lib/file_helpers.py
|
import os
from atomicwrites import AtomicWriter
def atomic_write_in_dir(path, **kwargs):
  """Open an atomic writer whose temp file lives next to *path*.

  Keeping the temp file in the destination directory means the final rename
  happens within one directory.
  """
  w = AtomicWriter(path, **kwargs)
  dest_dir = os.path.dirname(path)
  return w._open(_get_fileobject_func(w, dest_dir))
def _get_fileobject_func(writer, temp_dir):
def _get_fileobject():
file_obj = writer.get_fileobject(dir=temp_dir)
os.chmod(file_obj.name, 0o644)
return file_obj
return _get_fileobject
def mkdirs_exists_ok(path):
  """Create *path* (including parents), tolerating an already-existing directory.

  Raises OSError for any other failure (e.g. *path* exists but is a file,
  or a parent is not writable).
  """
  # exist_ok=True reproduces the old try/except-OSError + isdir() dance:
  # the error is suppressed only when the final path is already a directory.
  os.makedirs(path, exist_ok=True)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,491
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/colors.py
|
# mypy: ignore-errors
# ANSI terminal escape sequences used for colored console output.
# NOTE: '\33' and '\033' are the same ESC byte, written with and without
# the leading zero.
class COLORS:
  def __init__(self):
    self.HEADER = '\033[95m'
    self.OKBLUE = '\033[94m'
    self.CBLUE = '\33[44m'
    self.BOLD = '\033[1m'
    self.CITALIC = '\33[3m'
    self.OKGREEN = '\033[92m'
    self.CWHITE = '\33[37m'
    # Reset all attributes, then switch back to white.
    self.ENDC = '\033[0m' + self.CWHITE
    self.UNDERLINE = '\033[4m'
    self.PINK = '\33[38;5;207m'
    self.PRETTY_YELLOW = self.BASE(220)

    self.RED = '\033[91m'
    self.PURPLE_BG = '\33[45m'
    self.YELLOW = '\033[93m'
    self.BLUE_GREEN = self.BASE(85)

    # Semantic aliases used by the opParams helpers below.
    self.FAIL = self.RED
    # self.INFO = self.PURPLE_BG
    self.INFO = self.BASE(207)
    self.SUCCESS = self.OKGREEN
    self.PROMPT = self.YELLOW
    self.DBLUE = '\033[36m'
    self.CYAN = self.BASE(39)
    self.WARNING = '\033[33m'

  def BASE(self, col):  # seems to support more colors
    """256-color foreground escape for palette index *col*."""
    return '\33[38;5;{}m'.format(col)

  def BASEBG(self, col):  # seems to support more colors
    """256-color background escape for palette index *col*."""
    return '\33[48;5;{}m'.format(col)

# Replace the class with a module-level singleton; importers use COLORS.<name>.
COLORS = COLORS()
def opParams_warning(msg):
  """Print *msg* as a color-highlighted opParams warning."""
  print(f'{COLORS.WARNING}opParams WARNING: {msg}{COLORS.ENDC}')
def opParams_error(msg):
  """Print *msg* as a color-highlighted opParams error."""
  print(f'{COLORS.FAIL}opParams ERROR: {msg}{COLORS.ENDC}')
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,492
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/dragonpilot/appd.py
|
#!/usr/bin/env python3.7
import time
import subprocess
import cereal
import cereal.messaging as messaging
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.swaglog import cloudlog
from common.params import Params, put_nonblocking
params = Params()
from math import floor
import re
import os
#from common.dp_common import is_online
from common.dp_conf import get_struct_name
from common.realtime import sec_since_boot
#is_online = is_online()
auto_update = params.get("dp_app_auto_update", encoding='utf8') == "1"
class App():
  """One managed Android app: installed/updated from the dragonpilot app repo
  (or a pre-bundled local apk), granted its permissions, and started/stopped
  according to dragonConf state."""

  # app type
  TYPE_GPS = 0
  TYPE_SERVICE = 1      # a service app is not killed when the device is too hot
  TYPE_FULLSCREEN = 2
  TYPE_UTIL = 3
  TYPE_ANDROID_AUTO = 4

  # manual switch states (values carried by the dp_*_manual params)
  MANUAL_OFF = -1
  MANUAL_IDLE = 0
  MANUAL_ON = 1

  def appops_set(self, package, op, mode):
    """Set an Android appops operation (e.g. mock_location) for *package*."""
    self.system(f"LD_LIBRARY_PATH= appops set {package} {op} {mode}")

  def pm_grant(self, package, permission):
    """Grant a runtime permission to *package* via pm."""
    self.system(f"pm grant {package} {permission}")

  def set_package_permissions(self):
    """Grant every configured permission and appops option to this app."""
    if self.permissions is not None:
      for permission in self.permissions:
        self.pm_grant(self.app, permission)
    if self.opts is not None:
      for opt in self.opts:
        self.appops_set(self.app, opt, "allow")

  def __init__(self, app, start_cmd, enable_param, auto_run_param, manual_ctrl_param, app_type, check_crash, permissions, opts):
    self.app = app
    # command used to launch the app's main activity/service
    self.start_cmd = start_cmd
    # enable param
    self.enable_param = enable_param
    self.enable_struct = get_struct_name(enable_param) if enable_param is not None else None
    # auto run param
    self.auto_run_struct = get_struct_name(auto_run_param) if auto_run_param is not None else None
    # manual run param
    self.manual_ctrl_param = manual_ctrl_param
    self.manual_ctrl_struct = get_struct_name(manual_ctrl_param) if manual_ctrl_param is not None else None
    # if it's a service app, we do not kill it when the device is too hot
    self.app_type = app_type
    # app permissions
    self.permissions = permissions
    # app options (appops)
    self.opts = opts

    # a pre-bundled apk in /sdcard/apks takes priority over the download repo
    self.own_apk = "/sdcard/apks/" + self.app + ".apk"
    self.has_own_apk = os.path.exists(self.own_apk)
    self.is_installed = False
    self.is_enabled = False
    self.last_is_enabled = False
    self.is_auto_runnable = False
    self.is_running = False
    self.manual_ctrl_status = self.MANUAL_IDLE
    self.manually_ctrled = False
    self.init = False
    self.check_crash = check_crash

  def is_crashed(self):
    """Dispatch to the per-app crash detector named <enable_param>_is_crashed."""
    return getattr(self, self.enable_param + "_is_crashed")()

  def dp_app_hr_is_crashed(self):
    """Crash check for gb.xxy.hr: crashed when dumpsys reports no ACTIVITY record."""
    try:
      result = subprocess.check_output(["dumpsys", "activity", "gb.xxy.hr"], encoding='utf8')
      crashed = "ACTIVITY" not in result
      # Fixed: the original `"is_crash = %s" % "ACTIVITY" in result` bound `%`
      # before `in` (operator precedence), so it logged whether the *formatted
      # string* appeared in the output rather than the crash state.
      print("is_crash = %s" % crashed)
      return crashed
    except (subprocess.CalledProcessError, IndexError):
      return False

  def get_remote_version(self):
    """Fetch the latest version string from the dragonpilot app repo, or None."""
    apk = self.app + ".apk"
    try:
      url = "https://raw.githubusercontent.com/dragonpilot-community/apps/%s/VERSION" % apk
      return subprocess.check_output(["curl", "-H", "'Cache-Control: no-cache'", "-s", url]).decode('utf8').rstrip()
    except subprocess.CalledProcessError:
      pass
    return None

  def uninstall_app(self):
    """Best-effort uninstall; only runs pm when a local version is present."""
    try:
      local_version = self.get_local_version()
      if local_version is not None:
        subprocess.check_output(["pm","uninstall", self.app])
        self.is_installed = False
    except subprocess.CalledProcessError:
      pass

  def update_app(self):
    """(Re)install the app, preferring a local apk, else downloading from the
    repo. dp_is_updating is set around the operation."""
    put_nonblocking('dp_is_updating', '1')
    if self.has_own_apk:
      try:
        subprocess.check_output(["pm","install","-r",self.own_apk])
        self.is_installed = True
      except subprocess.CalledProcessError:
        self.is_installed = False
    else:
      apk = self.app + ".apk"
      apk_path = "/sdcard/" + apk
      # remove any stale partial download first
      try:
        os.remove(apk_path)
      except (OSError, FileNotFoundError):
        pass

      self.uninstall_app()
      try:
        url = "https://raw.githubusercontent.com/dragonpilot-community/apps/%s/%s" % (apk, apk)
        subprocess.check_output(["curl","-o", apk_path,"-LJO", url])
        subprocess.check_output(["pm","install","-r",apk_path])
        self.is_installed = True
      except subprocess.CalledProcessError:
        self.is_installed = False
      # always clean up the downloaded apk
      try:
        os.remove(apk_path)
      except (OSError, FileNotFoundError):
        pass
    put_nonblocking('dp_is_updating', '0')

  def get_local_version(self):
    """Return the installed versionName, or None.

    NOTE(review): "|" and "grep" are passed as literal args to dumpsys (no
    shell), so the pipe never runs; the regex over the full dumpsys output is
    what extracts the version. Left as-is since it works on-device.
    """
    try:
      result = subprocess.check_output(["dumpsys", "package", self.app, "|", "grep", "versionName"], encoding='utf8')
      if len(result) > 12:
        return re.findall(r"versionName=(.*)", result)[0]
    except (subprocess.CalledProcessError, IndexError):
      pass
    return None

  def init_vars(self, dragonconf):
    """One-time setup on the first dragonConf message: install/update or
    uninstall according to the enable param, then reset manual control."""
    self.is_enabled = getattr(dragonconf, self.enable_struct)

    if self.is_enabled:
      local_version = self.get_local_version()
      if local_version is not None:
        self.is_installed = True

      if self.has_own_apk and not self.is_installed:
        self.update_app()
      if local_version is None:
        self.update_app()
      else:
        # Fixed: this used `not self.own_apk`, but own_apk is a non-empty path
        # string (always truthy), so the remote-version check never ran;
        # has_own_apk is the intended flag.
        remote_version = self.get_remote_version() if (auto_update and not self.has_own_apk) else local_version
        if remote_version is not None and local_version != remote_version:
          self.update_app()
      if self.is_installed:
        self.set_package_permissions()
    else:
      self.uninstall_app()

    # clear any stale manual-control request left in the params
    if self.manual_ctrl_param is not None and getattr(dragonconf, self.manual_ctrl_struct) != self.MANUAL_IDLE:
      put_nonblocking(self.manual_ctrl_param, str(self.MANUAL_IDLE))
    self.init = True

  def read_params(self, dragonconf):
    """Refresh enabled / auto-run / manual-control state from a dragonConf msg."""
    if not self.init:
      self.init_vars(dragonconf)

    self.last_is_enabled = self.is_enabled
    self.is_enabled = False if self.enable_struct is None else getattr(dragonconf, self.enable_struct)

    if self.is_installed:
      if self.is_enabled:
        # a service app should run automatically and is not manually controllable
        if self.app_type in [App.TYPE_SERVICE]:
          self.is_auto_runnable = True
          self.manual_ctrl_status = self.MANUAL_IDLE
        else:
          self.manual_ctrl_status = self.MANUAL_IDLE if self.manual_ctrl_param is None else getattr(dragonconf, self.manual_ctrl_struct)
          if self.manual_ctrl_status == self.MANUAL_IDLE:
            self.is_auto_runnable = False if self.auto_run_struct is None else getattr(dragonconf, self.auto_run_struct)
      else:
        # just disabled: remove the app and reset all state
        if self.last_is_enabled:
          self.uninstall_app()
        self.is_auto_runnable = False
        self.manual_ctrl_status = self.MANUAL_IDLE
        self.manually_ctrled = False
    else:
      # just enabled but not installed yet
      if not self.last_is_enabled and self.is_enabled:
        self.update_app()

  def run(self, force = False):
    """Start the app; no-op when it is already running unless *force*."""
    if self.is_installed and (force or self.is_enabled):
      # app was switched on manually: acknowledge the request in the params
      if self.manual_ctrl_param is not None and self.manual_ctrl_status == self.MANUAL_ON:
        put_nonblocking(self.manual_ctrl_param, '0')
        put_nonblocking('dp_last_modified', str(floor(time.time())))
        self.manually_ctrled = True
        self.is_running = False

      # only run app if it's not running
      if force or not self.is_running:
        self.system("pm enable %s" % self.app)

        if self.app_type == self.TYPE_SERVICE:
          self.appops_set(self.app, "android:mock_location", "allow")

        self.system(self.start_cmd)
        self.is_running = True

  def kill(self, force = False):
    """Stop the app; no-op when it is not running unless *force*."""
    if self.is_installed and (force or self.is_enabled):
      # app was switched off manually: acknowledge the request in the params
      if self.manual_ctrl_param is not None and self.manual_ctrl_status == self.MANUAL_OFF:
        put_nonblocking(self.manual_ctrl_param, '0')
        self.manually_ctrled = True
        self.is_running = True

      # only kill app if it's running
      if force or self.is_running:
        if self.app_type == self.TYPE_SERVICE:
          self.appops_set(self.app, "android:mock_location", "deny")
        self.system("pkill %s" % self.app)
        self.is_running = False

  def system(self, cmd):
    """Run *cmd* through the shell, logging (not raising) on failure."""
    try:
      subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
      cloudlog.event("running failed",
                     cmd=e.cmd,
                     output=e.output[-1024:],
                     returncode=e.returncode)
def init_apps(apps):
  """Append every managed Android app to *apps*, in fixed order."""
  read_storage = "android.permission.READ_EXTERNAL_STORAGE"
  write_storage = "android.permission.WRITE_EXTERNAL_STORAGE"
  fine_location = "android.permission.ACCESS_FINE_LOCATION"
  coarse_location = "android.permission.ACCESS_COARSE_LOCATION"
  record_audio = "android.permission.RECORD_AUDIO"

  # (package, start command, enable param, auto-run param, manual param,
  #  app type, check_crash, permissions, appops options)
  configs = [
    ("cn.dragonpilot.gpsservice",
     "am startservice cn.dragonpilot.gpsservice/cn.dragonpilot.gpsservice.MainService",
     "dp_app_ext_gps", None, None, App.TYPE_SERVICE, False,
     [], []),
    ("com.mixplorer",
     "am start -n com.mixplorer/com.mixplorer.activities.BrowseActivity",
     "dp_app_mixplorer", None, "dp_app_mixplorer_manual", App.TYPE_UTIL, False,
     [read_storage, write_storage], []),
    ("com.tomtom.speedcams.android.map",
     "am start -n com.tomtom.speedcams.android.map/com.tomtom.speedcams.android.activities.SpeedCamActivity",
     "dp_app_tomtom", "dp_app_tomtom_auto", "dp_app_tomtom_manual", App.TYPE_GPS, False,
     [fine_location, coarse_location, read_storage, write_storage], ["SYSTEM_ALERT_WINDOW"]),
    ("tw.com.ainvest.outpack",
     "am start -n tw.com.ainvest.outpack/tw.com.ainvest.outpack.ui.MainActivity",
     "dp_app_aegis", "dp_app_aegis_auto", "dp_app_aegis_manual", App.TYPE_GPS, False,
     [fine_location, read_storage, write_storage], ["SYSTEM_ALERT_WINDOW"]),
    ("com.autonavi.amapauto",
     "am start -n com.autonavi.amapauto/com.autonavi.amapauto.MainMapActivity",
     "dp_app_autonavi", "dp_app_autonavi_auto", "dp_app_autonavi_manual", App.TYPE_GPS, False,
     [fine_location, coarse_location, read_storage, write_storage], ["SYSTEM_ALERT_WINDOW"]),
    ("com.waze",
     "am start -n com.waze/com.waze.MainActivity",
     "dp_app_waze", None, "dp_app_waze_manual", App.TYPE_FULLSCREEN, False,
     [fine_location, coarse_location, read_storage, write_storage, record_audio], []),
    # pm disable gb.xxy.hr && pm enable gb.xxy.hr && am broadcast -a "gb.xxy.hr.WIFI_START"
    ("gb.xxy.hr",
     "am start -n gb.xxy.hr/.MainActivity && pm disable gb.xxy.hr && pm enable gb.xxy.hr && am broadcast -a gb.xxy.hr.WIFI_START",
     "dp_app_hr", None, "dp_app_hr_manual", App.TYPE_ANDROID_AUTO, True,
     [fine_location, coarse_location, read_storage, write_storage, record_audio], []),
  ]
  for cfg in configs:
    apps.append(App(*cfg))
def main():
  """Daemon loop: keep each managed app's running state in sync with dragonConf."""
  apps = []

  last_started = False
  sm = messaging.SubMaster(['dragonConf'])
  frame = 0
  start_delay = None
  stop_delay = None
  allow_auto_run = True
  last_overheat = False
  init_done = False
  dragon_conf_msg = None
  next_check_process_frame = 0
  while 1: #has_enabled_apps:
    start_sec = sec_since_boot()
    if not init_done:
      # wait ~10 frames before initialising apps from the first dragonConf msg
      if frame >= 10:
        init_apps(apps)
        sm.update()
        dragon_conf_msg = sm['dragonConf']
        init_done = True
    else:
      sm.update(1000)
      if sm.updated['dragonConf']:
        dragon_conf_msg = sm['dragonConf']
      else:
        # no fresh config this cycle; skip app management entirely
        continue
      enabled_apps = []
      has_fullscreen_apps = False
      has_check_crash = False
      for app in apps:
        # read params loop
        app.read_params(dragon_conf_msg)
        if app.last_is_enabled and not app.is_enabled and app.is_running:
          app.kill(True)

        if app.is_enabled:
          if not has_fullscreen_apps and app.app_type in [App.TYPE_FULLSCREEN, App.TYPE_ANDROID_AUTO]:
            has_fullscreen_apps = True
          if not has_check_crash and app.check_crash:
            has_check_crash = True
          # process manual ctrl apps
          if app.manual_ctrl_status != App.MANUAL_IDLE:
            app.run(True) if app.manual_ctrl_status == App.MANUAL_ON else app.kill(True)
          enabled_apps.append(app)
      started = dragon_conf_msg.dpThermalStarted
      # when car is running
      if started:
        # we run service apps and kill all util apps
        # only run once
        if last_started != started:
          for app in enabled_apps:
            if app.app_type in [App.TYPE_SERVICE]:
              app.run()
            elif app.app_type == App.TYPE_UTIL:
              app.kill()
        stop_delay = None
        # apps start 5 secs later
        if start_delay is None:
          start_delay = frame + 5

        if not dragon_conf_msg.dpThermalOverheat:
          allow_auto_run = True
          # when temp reduce from red to yellow, we add start up delay as well
          # so apps will not start up immediately
          if last_overheat:
            start_delay = frame + 60
        else:
          allow_auto_run = False
        last_overheat = dragon_conf_msg.dpThermalOverheat

        # periodically kill apps detected as crashed (every 15 frames)
        if has_check_crash and frame >= next_check_process_frame:
          for app in enabled_apps:
            if app.is_running and app.check_crash and app.is_crashed():
              app.kill()
          next_check_process_frame = frame + 15

        # only run apps that's not manually ctrled
        for app in enabled_apps:
          if not app.manually_ctrled:
            if has_fullscreen_apps:
              # a fullscreen app takes the display: run it, kill GPS/util apps
              if app.app_type in [App.TYPE_FULLSCREEN, App.TYPE_ANDROID_AUTO]:
                app.run()
              elif app.app_type in [App.TYPE_GPS, App.TYPE_UTIL]:
                app.kill()
            else:
              if not allow_auto_run:
                app.kill()
              else:
                if frame >= start_delay and app.is_auto_runnable and app.app_type == App.TYPE_GPS:
                  app.run()
      # when car is stopped
      else:
        start_delay = None
        # set delay to 30 seconds
        if stop_delay is None:
          stop_delay = frame + 30

        for app in enabled_apps:
          if app.is_running and not app.manually_ctrled:
            if has_fullscreen_apps or frame >= stop_delay:
              app.kill()

      # clear manual-control flags on every start/stop transition
      if last_started != started:
        for app in enabled_apps:
          app.manually_ctrled = False
      last_started = started
    frame += 1
    # aim for roughly one iteration per second
    sleep = 1 - (sec_since_boot() - start_sec)
    if sleep > 0:
      time.sleep(sleep)
def system(cmd):
  """Run *cmd* through the shell, logging the command and any failure.

  Output is discarded on success; on a non-zero exit status the command,
  the last 1 KiB of its combined stdout/stderr, and the return code are
  logged via cloudlog instead of raising.
  """
  try:
    cloudlog.info("running %s" % cmd)
    # NOTE(review): shell=True executes `cmd` via the shell -- callers must
    # never pass untrusted input to this function.
    subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
  except subprocess.CalledProcessError as e:
    cloudlog.event("running failed",
                   cmd=e.cmd,
                   output=e.output[-1024:],
                   returncode=e.returncode)
# Script entry point: run the app-management daemon loop defined above.
if __name__ == "__main__":
  main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,493
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/external/simpleperf/simpleperf_report_lib.py
|
#!/usr/bin/env python3
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""simpleperf_report_lib.py: a python wrapper of libsimpleperf_report.so.
Used to access samples in perf.data.
"""
import ctypes as ct
import os
import subprocess
import sys
import unittest
from utils import *
def _get_native_lib():
    """Locate the native report library shipped with the host binaries."""
    lib_name = 'libsimpleperf_report.so'
    return get_host_binary_path(lib_name)
def _is_null(p):
    """Return True when ctypes pointer *p* is a NULL pointer."""
    if not p:
        # A falsy pointer is NULL; double-check by casting to void* first.
        return ct.cast(p, ct.c_void_p).value is None
    return False
def _char_pt(s):
    """Encode a python string into the bytes form expected by the C API."""
    encoded = str_to_bytes(s)
    return encoded
def _char_pt_to_str(char_pt):
    """Decode a C char pointer's bytes into a python string."""
    decoded = bytes_to_str(char_pt)
    return decoded
class SampleStruct(ct.Structure):
    """ Instance of a sample in perf.data.
        ip: the program counter of the thread generating the sample.
        pid: process id (or thread group id) of the thread generating the sample.
        tid: thread id.
        thread_comm: thread name.
        time: time at which the sample was generated. The value is in nanoseconds.
              The clock is decided by the --clockid option in `simpleperf record`.
        in_kernel: whether the instruction is in kernel space or user space.
        cpu: the cpu generating the sample.
        period: count of events have happened since last sample. For example, if we use
                -e cpu-cycles, it means how many cpu-cycles have happened.
                If we use -e cpu-clock, it means how many nanoseconds have passed.
    """
    # Field order and types must match the Sample struct exported by
    # libsimpleperf_report.so exactly -- do not reorder.
    _fields_ = [('ip', ct.c_uint64),
                ('pid', ct.c_uint32),
                ('tid', ct.c_uint32),
                ('thread_comm', ct.c_char_p),
                ('time', ct.c_uint64),
                ('in_kernel', ct.c_uint32),
                ('cpu', ct.c_uint32),
                ('period', ct.c_uint64)]
class EventStruct(ct.Structure):
    """ Name of the event. """
    # Single C string; layout must match the Event struct exported by
    # libsimpleperf_report.so.
    _fields_ = [('name', ct.c_char_p)]
class MappingStruct(ct.Structure):
    """ A mapping area in the monitored threads, like the content in /proc/<pid>/maps.
        start: start addr in memory.
        end: end addr in memory.
        pgoff: offset in the mapped shared library.
    """
    # Layout must match the Mapping struct exported by libsimpleperf_report.so.
    _fields_ = [('start', ct.c_uint64),
                ('end', ct.c_uint64),
                ('pgoff', ct.c_uint64)]
class SymbolStruct(ct.Structure):
    """ Symbol info of the instruction hit by a sample or a callchain entry of a sample.
        dso_name: path of the shared library containing the instruction.
        vaddr_in_file: virtual address of the instruction in the shared library.
        symbol_name: name of the function containing the instruction.
        symbol_addr: start addr of the function containing the instruction.
        symbol_len: length of the function in the shared library.
        mapping: the mapping area hit by the instruction.
    """
    # Layout must match the Symbol struct exported by libsimpleperf_report.so.
    _fields_ = [('dso_name', ct.c_char_p),
                ('vaddr_in_file', ct.c_uint64),
                ('symbol_name', ct.c_char_p),
                ('symbol_addr', ct.c_uint64),
                ('symbol_len', ct.c_uint64),
                ('mapping', ct.POINTER(MappingStruct))]
class CallChainEntryStructure(ct.Structure):
    """ A callchain entry of a sample.
        ip: the address of the instruction of the callchain entry.
        symbol: symbol info of the callchain entry.
    """
    # Layout must match the CallChainEntry struct in libsimpleperf_report.so.
    _fields_ = [('ip', ct.c_uint64),
                ('symbol', SymbolStruct)]
class CallChainStructure(ct.Structure):
    """ Callchain info of a sample.
        nr: number of entries in the callchain.
        entries: a pointer to an array of CallChainEntryStructure.

        For example, if a sample is generated when a thread is running function C
        with callchain function A -> function B -> function C.
        Then nr = 2, and entries = [function B, function A].
    """
    # Layout must match the CallChain struct in libsimpleperf_report.so.
    _fields_ = [('nr', ct.c_uint32),
                ('entries', ct.POINTER(CallChainEntryStructure))]
class FeatureSectionStructure(ct.Structure):
    """ A feature section in perf.data to store information like record cmd, device arch, etc.
        data: a pointer to a buffer storing the section data.
        data_size: data size in bytes.
    """
    # Layout must match the FeatureSection struct in libsimpleperf_report.so.
    _fields_ = [('data', ct.POINTER(ct.c_char)),
                ('data_size', ct.c_uint32)]
# convert char_p to str for python3.
class SampleStructUsingStr(object):
    """ Copy of SampleStruct whose char* fields are decoded to python str
        (used on python3, see convert_to_str in ReportLib).
    """

    def __init__(self, sample):
        # Numeric fields are copied verbatim; only the thread name needs
        # decoding from C chars to a python str.
        for attr in ('ip', 'pid', 'tid'):
            setattr(self, attr, getattr(sample, attr))
        self.thread_comm = _char_pt_to_str(sample.thread_comm)
        for attr in ('time', 'in_kernel', 'cpu', 'period'):
            setattr(self, attr, getattr(sample, attr))
class EventStructUsingStr(object):
    """ Copy of EventStruct whose name is decoded to a python str
        (used on python3, see convert_to_str in ReportLib).
    """

    def __init__(self, event):
        raw_name = event.name
        self.name = _char_pt_to_str(raw_name)
class SymbolStructUsingStr(object):
    """ Copy of SymbolStruct whose char* fields are decoded to python str
        (used on python3, see convert_to_str in ReportLib).
    """

    def __init__(self, symbol):
        self.dso_name = _char_pt_to_str(symbol.dso_name)
        self.vaddr_in_file = symbol.vaddr_in_file
        self.symbol_name = _char_pt_to_str(symbol.symbol_name)
        self.symbol_addr = symbol.symbol_addr
        # Fix: symbol_len was previously dropped during the str conversion,
        # so python3 callers could not read the function length that
        # SymbolStruct exposes; copy it like every other field.
        self.symbol_len = symbol.symbol_len
        self.mapping = symbol.mapping
class CallChainEntryStructureUsingStr(object):
    """ Copy of CallChainEntryStructure whose symbol is converted to the
        str-based wrapper (used on python3, see convert_to_str in ReportLib).
    """
    def __init__(self, entry):
        self.ip = entry.ip
        self.symbol = SymbolStructUsingStr(entry.symbol)
class CallChainStructureUsingStr(object):
    """ Copy of CallChainStructure whose entries are converted to the
        str-based wrappers (used on python3, see convert_to_str in ReportLib).
    """

    def __init__(self, callchain):
        self.nr = callchain.nr
        # Materialize the C entry array into python wrapper objects.
        self.entries = [CallChainEntryStructureUsingStr(callchain.entries[idx])
                        for idx in range(self.nr)]
class ReportLibStructure(ct.Structure):
    # Opaque handle to the native ReportLib object; the layout lives entirely
    # on the C side, python code only passes pointers to it around.
    _fields_ = []
class ReportLib(object):
    """ Reader for perf.data files, backed by libsimpleperf_report.so.

    Typical usage: create, SetRecordFile(), then iterate GetNextSample()
    until it returns None, calling the Get*OfCurrentSample() accessors for
    each sample. Call Close() when done.
    """

    def __init__(self, native_lib_path=None):
        """Load the native library and create a native report instance.

        native_lib_path: optional explicit path to libsimpleperf_report.so;
                         defaults to the host binary shipped with the scripts.
        """
        if native_lib_path is None:
            native_lib_path = _get_native_lib()
        self._load_dependent_lib()
        self._lib = ct.CDLL(native_lib_path)
        # Bind the exported C functions and declare the non-int return types
        # so ctypes does not truncate pointers to the default c_int.
        self._CreateReportLibFunc = self._lib.CreateReportLib
        self._CreateReportLibFunc.restype = ct.POINTER(ReportLibStructure)
        self._DestroyReportLibFunc = self._lib.DestroyReportLib
        self._SetLogSeverityFunc = self._lib.SetLogSeverity
        self._SetSymfsFunc = self._lib.SetSymfs
        self._SetRecordFileFunc = self._lib.SetRecordFile
        self._SetKallsymsFileFunc = self._lib.SetKallsymsFile
        self._ShowIpForUnknownSymbolFunc = self._lib.ShowIpForUnknownSymbol
        self._ShowArtFramesFunc = self._lib.ShowArtFrames
        self._GetNextSampleFunc = self._lib.GetNextSample
        self._GetNextSampleFunc.restype = ct.POINTER(SampleStruct)
        self._GetEventOfCurrentSampleFunc = self._lib.GetEventOfCurrentSample
        self._GetEventOfCurrentSampleFunc.restype = ct.POINTER(EventStruct)
        self._GetSymbolOfCurrentSampleFunc = self._lib.GetSymbolOfCurrentSample
        self._GetSymbolOfCurrentSampleFunc.restype = ct.POINTER(SymbolStruct)
        self._GetCallChainOfCurrentSampleFunc = self._lib.GetCallChainOfCurrentSample
        self._GetCallChainOfCurrentSampleFunc.restype = ct.POINTER(
            CallChainStructure)
        self._GetBuildIdForPathFunc = self._lib.GetBuildIdForPath
        self._GetBuildIdForPathFunc.restype = ct.c_char_p
        self._GetFeatureSection = self._lib.GetFeatureSection
        self._GetFeatureSection.restype = ct.POINTER(FeatureSectionStructure)
        self._instance = self._CreateReportLibFunc()
        assert not _is_null(self._instance)

        # On python3 the *UsingStr wrappers are returned instead of the raw
        # ctypes structs, so callers always see python strings.
        self.convert_to_str = (sys.version_info >= (3, 0))
        # Lazily-populated caches (see MetaInfo / GetRecordCmd).
        self.meta_info = None
        self.current_sample = None
        self.record_cmd = None

    def _load_dependent_lib(self):
        # As the windows dll is built with mingw we need to load 'libwinpthread-1.dll'.
        if is_windows():
            self._libwinpthread = ct.CDLL(get_host_binary_path('libwinpthread-1.dll'))

    def Close(self):
        """Destroy the native instance; safe to call more than once."""
        if self._instance is None:
            return
        self._DestroyReportLibFunc(self._instance)
        self._instance = None

    def SetLogSeverity(self, log_level='info'):
        """ Set log severity of native lib, can be verbose,debug,info,error,fatal."""
        cond = self._SetLogSeverityFunc(self.getInstance(), _char_pt(log_level))
        self._check(cond, 'Failed to set log level')

    def SetSymfs(self, symfs_dir):
        """ Set directory used to find symbols."""
        cond = self._SetSymfsFunc(self.getInstance(), _char_pt(symfs_dir))
        self._check(cond, 'Failed to set symbols directory')

    def SetRecordFile(self, record_file):
        """ Set the path of record file, like perf.data."""
        cond = self._SetRecordFileFunc(self.getInstance(), _char_pt(record_file))
        self._check(cond, 'Failed to set record file')

    def ShowIpForUnknownSymbol(self):
        """Report raw instruction addresses for symbols that cannot be resolved."""
        self._ShowIpForUnknownSymbolFunc(self.getInstance())

    def ShowArtFrames(self, show=True):
        """ Show frames of internal methods of the Java interpreter. """
        self._ShowArtFramesFunc(self.getInstance(), show)

    def SetKallsymsFile(self, kallsym_file):
        """ Set the file path to a copy of the /proc/kallsyms file (for off device decoding) """
        cond = self._SetKallsymsFileFunc(self.getInstance(), _char_pt(kallsym_file))
        self._check(cond, 'Failed to set kallsyms file')

    def GetNextSample(self):
        """Advance to the next sample; returns it, or None at end of data."""
        psample = self._GetNextSampleFunc(self.getInstance())
        if _is_null(psample):
            self.current_sample = None
        else:
            sample = psample[0]
            self.current_sample = SampleStructUsingStr(sample) if self.convert_to_str else sample
        return self.current_sample

    def GetCurrentSample(self):
        """Return the sample from the last GetNextSample() call (or None)."""
        return self.current_sample

    def GetEventOfCurrentSample(self):
        """Return the event that generated the current sample."""
        event = self._GetEventOfCurrentSampleFunc(self.getInstance())
        assert not _is_null(event)
        if self.convert_to_str:
            return EventStructUsingStr(event[0])
        return event[0]

    def GetSymbolOfCurrentSample(self):
        """Return symbol info for the instruction hit by the current sample."""
        symbol = self._GetSymbolOfCurrentSampleFunc(self.getInstance())
        assert not _is_null(symbol)
        if self.convert_to_str:
            return SymbolStructUsingStr(symbol[0])
        return symbol[0]

    def GetCallChainOfCurrentSample(self):
        """Return the callchain recorded with the current sample."""
        callchain = self._GetCallChainOfCurrentSampleFunc(self.getInstance())
        assert not _is_null(callchain)
        if self.convert_to_str:
            return CallChainStructureUsingStr(callchain[0])
        return callchain[0]

    def GetBuildIdForPath(self, path):
        """Return the build id recorded in perf.data for the binary at *path*."""
        build_id = self._GetBuildIdForPathFunc(self.getInstance(), _char_pt(path))
        assert not _is_null(build_id)
        return _char_pt_to_str(build_id)

    def GetRecordCmd(self):
        """Return the `simpleperf record` command line stored in perf.data.

        The result is cached; an empty string is returned when the cmdline
        feature section is absent.
        """
        if self.record_cmd is not None:
            return self.record_cmd
        self.record_cmd = ''
        feature_data = self._GetFeatureSection(self.getInstance(), _char_pt('cmdline'))
        if not _is_null(feature_data):
            # The section layout is: u32 arg count, then for each arg a u32
            # length followed by that many chars. Walk it with raw pointer
            # arithmetic on a void* cursor.
            void_p = ct.cast(feature_data[0].data, ct.c_void_p)
            arg_count = ct.cast(void_p, ct.POINTER(ct.c_uint32)).contents.value
            void_p.value += 4
            args = []
            for _ in range(arg_count):
                str_len = ct.cast(void_p, ct.POINTER(ct.c_uint32)).contents.value
                void_p.value += 4
                char_p = ct.cast(void_p, ct.POINTER(ct.c_char))
                current_str = ''
                for j in range(str_len):
                    c = bytes_to_str(char_p[j])
                    if c != '\0':
                        current_str += c
                # Quote args containing spaces so the command can be re-run.
                if ' ' in current_str:
                    current_str = '"' + current_str + '"'
                args.append(current_str)
                void_p.value += str_len
            self.record_cmd = ' '.join(args)
        return self.record_cmd

    def _GetFeatureString(self, feature_name):
        """Read a feature section stored as a u32 length + chars string."""
        feature_data = self._GetFeatureSection(self.getInstance(), _char_pt(feature_name))
        result = ''
        if not _is_null(feature_data):
            void_p = ct.cast(feature_data[0].data, ct.c_void_p)
            str_len = ct.cast(void_p, ct.POINTER(ct.c_uint32)).contents.value
            void_p.value += 4
            char_p = ct.cast(void_p, ct.POINTER(ct.c_char))
            for i in range(str_len):
                c = bytes_to_str(char_p[i])
                if c == '\0':
                    break
                result += c
        return result

    def GetArch(self):
        """Return the device architecture string recorded in perf.data."""
        return self._GetFeatureString('arch')

    def MetaInfo(self):
        """ Return a string to string map stored in meta_info section in perf.data.
            It is used to pass some short meta information.
        """
        if self.meta_info is None:
            self.meta_info = {}
            feature_data = self._GetFeatureSection(self.getInstance(), _char_pt('meta_info'))
            if not _is_null(feature_data):
                # The section is a flat list of '\0'-terminated strings,
                # alternating key and value.
                str_list = []
                data = feature_data[0].data
                data_size = feature_data[0].data_size
                current_str = ''
                for i in range(data_size):
                    c = bytes_to_str(data[i])
                    if c != '\0':
                        current_str += c
                    else:
                        str_list.append(current_str)
                        current_str = ''
                for i in range(0, len(str_list), 2):
                    self.meta_info[str_list[i]] = str_list[i + 1]
        return self.meta_info

    def getInstance(self):
        """Return the native handle, raising if Close() was already called."""
        if self._instance is None:
            raise Exception('Instance is Closed')
        return self._instance

    def _check(self, cond, failmsg):
        # Native setters return a truthy value on success; surface failures.
        if not cond:
            raise Exception(failmsg)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,494
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/crash.py
|
"""Install exception handler for process crash."""
import os
import sys
import capnp
import requests
import threading
import traceback
import subprocess
from common.params import Params
# Read the openpilot version string out of selfdrive/common/version.h: the
# version is the first double-quoted token in that header.
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "common", "version.h")) as _versionf:
  version = _versionf.read().split('"')[1]
def get_git_branch(default=None):
  """Return the current git branch name, or *default* if it cannot be read.

  Fix: previously only CalledProcessError was caught, so a missing `git`
  binary (FileNotFoundError) or any other OS-level failure raised at the
  call site instead of falling back to *default*, contradicting the
  function's contract. OSError (which FileNotFoundError subclasses) is now
  caught as well.
  """
  try:
    return subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"], encoding='utf8').strip()
  except (subprocess.CalledProcessError, OSError):
    return default
# Determine at import time whether this checkout tracks an arne182 remote and
# whether it is "dirty"; both feed the sentry error tags set up further below.
dirty = True
arne_remote = False
try:
  # Resolve the URL of the remote that the currently checked-out branch tracks.
  local_branch = subprocess.check_output(["git", "name-rev", "--name-only", "HEAD"], encoding='utf8').strip()
  tracking_remote = subprocess.check_output(["git", "config", "branch." + local_branch + ".remote"], encoding='utf8').strip()
  origin = subprocess.check_output(["git", "config", "remote." + tracking_remote + ".url"], encoding='utf8').strip()
except subprocess.CalledProcessError:
  try:
    # Not on a branch, fallback
    origin = subprocess.check_output(["git", "config", "--get", "remote.origin.url"], encoding='utf8').strip()
  except subprocess.CalledProcessError:
    origin = None
# NOTE(review): this call is unguarded -- outside a git checkout (or with git
# missing) it raises at module import time; confirm that is intended.
branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"], encoding='utf8').strip()
if (origin is not None) and (branch is not None):
  arne_remote = origin.startswith('git@github.com:arne182') or origin.startswith('https://github.com/arne182')
  # Any non-arne182 remote, or any uncommitted local changes, counts as dirty.
  dirty = not arne_remote
  dirty = dirty or (subprocess.call(["git", "diff-index", "--quiet", branch, "--"]) != 0)
from selfdrive.swaglog import cloudlog
from common.hardware import PC
def save_exception(exc_text):
  """Write *exc_text* to a timestamped crash-log file under CRASHES_DIR.

  If a file with the same timestamp already exists, an integer suffix is
  appended until a free name is found.
  """
  stamp = datetime.now().strftime('%Y-%m-%d--%H-%M-%S.%f.log')[:-3]
  log_file = '{}/{}'.format(CRASHES_DIR, stamp)
  if os.path.exists(log_file):
    suffix = 0
    while os.path.exists(log_file + str(suffix)):
      suffix += 1
    log_file += str(suffix)
  with open(log_file, 'w') as f:
    f.write(exc_text)
  print('Logged current crash to {}'.format(log_file))
# Crash reporting is disabled (no-op stubs) when NOLOG/NOCRASH is set or when
# running on PC; otherwise a sentry (raven) client is configured at import time.
if os.getenv("NOLOG") or os.getenv("NOCRASH") or PC:
  def capture_exception(*args, **kwargs):
    pass

  def bind_user(**kwargs):
    pass

  def bind_extra(**kwargs):
    pass

  def install():
    pass
else:
  from raven import Client
  from raven.transport.http import HTTPTransport
  from common.op_params import opParams
  from datetime import datetime

  COMMUNITY_DIR = '/data/community'
  CRASHES_DIR = '{}/crashes'.format(COMMUNITY_DIR)
  if not os.path.exists(COMMUNITY_DIR):
    os.mkdir(COMMUNITY_DIR)
  if not os.path.exists(CRASHES_DIR):
    os.mkdir(CRASHES_DIR)

  params = Params()
  try:
    dongle_id = params.get("DongleId").decode('utf8')
  except AttributeError:
    # DongleId not set yet -- params.get returned None.
    dongle_id = "None"
  # NOTE(review): network call at import time; blocks module import until the
  # request completes or fails. Falls back to a sentinel address on failure.
  try:
    ip = requests.get('https://checkip.amazonaws.com/').text.strip()
  except Exception:
    ip = "255.255.255.255"
  # Tags attached to every sentry event (dirty/branch/origin computed above).
  error_tags = {'dirty': dirty, 'dongle_id': dongle_id, 'branch': branch, 'remote': origin}
  #uniqueID = op_params.get('uniqueID')
  username = opParams().get('username')
  if username is None or not isinstance(username, str):
    username = 'undefined'
  error_tags['username'] = username
  u_tag = []
  if isinstance(username, str):
    u_tag.append(username)
  #if isinstance(uniqueID, str):
  #u_tag.append(uniqueID)
  if len(u_tag) > 0:
    error_tags['username'] = ''.join(u_tag)
  client = Client('https://137e8e621f114f858f4c392c52e18c6d:8aba82f49af040c8aac45e95a8484970@sentry.io/1404547',
                  install_sys_hook=False, transport=HTTPTransport, release=version, tags=error_tags)

  def capture_exception(*args, **kwargs):
    """Log the active exception locally and forward it to sentry.

    KjException (capnp) is only logged locally, not sent upstream.
    """
    save_exception(traceback.format_exc())
    exc_info = sys.exc_info()
    if not exc_info[0] is capnp.lib.capnp.KjException:
      client.captureException(*args, **kwargs)
    cloudlog.error("crash", exc_info=kwargs.get('exc_info', 1))

  def bind_user(**kwargs):
    client.user_context(kwargs)

  # NOTE(review): capture_warning/capture_info only exist in this branch --
  # calling them under NOLOG/NOCRASH/PC raises NameError; confirm no caller
  # does so in that configuration.
  def capture_warning(warning_string):
    bind_user(id=dongle_id, ip_address=ip)
    client.captureMessage(warning_string, level='warning')

  def capture_info(info_string):
    bind_user(id=dongle_id, ip_address=ip)
    client.captureMessage(info_string, level='info')

  def bind_extra(**kwargs):
    client.extra_context(kwargs)

  def install():
    """
    Workaround for `sys.excepthook` thread bug from:
    http://bugs.python.org/issue1230540
    Call once from the main thread before creating any threads.
    Source: https://stackoverflow.com/a/31622038
    """
    # installs a sys.excepthook
    __excepthook__ = sys.excepthook

    def handle_exception(*exc_info):
      if exc_info[0] not in (KeyboardInterrupt, SystemExit):
        capture_exception()
      __excepthook__(*exc_info)
    sys.excepthook = handle_exception

    # Wrap Thread.__init__ so that exceptions escaping any thread's run()
    # also flow through sys.excepthook (threads bypass it by default).
    init_original = threading.Thread.__init__

    def init(self, *args, **kwargs):
      init_original(self, *args, **kwargs)
      run_original = self.run

      def run_with_except_hook(*args2, **kwargs2):
        try:
          run_original(*args2, **kwargs2)
        except Exception:
          sys.excepthook(*sys.exc_info())
      self.run = run_with_except_hook
    threading.Thread.__init__ = init
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,495
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/controls/lib/dynamic_gas.py
|
from common.numpy_fast import clip, interp

# dp
# Acceleration-profile selector values; compared against
# sm['dragonConf'].dpAccelProfile in DynamicGas.update below.
DP_OFF = 0
DP_ECO = 1
DP_NORMAL = 2
DP_SPORT = 3
class DynamicGas:
def __init__(self, CP):
self.CP = CP
self.candidate = self.CP.carFingerprint
self.lead_data = {'v_rel': None, 'a_lead': None, 'x_lead': None, 'status': False}
self.blinker_status = False
self.dp_profile = DP_OFF
self.set_profile()
def update(self, CS, sm):
v_ego = CS.vEgo
self.handle_passable(CS, sm)
current_dp_profile = sm['dragonConf'].dpAccelProfile
if self.dp_profile != current_dp_profile:
self.dp_profile = current_dp_profile
self.set_profile()
if self.dp_profile == DP_OFF:
return float(interp(v_ego, self.CP.gasMaxBP, self.CP.gasMaxV))
gas = interp(v_ego, self.gasMaxBP, self.gasMaxV)
if self.lead_data['status']: # if lead
x = [0.0, 0.24588812499999999, 0.432818589, 0.593044697, 0.730381365, 1.050833588, 1.3965, 1.714627481] # relative velocity mod
y = [0.9901, 0.905, 0.8045, 0.625, 0.431, 0.2083, .0667, 0]
gas_mod = -(gas * interp(self.lead_data['v_rel'], x, y))
x = [0.44704, 1.1176, 1.34112] # lead accel mod
y = [1.0, 0.75, 0.625] # maximum we can reduce gas_mod is 40 percent (never increases mod)
gas_mod *= interp(self.lead_data['a_lead'], x, y)
# as lead gets further from car, lessen gas mod/reduction
x = [(i+v_ego) for i in self.x_lead_mod_x ]
gas_mod *= interp(self.lead_data['x_lead'],x , self.x_lead_mod_y)
gas = gas + gas_mod
if (self.blinker_status and self.lead_data['v_rel'] >= 0 ):
x = [8.9408, 22.352, 31.2928] # 20, 50, 70 mph
y = [1.0, 1.115, 1.225]
gas *= interp(v_ego, x, y)
return float(clip(gas, 0.0, 1.0))
def set_profile(self):
self.x_lead_mod_y = [1.0, 0.75, 0.5, 0.25, 0.0] # as lead gets further from car, lessen gas mod/reduction
x = [0.0, 1.4082, 2.80311, 4.22661, 5.38271, 6.16561, 7.24781, 8.28308, 10.24465, 12.96402, 15.42303, 18.11903, 20.11703, 24.46614, 29.05805, 32.71015, 35.76326]
if self.dp_profile == DP_ECO:
#km/h[0, 5, 10, 15, 19, 22, 25, 29, 36, 43, 54, 64, 72, 87, 104, 117, 128 144]
y = [0.38, 0.40, 0.38, 0.33, 0.33, 0.32, 0.31, 0.30, 0.27, 0.25, 0.24, 0.24, 0.23, 0.22, 0.22, 0.21, 0.20, 0.18]
#y = [0.35587, 0.46747, 0.41816, 0.33261, 0.27844, 0.2718, 0.28184, 0.29106, 0.29785, 0.297, 0.29658, 0.30308, 0.31354, 0.34922, 0.39767, 0.44527, 0.4984]
self.x_lead_mod_x = [8.1, 25, 45 , 65 , 70 ]
elif self.dp_profile == DP_SPORT:
#km/h[0, 5, 10, 15, 19, 22, 25, 29, 36, 43, 54, 64, 72, 87, 104, 117, 128 144]
y = [0.65, 0.67, 0.63, 0.50, 0.53, 0.53, 0.5229, 0.51784, 0.50765, 0.48, 0.496, 0.509, 0.525, 0.538, 0.45, 0.421, 0.42,0.35]
self.x_lead_mod_x = [4.1, 6.15, 8.24, 10 , 15 ]
else:
#km/h[0, 5, 10, 15, 19, 22, 25, 29, 36, 43, 54, 64, 72, 87, 104, 117, 128 144]
y = [0.45, 0.42, 0.38, 0.33, 0.3, 0.32, 0.31, 0.30, 0.30, 0.28, 0.24, 0.21, 0.20, 0.20, 0.19, 0.19, 0.17, 0.15]
self.x_lead_mod_x = [7.1, 10.15, 12.24, 15 , 20 ]
y = [interp(i, [0.2, (0.2 + 0.45) / 2, 0.45], [1.075 * i, i * 1.05, i]) for i in y]
self.gasMaxBP, self.gasMaxV = x, y
def handle_passable(self, CS, sm):
  """Cache blinker state and the lead car's telemetry from the latest messages."""
  self.blinker_status = CS.leftBlinker or CS.rightBlinker
  lead = sm['radarState'].leadOne
  self.lead_data.update({
    'v_rel': lead.vRel,
    'a_lead': lead.aLeadK,
    'x_lead': lead.dRel,
    # radarState can report a phantom lead; trust the planner's flag instead (thanks to arne)
    'status': sm['plan'].hasLead,
  })
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,496
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/lib/route_framereader.py
|
"""RouteFrameReader indexes and reads frames across routes, by frameId or segment indices."""
from tools.lib.framereader import FrameReader
class _FrameReaderDict(dict):
def __init__(self, camera_paths, cache_paths, framereader_kwargs, *args, **kwargs):
super(_FrameReaderDict, self).__init__(*args, **kwargs)
if cache_paths is None:
cache_paths = {}
if not isinstance(cache_paths, dict):
cache_paths = {k: v for k, v in enumerate(cache_paths)}
self._camera_paths = camera_paths
self._cache_paths = cache_paths
self._framereader_kwargs = framereader_kwargs
def __missing__(self, key):
if key < len(self._camera_paths) and self._camera_paths[key] is not None:
frame_reader = FrameReader(self._camera_paths[key],
self._cache_paths.get(key), **self._framereader_kwargs)
self[key] = frame_reader
return frame_reader
else:
raise KeyError("Segment index out of bounds: {}".format(key))
class RouteFrameReader(object):
  """Reads frames across routes and route segments by frameId."""

  def __init__(self, camera_paths, cache_paths, frame_id_lookup, **kwargs):
    """Create a route framereader.

    Inputs:
      camera_paths: per-segment camera file paths; entries may be None for
        segments that have no camera file.
      cache_paths: per-segment cache paths (dict, sequence or None),
        forwarded to each FrameReader.
      frame_id_lookup: mapping of frameId -> (segment_num, segment_id).
      kwargs: Forwarded to the FrameReader function. If cache_prefix is included, that path
        will also be used for frame position indices.
    """
    # First segment that actually has a camera file; used for w/h queries.
    self._first_camera_idx = next(i for i in range(len(camera_paths)) if camera_paths[i] is not None)
    self._frame_readers = _FrameReaderDict(camera_paths, cache_paths, kwargs)
    self._frame_id_lookup = frame_id_lookup

  @property
  def w(self):
    """Width of each frame in pixels."""
    return self._frame_readers[self._first_camera_idx].w

  @property
  def h(self):
    """Height of each frame in pixels."""
    return self._frame_readers[self._first_camera_idx].h

  def get(self, frame_id, **kwargs):
    """Get a frame for a route based on frameId.

    Inputs:
      frame_id: The frameId of the returned frame.
      kwargs: Forwarded to BaseFrameReader.get. "count" is not implemented.

    Returns None when the frameId is unknown or maps to an invalid segment.
    """
    segment_num, segment_id = self._frame_id_lookup.get(frame_id, (None, None))
    if segment_num is None or segment_num == -1 or segment_id == -1:
      return None
    else:
      return self.get_from_segment(segment_num, segment_id, **kwargs)

  def get_from_segment(self, segment_num, segment_id, **kwargs):
    """Get a frame from a specific segment with a specific index in that segment (segment_id).

    Inputs:
      segment_num: The number of the segment.
      segment_id: The index of the return frame within that segment.
      kwargs: Forwarded to BaseFrameReader.get. "count" is not implemented.
    """
    if "count" in kwargs:
      raise NotImplementedError("count")
    return self._frame_readers[segment_num].get(segment_id, **kwargs)[0]

  def close(self):
    """Close every opened FrameReader and drop them from the cache.

    Bug fix: the previous implementation cleared the dict first and then
    iterated the same (now empty) object — and iterating a dict yields keys,
    not values, anyway — so no reader was ever closed. Snapshot the reader
    objects before clearing, then close each one.
    """
    readers = list(self._frame_readers.values())
    self._frame_readers.clear()
    for fr in readers:
      fr.close()

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    self.close()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,497
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika_repo/tests/test_ephemerides.py
|
import numpy as np
import unittest
from laika.gps_time import GPSTime
from laika import AstroDog
# Historical query times — presumably (GPS week, time-of-week seconds) pairs;
# confirm against the GPSTime constructor signature.
gps_times_list = [[1950, 415621.0],
                  [1895, 455457.0],
                  [1885, 443787.0]]
# Satellites under test: two GPS ('G') PRNs and one GLONASS ('R') slot.
svIds = ['G01', 'G31', 'R08']
# Materialize GPSTime objects from the raw pairs above.
gps_times = [GPSTime(*gps_time_list) for gps_time_list in gps_times_list]
class TestAstroDog(unittest.TestCase):
  # Disabled upstream: compares nav vs. orbit info for "now", which depends on
  # the current wall clock and on fresh ephemeris downloads.
  # def test_nav_vs_orbit_now(self):
  #   dog_orbit = AstroDog(pull_orbit=True)
  #   dog_nav = AstroDog(pull_orbit=False)
  #   gps_time = GPSTime.from_datetime(datetime.utcnow()) - SECS_IN_DAY*2
  #   for svId in svIds:
  #     sat_info_nav = dog_nav.get_sat_info(svId, gps_time)
  #     sat_info_orbit = dog_orbit.get_sat_info(svId, gps_time)
  #     np.testing.assert_allclose(sat_info_nav[0], sat_info_orbit[0], rtol=0, atol=5)
  #     np.testing.assert_allclose(sat_info_nav[1], sat_info_orbit[1], rtol=0, atol=.1)
  #     np.testing.assert_allclose(sat_info_nav[2], sat_info_orbit[2], rtol=0, atol=1e-7)
  #     np.testing.assert_allclose(sat_info_nav[3], sat_info_orbit[3], rtol=0, atol=1e-11)

  def test_nav_vs_orbit__old(self):
    """Nav-message and precise-orbit satellite info must closely agree at fixed historical times."""
    dog_orbit = AstroDog(pull_orbit=True)
    dog_nav = AstroDog(pull_orbit=False)
    # Progressively tighter absolute tolerances for the first four components
    # returned by get_sat_info.
    atols = (5, .1, 1e-7, 1e-11)
    for gps_time in gps_times:
      for svId in svIds:
        nav_info = dog_nav.get_sat_info(svId, gps_time)
        orbit_info = dog_orbit.get_sat_info(svId, gps_time)
        for component, atol in enumerate(atols):
          np.testing.assert_allclose(nav_info[component], orbit_info[component], rtol=0, atol=atol)
# Allow running this test module directly without an external runner.
if __name__ == "__main__":
  unittest.main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,498
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/test/testing_closet_client.py
|
#!/usr/bin/env python3
import errno
import fcntl
import os
import signal
import subprocess
import sys
import time
import requests
from common.params import Params
from common.timeout import Timeout
HOST = "testing.comma.life"
def unblock_stdout():
  """Fork into a pty; the parent relays the child's output non-blockingly.

  The parent forwards SIGINT/SIGTERM to the child, copies everything the
  child writes on the pty to its own (non-blocking) stdout, then exits with
  the child's exit status. The child simply returns from this function and
  continues running the caller's code.
  """
  # get a non-blocking stdout
  child_pid, child_pty = os.forkpty()
  if child_pid != 0:  # parent
    # child is in its own process group, manually pass kill signals
    signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
    signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
    # make our stdout non-blocking so a stalled consumer can't wedge the relay
    fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
    while True:
      try:
        dat = os.read(child_pty, 4096)
      except OSError as e:
        # EIO from a pty read means the child closed its end (it exited)
        if e.errno == errno.EIO:
          break
        continue
      if not dat:
        break
      try:
        sys.stdout.write(dat.decode('utf8'))
      except (OSError, IOError, UnicodeDecodeError):
        # best-effort relay: drop chunks we can't decode or write
        pass
    # os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
    exit_status = os.wait()[1] >> 8
    os._exit(exit_status)
def heartbeat():
  """POST device/build info to the testing closet server every 5 seconds, forever.

  Any failure (missing files, git errors, network trouble) is printed and
  retried on the next cycle instead of being raised.
  """
  work_dir = '/data/openpilot'

  def _git_out(*cmd):
    # Run a command in the checkout and return its trimmed stdout.
    return subprocess.check_output(list(cmd), cwd=work_dir).decode('utf8').rstrip()

  while True:
    try:
      with open(os.path.join(work_dir, "selfdrive", "common", "version.h")) as version_file:
        version = version_file.read().split('"')[1]

      tmux = ""  # tmux log tailing disabled (was: tail -n 100 /tmp/tmux_out)

      msg = {
        'version': version,
        'dongle_id': Params().get("DongleId").rstrip().decode('utf8'),
        'remote': _git_out("git", "config", "--get", "remote.origin.url"),
        'branch': _git_out("git", "rev-parse", "--abbrev-ref", "HEAD"),
        'revision': _git_out("git", "rev-parse", "HEAD"),
        'serial': subprocess.check_output(["getprop", "ro.boot.serialno"]).decode('utf8').rstrip(),
        'tmux': tmux,
      }
      with Timeout(10):
        requests.post('http://%s/eon/heartbeat/' % HOST, json=msg, timeout=5.0)
    except Exception as e:
      print("Unable to send heartbeat", e)

    time.sleep(5)
# Fork a pty-wrapped child (the parent relays output and exits with the
# child's status); the child then runs the heartbeat loop forever.
if __name__ == "__main__":
  unblock_stdout()
  heartbeat()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,499
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/test/model_replay.py
|
#!/usr/bin/env python3
import os
import numpy as np
from tools.lib.logreader import LogReader
from tools.lib.framereader import FrameReader
from tools.lib.cache import cache_path_for_file_path
from selfdrive.test.process_replay.camera_replay import camera_replay
if __name__ == "__main__":
  # All inputs and the output live in the user's home directory.
  camera_file = os.path.expanduser('~/fcamera.hevc')
  log_reader = LogReader(os.path.expanduser('~/rlog.bz2'))
  frame_reader = FrameReader(camera_file)
  desire = np.load(os.path.expanduser('~/desire.npy'))
  calib = np.load(os.path.expanduser('~/calib.npy'))
  try:
    msgs = camera_replay(list(log_reader), frame_reader, desire=desire, calib=calib)
  finally:
    # Always drop the frame cache, even when the replay itself failed.
    cache_path = cache_path_for_file_path(camera_file)
    if os.path.isfile(cache_path):
      os.remove(cache_path)
  # Collect every raw float32 model prediction into a (num_msgs, pred_len) array.
  raw_preds = [np.frombuffer(m.model.rawPred, dtype=np.float32) for m in msgs]
  output_data = np.zeros((len(msgs), len(raw_preds[0])), dtype=np.float32)
  for row, pred in zip(output_data, raw_preds):
    row[:] = pred
  np.save(os.path.expanduser('~/modeldata.npy'), output_data)
  print("Finished replay")
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,500
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/webcam/accept_terms.py
|
#!/usr/bin/env python
from common.params import Params
from selfdrive.version import terms_version, training_version
if __name__ == '__main__':
  # Mark both the terms and the training as completed so onboarding is skipped.
  params = Params()
  for key, value in (("HasAcceptedTerms", terms_version),
                     ("CompletedTrainingVersion", training_version)):
    params.put(key, str(value, 'utf-8'))
  print("Terms Accepted!")
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,501
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika_repo/examples/kalman/gnss_kf.py
|
#!/usr/bin/env python
# pylint: skip-file
import numpy as np
from .kalman_helpers import ObservationKind
from .ekf_sym import EKF_sym
from laika.raw_gnss import GNSSMeasurement
def parse_prr(m):
  """Unpack a pseudorange-rate observation from a raw GNSSMeasurement row.

  Returns (z_i, R_i, sat_pos_vel_i): the measurement, its 2-D variance, and
  the satellite position/velocity stacked into one vector.
  """
  z_i = m[GNSSMeasurement.PRR]
  R_i = np.atleast_2d(m[GNSSMeasurement.PRR_STD]**2)
  sat_pos_vel_i = np.concatenate((m[GNSSMeasurement.SAT_POS], m[GNSSMeasurement.SAT_VEL]))
  return z_i, R_i, sat_pos_vel_i
def parse_pr(m):
  """Unpack a pseudorange observation from a raw GNSSMeasurement row.

  Returns (z_i, R_i, sat_pos_freq_i): the 1-D measurement, its 2-D variance,
  and the satellite position with the GLONASS frequency number appended.
  """
  z_i = np.atleast_1d(m[GNSSMeasurement.PR])
  R_i = np.atleast_2d(m[GNSSMeasurement.PR_STD]**2)
  sat_pos_freq_i = np.concatenate((m[GNSSMeasurement.SAT_POS],
                                   np.array([m[GNSSMeasurement.GLONASS_FREQ]])))
  return z_i, R_i, sat_pos_freq_i
class States(object):
  """Slices indexing the components of the 11-dimensional GNSS filter state."""
  ECEF_POS = slice(0,3)  # x, y and z in ECEF in meters
  ECEF_VELOCITY = slice(3,6)  # ECEF velocity in m/s
  CLOCK_BIAS = slice(6, 7)  # clock bias in light-meters,
  CLOCK_DRIFT = slice(7, 8)  # clock drift in light-meters/s,
  CLOCK_ACCELERATION = slice(8, 9)  # clock acceleration in light-meters/s**2
  GLONASS_BIAS = slice(9, 10)  # GLONASS bias in light-meters
  GLONASS_FREQ_SLOPE = slice(10, 11)  # GLONASS bias in m expressed as bias + freq_num*freq_slope
class GNSSKalman(object):
  """EKF over GNSS pseudorange / pseudorange-rate observations.

  State layout is described by `States` (ECEF position/velocity, receiver
  clock terms, GLONASS bias terms); prediction/update work is delegated to
  the symbolic EKF implementation in EKF_sym.
  """

  def __init__(self, N=0, max_tracks=3000):
    # NOTE(review): N and max_tracks are accepted but never used in this body —
    # presumably kept for signature parity with other filters; confirm.
    # Initial guess: a fixed ECEF position, zero velocity and zero clock terms.
    x_initial = np.array([-2712700.6008, -4281600.6679, 3859300.1830,
                          0, 0, 0,
                          0, 0, 0,
                          0, 0])
    # state covariance
    P_initial = np.diag([10000**2, 10000**2, 10000**2,
                         10**2, 10**2, 10**2,
                         (2000000)**2, (100)**2, (0.5)**2,
                         (10)**2, (1)**2])
    # process noise
    Q = np.diag([0.3**2, 0.3**2, 0.3**2,
                 3**2, 3**2, 3**2,
                 (.1)**2, (0)**2, (0.01)**2,
                 .1**2, (.01)**2])
    self.dim_state = x_initial.shape[0]
    # mahalanobis outlier rejection (currently disabled — empty kind list)
    maha_test_kinds = []  # ObservationKind.PSEUDORANGE_RATE, ObservationKind.PSEUDORANGE, ObservationKind.PSEUDORANGE_GLONASS]
    name = 'gnss'
    # init filter
    self.filter = EKF_sym(name, Q, x_initial, P_initial, self.dim_state, self.dim_state, maha_test_kinds=maha_test_kinds)

  @property
  def x(self):
    # Current state estimate.
    return self.filter.state()

  @property
  def P(self):
    # Current state covariance.
    return self.filter.covs()

  def predict(self, t):
    """Propagate the filter state to time t without an observation."""
    return self.filter.predict(t)

  def rts_smooth(self, estimates):
    """Rauch-Tung-Striebel smoothing over a sequence of estimates."""
    return self.filter.rts_smooth(estimates, norm_quats=False)

  def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
    """Reset the state; covariance comes from covs_diag, covs, or is kept as-is."""
    if covs_diag is not None:
      P = np.diag(covs_diag)
    elif covs is not None:
      P = covs
    else:
      P = self.filter.covs()
    self.filter.init_state(state, P, filter_time)

  def predict_and_observe(self, t, kind, data):
    """Predict to time t and update with a batch of observations of `kind`.

    NOTE(review): if `kind` matches none of the four pseudorange kinds, `r`
    is never assigned and `return r` raises UnboundLocalError — confirm that
    callers only pass supported kinds.
    """
    if len(data) > 0:
      data = np.atleast_2d(data)
    if kind == ObservationKind.PSEUDORANGE_GPS or kind == ObservationKind.PSEUDORANGE_GLONASS:
      r = self.predict_and_update_pseudorange(data, t, kind)
    elif kind == ObservationKind.PSEUDORANGE_RATE_GPS or kind == ObservationKind.PSEUDORANGE_RATE_GLONASS:
      r = self.predict_and_update_pseudorange_rate(data, t, kind)
    return r

  def predict_and_update_pseudorange(self, meas, t, kind):
    """Batch-update with pseudorange rows; each row is parsed via parse_pr."""
    R = np.zeros((len(meas), 1, 1))
    sat_pos_freq = np.zeros((len(meas), 4))
    z = np.zeros((len(meas), 1))
    for i, m in enumerate(meas):
      z_i, R_i, sat_pos_freq_i = parse_pr(m)
      sat_pos_freq[i,:] = sat_pos_freq_i
      z[i,:] = z_i
      R[i,:,:] = R_i
    return self.filter.predict_and_update_batch(t, kind, z, R, sat_pos_freq)

  def predict_and_update_pseudorange_rate(self, meas, t, kind):
    """Batch-update with pseudorange-rate rows; each row is parsed via parse_prr."""
    R = np.zeros((len(meas), 1, 1))
    z = np.zeros((len(meas), 1))
    sat_pos_vel = np.zeros((len(meas), 6))
    for i, m in enumerate(meas):
      z_i, R_i, sat_pos_vel_i = parse_prr(m)
      sat_pos_vel[i] = sat_pos_vel_i
      R[i,:,:] = R_i
      z[i, :] = z_i
    return self.filter.predict_and_update_batch(t, kind, z, R, sat_pos_vel)
# Smoke test: constructing the filter loads/compiles the generated EKF code.
if __name__ == "__main__":
  GNSSKalman()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,502
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/external/simpleperf/inferno/inferno.py
|
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Inferno is a tool to generate flamegraphs for android programs. It was originally written
to profile surfaceflinger (Android compositor) but it can be used for other C++ program.
It uses simpleperf to collect data. Programs have to be compiled with frame pointers which
excludes ART based programs for the time being.
Here is how it works:
1/ Data collection is started via simpleperf and pulled locally as "perf.data".
2/ The raw format is parsed, callstacks are merged to form a flamegraph data structure.
3/ The data structure is used to generate a SVG embedded into an HTML page.
4/ Javascript is injected to allow flamegraph navigation, search, coloring model.
"""
import argparse
import datetime
import os
import subprocess
import sys
scripts_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(scripts_path)
from simpleperf_report_lib import ReportLib
from utils import log_exit, log_info, AdbHelper, open_report_in_browser
from data_types import *
from svg_renderer import *
def collect_data(args):
    """ Run app_profiler.py to generate record file.

    Builds the app_profiler.py command line from `args` and runs it.
    Returns True iff the profiler subprocess exited with status 0.
    """
    app_profiler_args = [sys.executable, os.path.join(scripts_path, "app_profiler.py"), "-nb"]
    if args.app:
        app_profiler_args += ["-p", args.app]
    elif args.native_program:
        app_profiler_args += ["-np", args.native_program]
    else:
        log_exit("Please set profiling target with -p or -np option.")
    if args.compile_java_code:
        app_profiler_args.append("--compile_java_code")
    if args.disable_adb_root:
        app_profiler_args.append("--disable_adb_root")
    record_arg_str = ""
    if args.dwarf_unwinding:
        record_arg_str += "-g "
    else:
        record_arg_str += "--call-graph fp "
    if args.events:
        tokens = args.events.split()
        if len(tokens) == 2:
            num_events = tokens[0]
            event_name = tokens[1]
            record_arg_str += "-c %s -e %s " % (num_events, event_name)
            # Log inside this branch: num_events/event_name are only bound here,
            # so the log line can never hit an UnboundLocalError.
            log_info("Using event sampling (-c %s -e %s)." % (num_events, event_name))
        else:
            log_exit("Event format string of -e option can't be recognized.")
    else:
        record_arg_str += "-f %d " % args.sample_frequency
        log_info("Using frequency sampling (-f %d)." % args.sample_frequency)
    record_arg_str += "--duration %d " % args.capture_duration
    app_profiler_args += ["-r", record_arg_str]
    returncode = subprocess.call(app_profiler_args)
    return returncode == 0
def parse_samples(process, args, sample_filter_fn):
    """Read samples from record file.

    process: Process object, filled in with cmd, props, samples and threads
    args: parsed command-line arguments (record_file, symfs, kallsyms, ...)
    sample_filter_fn: if not None, is used to modify and filter samples.
                      It returns false for samples that should be filtered out.
    """
    record_file = args.record_file
    symfs_dir = args.symfs
    kallsyms_file = args.kallsyms
    lib = ReportLib()
    lib.ShowIpForUnknownSymbol()
    if symfs_dir:
        lib.SetSymfs(symfs_dir)
    if record_file:
        lib.SetRecordFile(record_file)
    if kallsyms_file:
        lib.SetKallsymsFile(kallsyms_file)
    if args.show_art_frames:
        lib.ShowArtFrames(True)
    process.cmd = lib.GetRecordCmd()
    product_props = lib.MetaInfo().get("product_props")
    if product_props:
        # Renamed from 'tuple', which shadowed the builtin of the same name.
        props = product_props.split(':')
        process.props['ro.product.manufacturer'] = props[0]
        process.props['ro.product.model'] = props[1]
        process.props['ro.product.name'] = props[2]
    if lib.MetaInfo().get('trace_offcpu') == 'true':
        process.props['trace_offcpu'] = True
        if args.one_flamegraph:
            log_exit("It doesn't make sense to report with --one-flamegraph for perf.data " +
                     "recorded with --trace-offcpu.")
    else:
        process.props['trace_offcpu'] = False
    while True:
        sample = lib.GetNextSample()
        if sample is None:
            lib.Close()
            break
        symbol = lib.GetSymbolOfCurrentSample()
        callchain = lib.GetCallChainOfCurrentSample()
        if sample_filter_fn and not sample_filter_fn(sample, symbol, callchain):
            continue
        process.add_sample(sample, symbol, callchain)
    # If recording was system-wide (pid 0), adopt the identity of the first
    # main thread (tid == pid) found among the parsed threads.
    if process.pid == 0:
        main_threads = [thread for thread in process.threads.values() if thread.tid == thread.pid]
        if main_threads:
            process.name = main_threads[0].name
            process.pid = main_threads[0].pid
    # Drop callchains below the configured percentage of each thread's events.
    for thread in process.threads.values():
        min_event_count = thread.num_events * args.min_callchain_percentage * 0.01
        thread.flamegraph.trim_callchain(min_event_count)
    log_info("Parsed %s callchains." % process.num_samples)
def get_local_asset_content(local_path):
    """
    Retrieves local package text content
    :param local_path: str, filename of an asset stored next to this module
    :return: str, the full content of that file
    """
    asset_path = os.path.join(os.path.dirname(__file__), local_path)
    with open(asset_path, 'r') as asset_file:
        return asset_file.read()
def output_report(process, args):
    """
    Generates a HTML report representing the result of simpleperf sampling as flamegraph
    :param process: Process object
    :param args: parsed command-line arguments (report_path, color, title, ...)
    :return: str, "file://" URL to the generated report
    """
    # Context manager guarantees the report file is closed even if rendering raises
    # (the original left the handle open on any exception).
    with open(args.report_path, 'w') as f:
        filepath = os.path.realpath(f.name)
        if not args.embedded_flamegraph:
            f.write("<html><body>")
        f.write("<div id='flamegraph_id' style='font-family: Monospace; %s'>" % (
            "display: none;" if args.embedded_flamegraph else ""))
        f.write("""<style type="text/css"> .s { stroke:black; stroke-width:0.5; cursor:pointer;}
              </style>""")
        f.write('<style type="text/css"> .t:hover { cursor:pointer; } </style>')
        # NOTE(review): a data URI normally needs a "data:<mime>;base64," prefix;
        # confirm inferno.b64 starts with that, otherwise the logo will not render.
        f.write('<img height="180" alt = "Embedded Image" src ="data')
        f.write(get_local_asset_content("inferno.b64"))
        f.write('"/>')
        process_entry = ("Process : %s (%d)<br/>" % (process.name, process.pid)) if process.pid else ""
        if process.props['trace_offcpu']:
            event_entry = 'Total time: %s<br/>' % get_proper_scaled_time_string(process.num_events)
        else:
            event_entry = 'Event count: %s<br/>' % ("{:,}".format(process.num_events))
        # TODO: collect capture duration info from perf.data.
        duration_entry = ("Duration: %s seconds<br/>" % args.capture_duration
                          ) if args.capture_duration else ""
        f.write("""<div style='display:inline-block;'>
                  <font size='8'>
                  Inferno Flamegraph Report%s</font><br/><br/>
                  %s
                  Date : %s<br/>
                  Threads : %d <br/>
                  Samples : %d<br/>
                  %s
                  %s""" % (
            (': ' + args.title) if args.title else '',
            process_entry,
            datetime.datetime.now().strftime("%Y-%m-%d (%A) %H:%M:%S"),
            len(process.threads),
            process.num_samples,
            event_entry,
            duration_entry))
        if 'ro.product.model' in process.props:
            f.write(
                "Machine : %s (%s) by %s<br/>" %
                (process.props["ro.product.model"],
                 process.props["ro.product.name"],
                 process.props["ro.product.manufacturer"]))
        if process.cmd:
            f.write("Capture : %s<br/><br/>" % process.cmd)
        f.write("</div>")
        f.write("""<br/><br/>
          <div>Navigate with WASD, zoom in with SPACE, zoom out with BACKSPACE.</div>""")
        f.write("<script>%s</script>" % get_local_asset_content("script.js"))
        if not args.embedded_flamegraph:
            f.write("<script>document.addEventListener('DOMContentLoaded', flamegraphInit);</script>")
        # Sort threads by the event count in a thread.
        for thread in sorted(process.threads.values(), key=lambda x: x.num_events, reverse=True):
            f.write("<br/><br/><b>Thread %d (%s) (%d samples):</b><br/>\n\n\n\n" % (
                thread.tid, thread.name, thread.num_samples))
            renderSVG(process, thread.flamegraph, f, args.color)
        f.write("</div>")
        if not args.embedded_flamegraph:
            # Fix: closing tag was previously written as "</body></html" (missing '>').
            f.write("</body></html>")
    return "file://" + filepath
def generate_threads_offsets(process):
    """Lay out every thread's flamegraph horizontally, starting from offset 0."""
    for t in process.threads.values():
        t.flamegraph.generate_offset(0)
def collect_machine_info(process):
    """Fill process.props with device identity properties read over adb."""
    adb = AdbHelper()
    props = {}
    # Query in the same order as before: model, name, manufacturer.
    for prop_name in ('ro.product.model', 'ro.product.name', 'ro.product.manufacturer'):
        props[prop_name] = adb.get_property(prop_name)
    process.props = props
def main():
    """Parse arguments, optionally record with simpleperf, then parse samples
    and render the flamegraph HTML report."""
    # Allow deep callchain with length >1000.
    sys.setrecursionlimit(1500)
    parser = argparse.ArgumentParser(description="""Report samples in perf.data. Default option
    is: "-np surfaceflinger -f 6000 -t 10".""")
    record_group = parser.add_argument_group('Record options')
    record_group.add_argument('-du', '--dwarf_unwinding', action='store_true', help="""Perform
                              unwinding using dwarf instead of fp.""")
    record_group.add_argument('-e', '--events', default="", help="""Sample based on event
                              occurrences instead of frequency. Format expected is
                              "event_counts event_name". e.g: "10000 cpu-cycles". A few examples
                              of event_name: cpu-cycles, cache-references, cache-misses,
                              branch-instructions, branch-misses""")
    record_group.add_argument('-f', '--sample_frequency', type=int, default=6000, help="""Sample
                              frequency""")
    record_group.add_argument('--compile_java_code', action='store_true',
                              help="""On Android N and Android O, we need to compile Java code
                                      into native instructions to profile Java code. Android O
                                      also needs wrap.sh in the apk to use the native
                                      instructions.""")
    record_group.add_argument('-np', '--native_program', default="surfaceflinger", help="""Profile
                              a native program. The program should be running on the device.
                              Like -np surfaceflinger.""")
    record_group.add_argument('-p', '--app', help="""Profile an Android app, given the package
                              name. Like -p com.example.android.myapp.""")
    record_group.add_argument('--record_file', default='perf.data', help='Default is perf.data.')
    record_group.add_argument('-sc', '--skip_collection', action='store_true', help="""Skip data
                              collection""")
    record_group.add_argument('-t', '--capture_duration', type=int, default=10, help="""Capture
                              duration in seconds.""")
    report_group = parser.add_argument_group('Report options')
    report_group.add_argument('-c', '--color', default='hot', choices=['hot', 'dso', 'legacy'],
                              help="""Color theme: hot=percentage of samples, dso=callsite DSO
                                      name, legacy=brendan style""")
    report_group.add_argument('--embedded_flamegraph', action='store_true', help="""Generate
                              embedded flamegraph.""")
    report_group.add_argument('--kallsyms', help='Set the path to find kernel symbols.')
    report_group.add_argument('--min_callchain_percentage', default=0.01, type=float, help="""
                              Set min percentage of callchains shown in the report.
                              It is used to limit nodes shown in the flamegraph. For example,
                              when set to 0.01, only callchains taking >= 0.01%% of the event
                              count of the owner thread are collected in the report.""")
    report_group.add_argument('--no_browser', action='store_true', help="""Don't open report
                              in browser.""")
    report_group.add_argument('-o', '--report_path', default='report.html', help="""Set report
                              path.""")
    report_group.add_argument('--one-flamegraph', action='store_true', help="""Generate one
                              flamegraph instead of one for each thread.""")
    report_group.add_argument('--symfs', help="""Set the path to find binaries with symbols and
                              debug info.""")
    report_group.add_argument('--title', help='Show a title in the report.')
    report_group.add_argument('--show_art_frames', action='store_true',
                              help='Show frames of internal methods in the ART Java interpreter.')
    debug_group = parser.add_argument_group('Debug options')
    debug_group.add_argument('--disable_adb_root', action='store_true', help="""Force adb to run
                             in non root mode.""")
    args = parser.parse_args()
    process = Process("", 0)

    if not args.skip_collection:
        process.name = args.app or args.native_program
        log_info("Starting data collection stage for process '%s'." % process.name)
        if not collect_data(args):
            log_exit("Unable to collect data.")
        result, output = AdbHelper().run_and_return_output(['shell', 'pidof', process.name])
        if result:
            try:
                process.pid = int(output)
            except ValueError:
                # pidof may return several pids or unparsable output; treat the
                # pid as unknown instead of silently swallowing every exception.
                process.pid = 0
        collect_machine_info(process)
    else:
        args.capture_duration = 0

    sample_filter_fn = None
    if args.one_flamegraph:
        # Collapse every sample onto one pid/tid so a single flamegraph is built.
        def filter_fn(sample, symbol, callchain):
            sample.pid = sample.tid = process.pid
            return True
        sample_filter_fn = filter_fn
        if not args.title:
            args.title = ''
        args.title += '(One Flamegraph)'

    parse_samples(process, args, sample_filter_fn)
    generate_threads_offsets(process)
    report_path = output_report(process, args)
    if not args.no_browser:
        open_report_in_browser(report_path)
    log_info("Flamegraph generated at '%s'." % report_path)
# Script entry point: record (unless skipped), parse and render the report.
if __name__ == "__main__":
    main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,503
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/laika/astro_dog.py
|
from .helpers import get_constellation, get_closest, get_el_az, get_prns_from_constellation
from .ephemeris import parse_sp3_orbits, parse_rinex_nav_msg_gps, parse_rinex_nav_msg_glonass
from .downloader import download_orbits, download_orbits_russia, download_nav, download_ionex, download_dcb
from .downloader import download_cors_station
from .trop import saast
from .iono import parse_ionex
from .dcb import parse_dcbs
from .dgps import get_closest_station_names, parse_dgps
from . import constants
MAX_DGPS_DISTANCE = 100000 # in meters, because we're not barbarians
class AstroDog(object):
  '''
  auto_update: flag indicating whether laika should fetch files from web
  cache_dir: directory where data files are downloaded to and cached
  pull_orbit: flag indicating whether laika should fetch sp3 orbits
              instead of nav files (orbits are more accurate)
  dgps: flag indicating whether laika should use dgps (CORS)
        data to calculate pseudorange corrections
  valid_const: list of constellation identifiers laika will try process
               (defaults to ['GPS', 'GLONASS'])
  '''
  def __init__(self, auto_update=True,
               cache_dir='/tmp/gnss/',
               pull_orbit=True, dgps=False,
               valid_const=None):
    # None sentinel instead of a mutable list default: a shared default list
    # could be mutated through one instance and silently affect all others.
    if valid_const is None:
      valid_const = ['GPS', 'GLONASS']
    self.auto_update = auto_update
    self.orbits = {}
    self.nav = {}
    self.dcbs = {}
    self.cache_dir = cache_dir
    self.dgps = dgps
    self.dgps_delays = []
    self.bad_sats = []
    self.ionex_maps = []
    self.pull_orbit = pull_orbit

    # Per-prn caches of the last ephemeris/bias found valid for a query time.
    self.cached_orbit = {}
    self.cached_nav = {}
    self.cached_dcb = {}
    self.cached_ionex = None
    self.cached_dgps = None

    self.valid_const = valid_const
    prns = sum([get_prns_from_constellation(const) for const in self.valid_const], [])
    for prn in prns:
      self.cached_nav[prn] = None
      self.cached_orbit[prn] = None
      self.cached_dcb[prn] = None
      self.orbits[prn] = []
      self.dcbs[prn] = []
      self.nav[prn] = []

  def get_ionex(self, time):
    """Return an ionex map valid at `time`; downloads when none is cached."""
    if self.cached_ionex is not None and self.cached_ionex.valid(time):
      return self.cached_ionex
    self.cached_ionex = get_closest(time, self.ionex_maps)
    if self.cached_ionex is not None and self.cached_ionex.valid(time):
      return self.cached_ionex
    self.get_ionex_data(time)
    self.cached_ionex = get_closest(time, self.ionex_maps)
    if self.cached_ionex is not None and self.cached_ionex.valid(time):
      return self.cached_ionex
    elif self.auto_update:
      raise RuntimeError("Pulled ionex, but still can't get valid for time " + str(time))
    else:
      return None

  def get_nav(self, prn, time):
    """Return a nav ephemeris for `prn` valid at `time`, or None (flags prn bad)."""
    if self.cached_nav[prn] is not None and self.cached_nav[prn].valid(time):
      return self.cached_nav[prn]
    self.cached_nav[prn] = get_closest(time, self.nav[prn])
    if self.cached_nav[prn] is not None and self.cached_nav[prn].valid(time):
      return self.cached_nav[prn]
    self.get_nav_data(time)
    self.cached_nav[prn] = get_closest(time, self.nav[prn])
    if self.cached_nav[prn] is not None and self.cached_nav[prn].valid(time):
      return self.cached_nav[prn]
    else:
      self.bad_sats.append(prn)
      return None

  def get_orbit(self, prn, time):
    """Return an sp3 orbit for `prn` valid at `time`, or None (flags prn bad)."""
    if self.cached_orbit[prn] is not None and self.cached_orbit[prn].valid(time):
      return self.cached_orbit[prn]
    self.cached_orbit[prn] = get_closest(time, self.orbits[prn])
    if self.cached_orbit[prn] is not None and self.cached_orbit[prn].valid(time):
      return self.cached_orbit[prn]
    self.get_orbit_data(time)
    self.cached_orbit[prn] = get_closest(time, self.orbits[prn])
    if self.cached_orbit[prn] is not None and self.cached_orbit[prn].valid(time):
      return self.cached_orbit[prn]
    else:
      self.bad_sats.append(prn)
      return None

  def get_dcb(self, prn, time):
    """Return a differential code bias for `prn` valid at `time`, or None."""
    if self.cached_dcb[prn] is not None and self.cached_dcb[prn].valid(time):
      return self.cached_dcb[prn]
    self.cached_dcb[prn] = get_closest(time, self.dcbs[prn])
    if self.cached_dcb[prn] is not None and self.cached_dcb[prn].valid(time):
      return self.cached_dcb[prn]
    self.get_dcb_data(time)
    self.cached_dcb[prn] = get_closest(time, self.dcbs[prn])
    if self.cached_dcb[prn] is not None and self.cached_dcb[prn].valid(time):
      return self.cached_dcb[prn]
    else:
      self.bad_sats.append(prn)
      return None

  def get_dgps_corrections(self, time, recv_pos):
    """Return DGPS corrections valid at `time` near `recv_pos`, or None."""
    if self.cached_dgps is not None and self.cached_dgps.valid(time, recv_pos):
      return self.cached_dgps
    self.cached_dgps = get_closest(time, self.dgps_delays, recv_pos=recv_pos)
    if self.cached_dgps is not None and self.cached_dgps.valid(time, recv_pos):
      return self.cached_dgps
    self.get_dgps_data(time, recv_pos)
    self.cached_dgps = get_closest(time, self.dgps_delays, recv_pos=recv_pos)
    if self.cached_dgps is not None and self.cached_dgps.valid(time, recv_pos):
      return self.cached_dgps
    elif self.auto_update:
      raise RuntimeError("Pulled dgps, but still can't get valid for time " + str(time))
    else:
      return None

  def add_ephem(self, new_ephem, ephems):
    """Append `new_ephem` to the per-prn list in `ephems`."""
    prn = new_ephem.prn
    # TODO make this check work
    #for eph in ephems[prn]:
    #  if eph.type == new_ephem.type and eph.epoch == new_ephem.epoch:
    #    raise RuntimeError('Trying to add an ephemeris that is already there, something is wrong')
    ephems[prn].append(new_ephem)

  def get_nav_data(self, time):
    """Download and parse RINEX nav files for the valid constellations."""
    ephems_gps, ephems_glonass = [], []
    if 'GPS' in self.valid_const:
      file_path_gps = download_nav(time, cache_dir=self.cache_dir, constellation='GPS')
      if file_path_gps:
        ephems_gps = parse_rinex_nav_msg_gps(file_path_gps)
    if 'GLONASS' in self.valid_const:
      file_path_glonass = download_nav(time, cache_dir=self.cache_dir, constellation='GLONASS')
      if file_path_glonass:
        ephems_glonass = parse_rinex_nav_msg_glonass(file_path_glonass)
    for ephem in (ephems_gps + ephems_glonass):
      self.add_ephem(ephem, self.nav)
    detected_prns = set([e.prn for e in ephems_gps + ephems_glonass])
    for constellation in self.valid_const:
      for prn in get_prns_from_constellation(constellation):
        if prn not in detected_prns and prn not in self.bad_sats:
          print('No nav data found for prn : %s flagging as bad' % prn)
          self.bad_sats.append(prn)

  def get_orbit_data(self, time):
    """Download and parse sp3 orbits from both Russian and US servers."""
    file_paths_sp3_ru = download_orbits_russia(time, cache_dir=self.cache_dir)
    ephems_sp3_ru = parse_sp3_orbits(file_paths_sp3_ru, self.valid_const)
    file_paths_sp3_us = download_orbits(time, cache_dir=self.cache_dir)
    ephems_sp3_us = parse_sp3_orbits(file_paths_sp3_us, self.valid_const)
    ephems_sp3 = ephems_sp3_ru + ephems_sp3_us
    if len(ephems_sp3) < 5:
      raise RuntimeError('No orbit data found on either servers')
    for ephem in ephems_sp3:
      self.add_ephem(ephem, self.orbits)
    for constellation in self.valid_const:
      for prn in get_prns_from_constellation(constellation):
        closest = get_closest(time, self.orbits[prn])
        if ((closest is None) or ((closest is not None) and (not closest.valid(time)))) and (prn not in self.bad_sats):
          print('No orbit data found for prn : %s flagging as bad' % prn)
          self.bad_sats.append(prn)

  def get_dcb_data(self, time):
    """Download and parse differential code biases."""
    file_path_dcb = download_dcb(time, cache_dir=self.cache_dir)
    dcbs = parse_dcbs(file_path_dcb, self.valid_const)
    for dcb in dcbs:
      self.dcbs[dcb.prn].append(dcb)
    detected_prns = set([dcb.prn for dcb in dcbs])
    for constellation in self.valid_const:
      for prn in get_prns_from_constellation(constellation):
        if prn not in detected_prns and prn not in self.bad_sats:
          print('No dcb data found for prn : %s flagging as bad' % prn)
          self.bad_sats.append(prn)

  def get_ionex_data(self, time):
    """Download and parse ionex maps."""
    file_path_ionex = download_ionex(time, cache_dir=self.cache_dir)
    ionex_maps = parse_ionex(file_path_ionex)
    for im in ionex_maps:
      self.ionex_maps.append(im)

  def get_dgps_data(self, time, recv_pos):
    """Download data from the closest CORS station that yields usable corrections."""
    station_names = get_closest_station_names(recv_pos, k=8, max_distance=MAX_DGPS_DISTANCE, cache_dir=self.cache_dir)
    for station_name in station_names:
      file_path_station = download_cors_station(time, station_name, cache_dir=self.cache_dir)
      if file_path_station:
        dgps = parse_dgps(station_name, file_path_station,
                          self, max_distance=MAX_DGPS_DISTANCE,
                          required_constellations=self.valid_const)
        if dgps is not None:
          self.dgps_delays.append(dgps)
          break

  def get_tgd_from_nav(self, prn, time):
    """Return the timing group delay from nav data, or None."""
    if prn in self.bad_sats:
      return None
    if get_constellation(prn) not in self.valid_const:
      return None
    eph = self.get_nav(prn, time)
    if eph:
      return eph.get_tgd()
    else:
      return None

  def get_sat_info(self, prn, time):
    """Return satellite info at `time` from orbits (preferred) or nav, or None."""
    if prn in self.bad_sats:
      return None
    if get_constellation(prn) not in self.valid_const:
      return None
    if self.pull_orbit:
      eph = self.get_orbit(prn, time)
    else:
      eph = self.get_nav(prn, time)
    if eph:
      return eph.get_sat_info(time)
    else:
      return None

  def get_glonass_channel(self, prn, time):
    """Return the GLONASS frequency channel number, or None if no nav data."""
    nav = self.get_nav(prn, time)
    if nav:
      return nav.channel
    return None

  def get_frequency(self, prn, time, signal='C1C'):
    """Return the carrier frequency (Hz) of `signal` for `prn` at `time`."""
    if get_constellation(prn) == 'GPS':
      if signal[1] == '1':
        return constants.GPS_L1
      elif signal[1] == '2':
        return constants.GPS_L2
      elif signal[1] == '5':
        return constants.GPS_L5
      elif signal[1] == '6':
        return constants.GALILEO_E6
      elif signal[1] == '7':
        return constants.GALILEO_E5B
      elif signal[1] == '8':
        return constants.GALILEO_E5AB
      else:
        raise NotImplementedError('Dont know this GPS frequency: ', signal, prn)
    elif get_constellation(prn) == 'GLONASS':
      n = self.get_glonass_channel(prn, time)
      # Fix: chained elifs so the final else catches every unknown signal.
      # Previously independent ifs meant unknown bands (e.g. '3') silently
      # returned None instead of raising like the GPS branch.
      if signal[1] == '1':
        return constants.GLONASS_L1 + n * constants.GLONASS_L1_DELTA
      elif signal[1] == '2':
        return constants.GLONASS_L2 + n * constants.GLONASS_L2_DELTA
      elif signal[1] == '5':
        return constants.GLONASS_L5 + n * constants.GLONASS_L5_DELTA
      elif signal[1] == '6':
        return constants.GALILEO_E6
      elif signal[1] == '7':
        return constants.GALILEO_E5B
      elif signal[1] == '8':
        return constants.GALILEO_E5AB
      else:
        raise NotImplementedError('Dont know this GLONASS frequency: ', signal, prn)

  def get_delay(self, prn, time, rcv_pos, no_dgps=False, signal='C1C', freq=None):
    """Return the modeled signal delay (iono + tropo + code bias, or DGPS), or None."""
    sat_info = self.get_sat_info(prn, time)
    if sat_info is None:
      return None
    sat_pos = sat_info[0]
    el, az = get_el_az(rcv_pos, sat_pos)
    # Reject satellites close to the horizon (elevation below ~11 degrees).
    if el < 0.2:
      return None
    if self.dgps and not no_dgps:
      dgps_corrections = self.get_dgps_corrections(time, rcv_pos)
      if dgps_corrections is None:
        return None
      dgps_delay = dgps_corrections.get_delay(prn, time)
      if dgps_delay is None:
        return None
      # Fix: return the value already computed instead of querying twice.
      return dgps_delay
    else:
      if not freq:
        freq = self.get_frequency(prn, time, signal)
      ionex = self.get_ionex(time)
      dcb = self.get_dcb(prn, time)
      if ionex is None or dcb is None:
        return None
      iono_delay = ionex.get_delay(rcv_pos, az, el, sat_pos, time, freq)
      trop_delay = saast(rcv_pos, el)
      code_bias = dcb.get_delay(signal)
      return iono_delay + trop_delay + code_bias
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,504
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/streamer/streamerd.py
|
#!/usr/bin/env python
# pylint: skip-file
import os
import sys
import zmq
import cv2
import numpy as np
import struct
# sudo pip install git+git://github.com/mikeboers/PyAV.git
import av
import cereal.messaging as messaging
from cereal.services import service_list
# Optional on-host preview window, enabled by setting the PYGAME env var.
PYGAME = os.getenv("PYGAME") is not None
if PYGAME:
  import pygame
  # Reusable RGB output buffer for the decoded frame (height 874, width 1164).
  imgff = np.zeros((874, 1164, 3), dtype=np.uint8)
# first 74 bytes in any stream
# NOTE(review): hex-encoded HEVC VPS/SPS/PPS headers fed to the decoder once at
# startup so mid-stream packets can be decoded — presumably captured from the
# device encoder; verify if the camera config changes.
start = "0000000140010c01ffff016000000300b0000003000003005dac5900000001420101016000000300b0000003000003005da0025080381c5c665aee4c92ec80000000014401c0f1800420"
def receiver_thread():
  """Receive HEVC packets over ZMQ, decode them, and republish raw YUV frames.

  Subscribes to port 9002 on `addr` (default 192.168.5.11, overridable via
  argv[1]), decodes each packet with PyAV, and publishes a 'frame' message on
  the cereal service port. When PYGAME is set, also renders a debug preview.
  Runs forever; no return value.
  """
  if PYGAME:
    pygame.init()
    pygame.display.set_caption("vnet debug UI")
    screen = pygame.display.set_mode((1164, 874), pygame.DOUBLEBUF)
    camera_surface = pygame.surface.Surface((1164, 874), 0, 24).convert()
  addr = "192.168.5.11"
  if len(sys.argv) >= 2:
    addr = sys.argv[1]
  context = zmq.Context()
  s = messaging.sub_sock(context, 9002, addr=addr)
  frame_sock = messaging.pub_sock(context, service_list['frame'].port)
  ctx = av.codec.codec.Codec('hevc', 'r').create()
  # FIX: str.decode("hex") is Python-2-only and raises AttributeError on
  # Python 3; bytes.fromhex() is the equivalent. Primes the decoder with the
  # stream header bytes.
  ctx.decode(av.packet.Packet(bytes.fromhex(start)))
  while 1:
    ts, raw = s.recv_multipart()
    # Timestamp arrives as a packed little-endian int64; scale to ns-ish units
    # expected by timestampEof (factor of 1000 kept from original).
    ts = struct.unpack('q', ts)[0] * 1000
    pkt = av.packet.Packet(raw)
    f = ctx.decode(pkt)
    if not f:
      # Decoder may buffer frames and emit nothing for this packet.
      continue
    f = f[0]
    # Crop the decoder's padded planes (1216/608 stride) to the visible
    # 1164x874 image and pack planar YUV420.
    y_plane = np.frombuffer(f.planes[0], np.uint8).reshape((874, 1216))[:, 0:1164]
    u_plane = np.frombuffer(f.planes[1], np.uint8).reshape((437, 608))[:, 0:582]
    v_plane = np.frombuffer(f.planes[2], np.uint8).reshape((437, 608))[:, 0:582]
    yuv_img = y_plane.tobytes() + u_plane.tobytes() + v_plane.tobytes()
    dat = messaging.new_message('frame')
    dat.frame.image = yuv_img
    dat.frame.timestampEof = ts
    # FIX: on Python 3, map() returns a lazy iterator which capnp list fields
    # cannot consume; materialize the identity transform as a concrete list.
    dat.frame.transform = [float(x) for x in np.eye(3).flatten()]
    frame_sock.send(dat.to_bytes())
    if PYGAME:
      yuv_np = np.frombuffer(yuv_img, dtype=np.uint8).reshape(874 * 3 // 2, -1)
      cv2.cvtColor(yuv_np, cv2.COLOR_YUV2RGB_I420, dst=imgff)
      pygame.surfarray.blit_array(camera_surface, imgff.swapaxes(0, 1))
      screen.blit(camera_surface, (0, 0))
      pygame.display.flip()
def main(gctx=None):
  """Entry point; `gctx` is unused but kept for manager-launch compatibility."""
  receiver_thread()


if __name__ == "__main__":
  main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,505
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/mapd/mapd.py
|
#!/usr/bin/env python3
#pylint: skip-file
# flake8: noqa
import time
import math
import overpy
import socket
import requests
import threading
import numpy as np
# setup logging
import logging
import logging.handlers
from scipy import spatial
import selfdrive.crash as crash
from common.params import Params
from collections import defaultdict
import cereal.messaging as messaging
#import cereal.messaging_arne as messaging_arne
from selfdrive.version import version, dirty
from common.transformations.coordinates import geodetic2ecef
from selfdrive.mapd.mapd_helpers import MAPS_LOOKAHEAD_DISTANCE, Way, circle_through_points, rate_curvature_points
#DEFAULT_SPEEDS_BY_REGION_JSON_FILE = BASEDIR + "/selfdrive/mapd/default_speeds_by_region.json"
#from selfdrive.mapd import default_speeds_generator
#default_speeds_generator.main(DEFAULT_SPEEDS_BY_REGION_JSON_FILE)
# define LoggerThread class to implement logging functionality
class LoggerThread(threading.Thread):
  """Base worker thread that owns a per-thread rotating-file logger.

  Each subclass gets its own '<name>-Thread.log' file (10 MiB x 5 rotations).
  Logging is disabled by default (level CRITICAL); lower to DEBUG to enable.
  """
  def __init__(self, threadID, name):
    threading.Thread.__init__(self)
    self.threadID = threadID
    self.name = name
    self.logger = logging.getLogger(name)
    h = logging.handlers.RotatingFileHandler(str(name)+'-Thread.log', 'a', 10*1024*1024, 5)
    f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
    h.setFormatter(f)
    self.logger.addHandler(h)
    self.logger.setLevel(logging.CRITICAL)  # set to logging.DEBUG to enable logging
  def save_gps_data(self, gps, osm_way_id):
    """Append one GPS sample plus the matched OSM way id to the data-collection file.

    Best-effort: any failure is logged and swallowed so collection never
    takes down the thread.
    """
    try:
      location = [gps.speed, gps.bearing, gps.latitude, gps.longitude, gps.altitude, gps.accuracy, time.time(), osm_way_id]
      with open("/data/openpilot/selfdrive/data_collection/gps-data", "a") as f:
        f.write("{}\n".format(location))
    # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # narrow to Exception while keeping the best-effort semantics.
    except Exception:
      self.logger.error("Unable to write gps data to external file")
  def run(self):
    pass  # will be overridden in the child class
class QueryThread(LoggerThread):
  """Background thread that downloads OSM highway data around the car.

  Polls the shared GPS fix roughly once per second; when the car nears the
  edge of the previously downloaded circle, issues a new Overpass query and
  publishes the result (plus a KD-tree of node positions) into sharedParams.
  """
  def __init__(self, threadID, name, sharedParams={}): # sharedParams is dict of params shared between two threads
    # invoke parent constructor https://stackoverflow.com/questions/2399307/how-to-invoke-the-super-constructor-in-python
    LoggerThread.__init__(self, threadID, name)
    # NOTE(review): mutable default arg; callers always pass sharedParams
    # explicitly in main(), so the shared-default pitfall is not hit here.
    self.sharedParams = sharedParams
    # memorize some parameters
    self.OVERPASS_API_LOCAL = "http://192.168.43.1:12345/api/interpreter"
    socket.setdefaulttimeout(15)
    self.distance_to_edge = 500  # re-query margin (m); tightened per server below
    self.OVERPASS_API_URL = "https://z.overpass-api.de/api/interpreter"
    self.OVERPASS_API_URL2 = "https://lz4.overpass-api.de/api/interpreter"
    self.OVERPASS_HEADERS = {
      'User-Agent': 'NEOS (comma.ai)',
      'Accept-Encoding': 'gzip'
    }
    self.prev_ecef = None  # ECEF position of the last successful query centre
  def is_connected_to_local(self, timeout=3.0):
    """Return True if the local (phone hotspot) Overpass server responds."""
    try:
      requests.get(self.OVERPASS_API_LOCAL, timeout=timeout)
      self.logger.debug("connection local active")
      return True
    except:
      self.logger.error("No local server available.")
      return False
  def is_connected_to_internet(self, timeout=1.0):
    """Return True if the primary public Overpass server responds."""
    try:
      requests.get(self.OVERPASS_API_URL, timeout=timeout)
      self.logger.debug("connection 1 active")
      return True
    except:
      self.logger.error("No internet connection available.")
      return False
  def is_connected_to_internet2(self, timeout=1.0):
    """Return True if the backup public Overpass server responds."""
    try:
      requests.get(self.OVERPASS_API_URL2, timeout=timeout)
      self.logger.debug("connection 2 active")
      return True
    except:
      self.logger.error("No internet connection available.")
      return False
  def build_way_query(self, lat, lon, heading, radius=50):
    """Builds a query to find all highways within a given radius around a point.

    The query centre is shifted radius/2 ahead along `heading` so the
    downloaded circle covers more road in the direction of travel.
    Returns (query_string, shifted_lat, shifted_lon).
    """
    # metres per degree of longitude (a) and latitude (b) at this latitude
    a = 111132.954*math.cos(float(lat)/180*3.141592)
    b = 111132.954 - 559.822 * math.cos( 2 * float(lat)/180*3.141592) + 1.175 * math.cos( 4 * float(lat)/180*3.141592)
    heading = math.radians(-heading + 90)
    lat = lat+math.sin(heading)*radius/2/b
    lon = lon+math.cos(heading)*radius/2/a
    pos = " (around:%f,%f,%f)" % (radius, lat, lon)
    lat_lon = "(%f,%f)" % (lat, lon)
    # Highways excluding non-drivable types, plus country/region areas for
    # locale-dependent speed-limit defaults.
    q = """(
way
""" + pos + """
[highway][highway!~"^(footway|path|bridleway|steps|cycleway|construction|bus_guideway|escape)$"];
>;);out;""" + """is_in""" + lat_lon + """;area._[admin_level~"[24]"];
convert area ::id = id(), admin_level = t['admin_level'],
name = t['name'], "ISO3166-1:alpha2" = t['ISO3166-1:alpha2'];out;
"""
    self.logger.debug("build_way_query : %s" % str(q))
    return q, lat, lon
  def run(self):
    """Endless ~1 Hz loop: decide whether a new map download is needed, do it."""
    self.logger.debug("run method started for thread %s" % self.name)
    # for now we follow old logic, will be optimized later
    start = time.time()
    radius = 3000  # download radius in metres
    while True:
      if time.time() - start > 2.0:
        print("Mapd QueryThread lagging by: %s" % str(time.time() - start - 1.0))
      if time.time() - start < 1.0:
        time.sleep(0.1)
        continue
      else:
        start = time.time()
      self.logger.debug("Starting after sleeping for 1 second ...")
      last_gps = self.sharedParams.get('last_gps', None)
      self.logger.debug("last_gps = %s" % str(last_gps))
      # skip the cycle without a valid fix
      if last_gps is not None:
        fix_ok = last_gps.flags & 1
        if not fix_ok:
          continue
      else:
        continue
      last_query_pos = self.sharedParams.get('last_query_pos', None)
      if last_query_pos is not None:
        cur_ecef = geodetic2ecef((last_gps.latitude, last_gps.longitude, last_gps.altitude))
        if self.prev_ecef is None:
          self.prev_ecef = geodetic2ecef((last_query_pos.latitude, last_query_pos.longitude, last_query_pos.altitude))
        dist = np.linalg.norm(cur_ecef - self.prev_ecef)
        if dist < radius - self.distance_to_edge: #updated when we are close to the edge of the downloaded circle
          continue
        self.logger.debug("parameters, cur_ecef = %s, prev_ecef = %s, dist=%s" % (str(cur_ecef), str(self.prev_ecef), str(dist)))
        # left the downloaded circle entirely: the cached result is stale
        if dist > radius:
          query_lock = self.sharedParams.get('query_lock', None)
          if query_lock is not None:
            query_lock.acquire()
            self.sharedParams['cache_valid'] = False
            query_lock.release()
          else:
            self.logger.error("There is no query_lock")
      # only query with a reasonably accurate fix (< 5 m)
      if last_gps is not None and last_gps.accuracy < 5.0:
        q, lat, lon = self.build_way_query(last_gps.latitude, last_gps.longitude, last_gps.bearing, radius=radius)
        try:
          # server preference: local hotspot > primary public > backup public
          if self.is_connected_to_local():
            api = overpy.Overpass(url=self.OVERPASS_API_LOCAL)
            api.timeout = 15.0
            self.distance_to_edge = radius * 3 / 8
          elif self.is_connected_to_internet():
            api = overpy.Overpass(url=self.OVERPASS_API_URL)
            self.logger.error("Using origional Server")
            self.distance_to_edge = radius/4
          elif self.is_connected_to_internet2():
            api = overpy.Overpass(url=self.OVERPASS_API_URL2)
            api.timeout = 10.0
            self.logger.error("Using backup Server")
            self.distance_to_edge = radius/4
          else:
            continue
          new_result = api.query(q)
          self.logger.debug("new_result = %s" % str(new_result))
          # Build kd-tree
          nodes = []
          real_nodes = []
          node_to_way = defaultdict(list)
          location_info = {}
          for n in new_result.nodes:
            nodes.append((float(n.lat), float(n.lon), 0))
            real_nodes.append(n)
          for way in new_result.ways:
            for n in way.nodes:
              node_to_way[n.id].append(way)
          # admin_level 2 = country, 4 = region/state (OSM convention per query above)
          for area in new_result.areas:
            if area.tags.get('admin_level', '') == "2":
              location_info['country'] = area.tags.get('ISO3166-1:alpha2', '')
            elif area.tags.get('admin_level', '') == "4":
              location_info['region'] = area.tags.get('name', '')
          nodes = np.asarray(nodes)
          nodes = geodetic2ecef(nodes)
          tree = spatial.KDTree(nodes)
          self.logger.debug("query thread, ... %s %s" % (str(nodes), str(tree)))
          # write result under the shared lock
          query_lock = self.sharedParams.get('query_lock', None)
          if query_lock is not None:
            query_lock.acquire()
            # record the (shifted) query centre as the new query position
            last_gps_mod = last_gps.as_builder()
            last_gps_mod.latitude = lat
            last_gps_mod.longitude = lon
            last_gps = last_gps_mod.as_reader()
            self.sharedParams['last_query_result'] = new_result, tree, real_nodes, node_to_way, location_info
            self.prev_ecef = geodetic2ecef((last_gps.latitude, last_gps.longitude, last_gps.altitude))
            self.sharedParams['last_query_pos'] = last_gps
            self.sharedParams['cache_valid'] = True
            query_lock.release()
          else:
            self.logger.error("There is not query_lock")
        except Exception as e:
          # query failed (network, parse, ...): invalidate the shared result
          self.logger.error("ERROR :" + str(e))
          print(str(e))
          query_lock = self.sharedParams.get('query_lock', None)
          query_lock.acquire()
          self.sharedParams['last_query_result'] = None
          query_lock.release()
      else:
        # fix too inaccurate to query around: drop the shared result
        query_lock = self.sharedParams.get('query_lock', None)
        query_lock.acquire()
        self.sharedParams['last_query_result'] = None
        query_lock.release()
      self.logger.debug("end of one cycle in endless loop ...")
class MapsdThread(LoggerThread):
  """Thread that turns the downloaded OSM data into liveMapData messages.

  Runs at ~10 Hz: matches the current GPS fix to the closest OSM way,
  derives speed limits, advisory speeds and upcoming road curvature, and
  publishes a 'liveMapData' message each cycle.
  """
  def __init__(self, threadID, name, sharedParams={}):
    # invoke parent constructor
    LoggerThread.__init__(self, threadID, name)
    self.sharedParams = sharedParams
    self.pm = messaging.PubMaster(['liveMapData'])
    self.logger.debug("entered mapsd_thread, ... %s" % ( str(self.pm)))
  def run(self):
    self.logger.debug("Entered run method for thread :" + str(self.name))
    # state carried across cycles
    cur_way = None
    curvature_valid = False
    curvature = None
    upcoming_curvature = 0.
    dist_to_turn = 0.
    road_points = None
    max_speed = None
    max_speed_ahead = None
    max_speed_ahead_dist = None
    max_speed_prev = 0
    had_good_gps = False
    start = time.time()
    while True:
      if time.time() - start > 0.2:
        print("Mapd MapsdThread lagging by: %s" % str(time.time() - start - 0.1))
      if time.time() - start < 0.1:
        time.sleep(0.01)
        continue
      else:
        start = time.time()
      self.logger.debug("starting new cycle in endless loop")
      # snapshot the shared state under the lock
      query_lock = self.sharedParams.get('query_lock', None)
      query_lock.acquire()
      gps = self.sharedParams['last_gps']
      traffic_status = self.sharedParams['traffic_status']
      traffic_confidence = self.sharedParams['traffic_confidence']
      last_not_none_signal = self.sharedParams['last_not_none_signal']
      speedLimittraffic = self.sharedParams['speedLimittraffic']
      speedLimittrafficvalid = self.sharedParams['speedLimittrafficvalid']
      speedLimittrafficAdvisory = self.sharedParams['speedLimittrafficAdvisory']
      speedLimittrafficAdvisoryvalid = self.sharedParams['speedLimittrafficAdvisoryvalid']
      query_lock.release()
      if gps is None:
        continue
      fix_ok = gps.flags & 1
      self.logger.debug("fix_ok = %s" % str(fix_ok))
      # GPS-quality hysteresis: on accuracy loss fall back to the last
      # traffic-derived speed limit (or 130 km/h) instead of the map
      if gps.accuracy > 2.5:
        if gps.accuracy > 5.0:
          if not speedLimittrafficvalid:
            if had_good_gps:
              query_lock = self.sharedParams.get('query_lock', None)
              query_lock.acquire()
              self.sharedParams['speedLimittrafficvalid'] = True
              if max_speed is not None:
                speedLimittraffic = max_speed * 3.6
              else:
                speedLimittraffic = 130
              query_lock.release()
            else:
              fix_ok = False
          had_good_gps = False
        if not speedLimittrafficvalid and not had_good_gps:
          fix_ok = False
      elif not had_good_gps:
        had_good_gps = True
      if not fix_ok or self.sharedParams['last_query_result'] is None or not self.sharedParams['cache_valid']:
        self.logger.debug("fix_ok %s" % fix_ok)
        self.logger.error("Error in fix_ok logic")
        # no usable fix or map data: reset all derived state
        cur_way = None
        curvature = None
        max_speed_ahead = None
        max_speed_ahead_dist = None
        curvature_valid = False
        upcoming_curvature = 0.
        dist_to_turn = 0.
        road_points = None
        map_valid = False
      else:
        map_valid = True
        lat = gps.latitude
        lon = gps.longitude
        heading = gps.bearing
        speed = gps.speed
        query_lock.acquire()
        cur_way = Way.closest(self.sharedParams['last_query_result'], lat, lon, heading, cur_way)
        query_lock.release()
        if cur_way is not None:
          self.logger.debug("cur_way is not None ...")
          pnts, curvature_valid = cur_way.get_lookahead(lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
          if pnts is not None:
            xs = pnts[:, 0]
            ys = pnts[:, 1]
            road_points = [float(x) for x in xs], [float(y) for y in ys]
            # curvature is unreliable at very low speed or with too few points
            if speed < 5:
              curvature_valid = False
            if curvature_valid and pnts.shape[0] <= 3:
              curvature_valid = False
          else:
            curvature_valid = False
            upcoming_curvature = 0.
            curvature = None
            dist_to_turn = 0.
          # The curvature is valid when at least MAPS_LOOKAHEAD_DISTANCE of road is found
          if curvature_valid:
            # Compute the curvature for each point
            with np.errstate(divide='ignore'):
              circles = [circle_through_points(*p, direction=True) for p in zip(pnts, pnts[1:], pnts[2:])]
              circles = np.asarray(circles)
              radii = np.nan_to_num(circles[:, 2])
              radii[abs(radii) < 15.] = 10000
              # relax curvature on high-design-speed road classes
              if cur_way.way.tags['highway'] == 'trunk' or cur_way.way.tags['highway'] == 'motorway_link':
                radii = radii*1.6 # https://media.springernature.com/lw785/springer-static/image/chp%3A10.1007%2F978-3-658-01689-0_21/MediaObjects/298553_35_De_21_Fig65_HTML.gif
              elif cur_way.way.tags['highway'] == 'motorway':
                radii = radii*2.8
              curvature = 1. / radii
              # weight curvature by its rate of change along the path
              rate = [rate_curvature_points(*p) for p in zip(pnts[1:], pnts[2:],curvature[0:],curvature[1:])]
              rate = ([0] + rate)
              curvature = np.abs(curvature)
              curvature = np.multiply(np.minimum(np.multiply(rate,4000)+0.7,1.1),curvature)
            # Index of closest point
            closest = np.argmin(np.linalg.norm(pnts, axis=1))
            dist_to_closest = pnts[closest, 0] # We can use x distance here since it should be close
            # Compute distance along path
            dists = list()
            dists.append(0)
            for p, p_prev in zip(pnts, pnts[1:, :]):
              dists.append(dists[-1] + np.linalg.norm(p - p_prev))
            dists = np.asarray(dists)
            dists = dists - dists[closest] + dist_to_closest
            dists = dists[1:-1]
            # only consider curvature within the next 500 m
            close_idx = np.logical_and(dists > 0, dists < 500)
            dists = dists[close_idx]
            curvature = curvature[close_idx]
            if len(curvature):
              curvature = np.nan_to_num(curvature)
              upcoming_curvature = np.amax(curvature)
              dist_to_turn = np.amin(dists[np.logical_and(curvature >= upcoming_curvature, curvature <= upcoming_curvature)])
            else:
              upcoming_curvature = 0.
              dist_to_turn = 999
      # assemble and publish the liveMapData message for this cycle
      dat = messaging.new_message()
      dat.init('liveMapData')
      last_gps = self.sharedParams.get('last_gps', None)
      if last_gps is not None:
        dat.liveMapData.lastGps = last_gps
      if cur_way is not None:
        dat.liveMapData.wayId = cur_way.id
        self.sharedParams['osm_way_id'] = cur_way.id
        # Speed limit
        max_speed = cur_way.max_speed(heading)
        max_speed_ahead = None
        max_speed_ahead_dist = None
        if max_speed is not None:
          max_speed_ahead, max_speed_ahead_dist = cur_way.max_speed_ahead(max_speed, lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE, traffic_status, traffic_confidence, last_not_none_signal)
        else:
          max_speed_ahead, max_speed_ahead_dist = cur_way.max_speed_ahead(speed*1.1, lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE, traffic_status, traffic_confidence, last_not_none_signal)
        # TODO: anticipate T junctions and right and left hand turns based on indicator
        if max_speed_ahead is not None and max_speed_ahead_dist is not None:
          dat.liveMapData.speedLimitAheadValid = True
          dat.liveMapData.speedLimitAhead = float(max_speed_ahead)
          dat.liveMapData.speedLimitAheadDistance = float(max_speed_ahead_dist)
        if max_speed is not None:
          # a changed map limit invalidates the traffic-derived limit
          if abs(max_speed - max_speed_prev) > 0.1:
            query_lock = self.sharedParams.get('query_lock', None)
            query_lock.acquire()
            self.sharedParams['speedLimittrafficvalid'] = False
            query_lock.release()
          max_speed_prev = max_speed
        advisory_max_speed = cur_way.advisory_max_speed()
        if speedLimittrafficAdvisoryvalid:
          dat.liveMapData.speedAdvisoryValid = True
          dat.liveMapData.speedAdvisory = speedLimittrafficAdvisory / 3.6
        else:
          if advisory_max_speed is not None:
            dat.liveMapData.speedAdvisoryValid = True
            dat.liveMapData.speedAdvisory = advisory_max_speed
        # Curvature
        dat.liveMapData.curvatureValid = curvature_valid
        dat.liveMapData.curvature = float(upcoming_curvature)
        dat.liveMapData.distToTurn = float(dist_to_turn)
        if road_points is not None:
          dat.liveMapData.roadX, dat.liveMapData.roadY = road_points
        if curvature is not None:
          dat.liveMapData.roadCurvatureX = [float(x) for x in dists]
          dat.liveMapData.roadCurvature = [float(x) for x in curvature]
      else:
        self.sharedParams['osm_way_id'] = 0
      # traffic-derived limit takes precedence over the map limit
      if self.sharedParams['speedLimittrafficvalid']:
        if speedLimittraffic > 0.1:
          dat.liveMapData.speedLimitValid = True
          dat.liveMapData.speedLimit = speedLimittraffic / 3.6
          map_valid = False
        else:
          query_lock = self.sharedParams.get('query_lock', None)
          query_lock.acquire()
          self.sharedParams['speedLimittrafficvalid'] = False
          query_lock.release()
      else:
        if max_speed is not None and map_valid:
          dat.liveMapData.speedLimitValid = True
          dat.liveMapData.speedLimit = max_speed
      dat.liveMapData.mapValid = map_valid
      self.logger.debug("Sending ... liveMapData ... %s", str(dat))
      self.pm.send('liveMapData', dat)
class MessagedGPSThread(LoggerThread):
  """Thread that mirrors the newest external GPS fix into sharedParams.

  Polls gpsLocationExternal at ~10 Hz, archives each fresh fix via
  save_gps_data(), and publishes it as sharedParams['last_gps'].
  """
  def __init__(self, threadID, name, sharedParams={}):
    # delegate logger/thread setup to the base class
    LoggerThread.__init__(self, threadID, name)
    self.sharedParams = sharedParams
    self.sm = messaging.SubMaster(['gpsLocationExternal'])
    self.logger.debug("entered messagedGPS_thread, ... %s" % (str(self.sm)))
  def run(self):
    self.logger.debug("Entered run method for thread :" + str(self.name))
    latest_fix = None
    cycle_start = time.time()
    while True:
      # warn when a cycle overran its 100 ms budget by more than 2x
      if time.time() - cycle_start > 0.2:
        print("Mapd MessagedGPSThread lagging by: %s" % str(time.time() - cycle_start - 0.1))
      # pace the loop to ~10 Hz
      if time.time() - cycle_start >= 0.1:
        cycle_start = time.time()
      else:
        time.sleep(0.01)
        continue
      self.logger.debug("starting new cycle in endless loop")
      self.sm.update(0)
      if self.sm.updated['gpsLocationExternal']:
        latest_fix = self.sm['gpsLocationExternal']
        self.save_gps_data(latest_fix, self.sharedParams['osm_way_id'])
      # publish the most recent fix (possibly unchanged) under the lock
      lock = self.sharedParams.get('query_lock', None)
      lock.acquire()
      self.sharedParams['last_gps'] = latest_fix
      lock.release()
      self.logger.debug("setting last_gps to %s" % str(latest_fix))
class MessagedArneThread(LoggerThread):
  """Thread that folds liveTrafficData (camera/traffic speed limits) into sharedParams."""
  def __init__(self, threadID, name, sharedParams={}):
    # invoke parent constructor
    LoggerThread.__init__(self, threadID, name)
    self.sharedParams = sharedParams
    self.sm = messaging.SubMaster(['liveTrafficData'])#,'trafficModelEvent'])
    #self.logger.debug("entered messageArned_thread, ... %s" % str(self.arne_sm))
  def run(self):
    self.logger.debug("Entered run method for thread :" + str(self.name))
    # traffic-light state is currently disabled (see commented block below);
    # these defaults are still written to sharedParams every cycle
    last_not_none_signal = 'NONE'
    last_not_none_signal_counter = 0
    traffic_confidence = 0
    traffic_status = 'NONE'
    speedLimittraffic = 0
    speedLimittraffic_prev = 0
    speedLimittrafficAdvisoryvalid = False
    speedLimittrafficAdvisory = 0
    start = time.time()
    while True:
      if time.time() - start > 0.2:
        print("Mapd MessagedArneThread lagging by: %s" % str(time.time() - start - 0.1))
      if time.time() - start < 0.1:
        time.sleep(0.01)
        continue
      else:
        start = time.time()
      self.logger.debug("starting new cycle in endless loop")
      self.sm.update(0)
      #if self.arne_sm.updated['trafficModelEvent']:
      # traffic_status = self.arne_sm['trafficModelEvent'].status
      #traffic_confidence = round(self.arne_sm['trafficModelEvent'].confidence * 100, 2)
      #if traffic_confidence >= 50 and (traffic_status == 'GREEN' or traffic_status == 'SLOW'):
      #last_not_none_signal = traffic_status
      #last_not_none_signal_counter = 0
      #elif traffic_confidence >= 50 and traffic_status == 'NONE' and last_not_none_signal != 'NONE':
      #if last_not_none_signal_counter < 25:
      #last_not_none_signal_counter = last_not_none_signal_counter + 1
      #print("self.last_not_none_signal_counter")
      #print(self.last_not_none_signal_counter)
      #print("self.last_not_none_signal")
      #print(self.last_not_none_signal)
      #else:
      #last_not_none_signal = 'NONE'
      # read the current validity flag under the lock
      query_lock = self.sharedParams.get('query_lock', None)
      query_lock.acquire()
      speedLimittrafficvalid = self.sharedParams['speedLimittrafficvalid']
      query_lock.release()
      traffic = self.sm['liveTrafficData']
      if traffic.speedLimitValid:
        speedLimittraffic = traffic.speedLimit
        # only (re)validate when the reported limit actually changed
        if abs(speedLimittraffic_prev - speedLimittraffic) > 0.1:
          speedLimittrafficvalid = True
          speedLimittraffic_prev = speedLimittraffic
      else:
        speedLimittrafficvalid = False
      if traffic.speedAdvisoryValid:
        speedLimittrafficAdvisory = traffic.speedAdvisory
        speedLimittrafficAdvisoryvalid = True
      else:
        speedLimittrafficAdvisoryvalid = False
      # publish everything back to the shared state under the lock
      query_lock.acquire()
      self.sharedParams['traffic_status'] = traffic_status
      self.sharedParams['traffic_confidence'] = traffic_confidence
      self.sharedParams['last_not_none_signal'] = last_not_none_signal
      self.sharedParams['speedLimittraffic'] = speedLimittraffic
      self.sharedParams['speedLimittrafficvalid'] = speedLimittrafficvalid
      self.sharedParams['speedLimittrafficAdvisory'] = speedLimittrafficAdvisory
      self.sharedParams['speedLimittrafficAdvisoryvalid'] = speedLimittrafficAdvisoryvalid
      query_lock.release()
def main():
  """Wire up crash reporting, the shared state dict, and the four mapd worker threads."""
  params = Params()
  dongle_id = params.get("DongleId")
  crash.bind_user(id=dongle_id)
  crash.bind_extra(version=version, dirty=dirty, is_eon=True)
  crash.install()
  # lock-protected state exchanged between the worker threads
  sharedParams = {
    'last_gps': None,
    'query_lock': threading.Lock(),
    'last_query_result': None,
    'last_query_pos': None,
    'cache_valid': False,
    'traffic_status': 'None',
    'traffic_confidence': 100,
    'last_not_none_signal': 'None',
    'speedLimittraffic': 0,
    'speedLimittrafficvalid': False,
    'speedLimittrafficAdvisory': 0,
    'speedLimittrafficAdvisoryvalid': False,
    'osm_way_id': 0,
  }
  workers = [
    QueryThread(1, "QueryThread", sharedParams=sharedParams),
    MapsdThread(2, "MapsdThread", sharedParams=sharedParams),
    MessagedGPSThread(3, "MessagedGPSThread", sharedParams=sharedParams),
    MessagedArneThread(4, "MessagedArneThread", sharedParams=sharedParams),
  ]
  for worker in workers:
    worker.start()


if __name__ == "__main__":
  main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,506
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/interceptor/__init__.py
|
#!/usr/bin/env python
# Axis halves an override may target; 'full' keeps the raw signed axis value.
PARTS = ['negative', 'positive', 'full'] # up and left is negative, down and right is positive
# Per-axis override modes accepted in axesMode (see Interceptor.override_axis).
MODES = ['interceptor', 'injector', 'adder']
class Interceptor:
  """Applies joystick-style override messages on top of stock control signals.

  An external message (with .enabled/.axes/.axesMode/.buttons) can replace,
  inject into, or add to individual control signals. A watchdog disables all
  overrides when no fresh message has arrived within 2 seconds.
  """
  def __init__(self):
    self.enabled = False
    self.interceptor = None
    self.watchdog = 2 * 1e9 # In 2 sec disable interceptor if there is no new messages
  def update(self, msg, msg_timestamp, current_timestamp):
    """Store the latest override message, or disarm if it is stale."""
    stale = msg_timestamp < current_timestamp - self.watchdog
    if stale:
      self.enabled = False
      return
    self.enabled = msg.enabled
    self.interceptor = msg
  def override_axis(self, signal, index, part, scale=1.0):
    """Return `signal` possibly overridden by interceptor axis `index`.

    `part` selects which half of the axis applies (see PARTS); the per-axis
    mode ('interceptor'/'injector'/'adder') decides how the value combines
    with the original signal.
    """
    disabled = (not self.enabled
                or len(self.interceptor.axes) < index + 1
                or part not in PARTS)
    if disabled:
      return signal
    value = self.interceptor.axes[index] * scale
    if part == 'negative':
      value = max(-value, 0.)
    elif part == 'positive':
      value = max(value, 0.)
    # missing or empty mode defaults to plain interception (use value as-is)
    mode = self.interceptor.axesMode[index] if index < len(self.interceptor.axesMode) else ''
    if mode == 'injector' and value == 0:
      value = signal            # injector: only act when the stick is moved
    elif mode == 'adder':
      value = signal + value    # adder: offset the stock signal
    return value
  def override_button(self, signal, index, values=(True, False)):
    """Return values[0]/values[1] from interceptor button `index`, else `signal`."""
    if not self.enabled or len(self.interceptor.buttons) < index + 1:
      return signal
    return values[0] if self.interceptor.buttons[index] else values[1]
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,507
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/dragonpilot/gpxd.py
|
#!/usr/bin/env python3.7
#pylint: skip-file
'''
GPS cord converter: https://gist.github.com/jp1017/71bd0976287ce163c11a7cb963b04dd8
'''
import cereal.messaging as messaging
import os
import time
import datetime
import signal
import threading
import math
import zipfile
pi = 3.1415926535897932384626  # pi used by the GCJ-02 transform formulas below
x_pi = 3.14159265358979324 * 3000.0 / 180.0  # BD-09 constant from the gist; appears unused in this file
a = 6378245.0  # ellipsoid semi-major axis (m) used by the GCJ-02 conversion
ee = 0.00669342162296594323  # ellipsoid first eccentricity squared
GPX_LOG_PATH = '/sdcard/gpx_logs/'
LOG_DELAY = 0.1 # secs, lower for higher accuracy, 0.1 seems fine
LOG_LENGTH = 60 # mins, higher means it keeps more data in the memory, will take more time to write into a file too.
LOST_SIGNAL_COUNT_LENGTH = 30 # secs, if we lost signal for this long, perform output to data
MIN_MOVE_SPEED_KMH = 5 # km/h, min speed to trigger logging
# do not change
LOST_SIGNAL_COUNT_MAX = LOST_SIGNAL_COUNT_LENGTH / LOG_DELAY # secs,
LOGS_PER_FILE = LOG_LENGTH * 60 / LOG_DELAY # e.g. 3 * 60 / 0.1 = 1800 points per file
MIN_MOVE_SPEED_MS = MIN_MOVE_SPEED_KMH / 3.6  # m/s; NOTE(review): not referenced anywhere in this file -- speed gating looks unimplemented
class WaitTimeHelper:
  """Installs signal handlers that flag a graceful-shutdown request.

  The main loop polls `shutdown` each tick and may wait on `ready_event`,
  which is set as soon as SIGTERM/SIGINT/SIGHUP arrives.
  """

  def __init__(self):
    # Instance attributes (previously class attributes): a class-level
    # threading.Event would be shared by all instances, so a stale "set"
    # state could leak between them.
    self.ready_event = threading.Event()
    self.shutdown = False
    signal.signal(signal.SIGTERM, self.graceful_shutdown)
    signal.signal(signal.SIGINT, self.graceful_shutdown)
    signal.signal(signal.SIGHUP, self.graceful_shutdown)

  def graceful_shutdown(self, signum, frame):
    """Signal handler: request shutdown and wake any waiter."""
    self.shutdown = True
    self.ready_event.set()
def main():
  """Poll gpsLocationExternal at ~10 Hz, buffer track points, and flush
  them to a zipped GPX file periodically and on shutdown."""
  # init
  sm = messaging.SubMaster(['gpsLocationExternal'])
  log_count = 0
  logs = list()
  lost_signal_count = 0
  wait_helper = WaitTimeHelper()
  started_time = datetime.datetime.utcnow().isoformat()
  # outside_china_checked = False
  # outside_china = False
  while True:
    sm.update()
    if sm.updated['gpsLocationExternal']:
      gps = sm['gpsLocationExternal']
      # do not log when no fix or accuracy is too low, add lost_signal_count
      # NOTE(review): `gps.flags % 2 == 0` treats an even flags value as
      # "no fix" (fix bit assumed to be bit 0) -- confirm against the
      # gpsLocationExternal schema.
      if gps.flags % 2 == 0 or gps.accuracy > 5.:
        if log_count > 0:
          lost_signal_count += 1
      else:
        lng = gps.longitude
        lat = gps.latitude
        # if not outside_china_checked:
        #   outside_china = out_of_china(lng, lat)
        #   outside_china_checked = True
        # if not outside_china:
        #   lng, lat = wgs84togcj02(lng, lat)
        # gps.timestamp is in milliseconds, hence the *0.001
        logs.append([datetime.datetime.utcfromtimestamp(gps.timestamp*0.001).isoformat(), lat, lng, gps.altitude])
        log_count += 1
        lost_signal_count = 0
    '''
    write to log if
    1. reach per file limit
    2. lost signal for a certain time (e.g. under cover car park?)
    '''
    if log_count > 0 and (log_count >= LOGS_PER_FILE or lost_signal_count >= LOST_SIGNAL_COUNT_MAX):
      # output
      to_gpx(logs, started_time)
      lost_signal_count = 0
      log_count = 0
      logs.clear()
      started_time = datetime.datetime.utcnow().isoformat()
    time.sleep(LOG_DELAY)
    if wait_helper.shutdown:
      break
  # when process end, we store any logs.
  if log_count > 0:
    to_gpx(logs, started_time)
'''
check to see if it's in china
'''
def out_of_china(lng, lat):
  """Return True when (lng, lat) falls outside the bounding box used to
  decide whether the GCJ-02 offset applies."""
  lng_inside = 72.004 <= lng <= 137.8347
  lat_inside = 0.8293 <= lat <= 55.8271
  return not (lng_inside and lat_inside)
def transform_lat(lng, lat):
  """GCJ-02 latitude perturbation term for offset coordinates (lng, lat).

  The additions keep the original left-to-right order so float results
  are bit-identical to the previous implementation.
  """
  sin = math.sin
  base = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + 0.1 * lng * lat + 0.2 * math.sqrt(abs(lng))
  wave_hi = (20.0 * sin(6.0 * lng * pi) + 20.0 * sin(2.0 * lng * pi)) * 2.0 / 3.0
  wave_mid = (20.0 * sin(lat * pi) + 40.0 * sin(lat / 3.0 * pi)) * 2.0 / 3.0
  wave_lo = (160.0 * sin(lat / 12.0 * pi) + 320 * sin(lat * pi / 30.0)) * 2.0 / 3.0
  return base + wave_hi + wave_mid + wave_lo
def transform_lng(lng, lat):
  """GCJ-02 longitude perturbation term for offset coordinates (lng, lat).

  Additions keep the original left-to-right order for bit-identical floats.
  """
  sin = math.sin
  base = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + 0.1 * lng * lat + 0.1 * math.sqrt(abs(lng))
  wave_hi = (20.0 * sin(6.0 * lng * pi) + 20.0 * sin(2.0 * lng * pi)) * 2.0 / 3.0
  wave_mid = (20.0 * sin(lng * pi) + 40.0 * sin(lng / 3.0 * pi)) * 2.0 / 3.0
  wave_lo = (150.0 * sin(lng / 12.0 * pi) + 300.0 * sin(lng / 30.0 * pi)) * 2.0 / 3.0
  return base + wave_hi + wave_mid + wave_lo
'''
Convert wgs84 to gcj02 (
'''
def wgs84togcj02(lng, lat):
  """Convert WGS-84 coordinates to GCJ-02 ("mars") coordinates.

  Coordinates outside China's bounding box are returned unchanged.
  Returns (lng, lat).
  """
  if out_of_china(lng, lat):
    return lng, lat
  # perturbation terms are computed on coordinates offset by (105, 35)
  lat_off = transform_lat(lng - 105.0, lat - 35.0)
  lng_off = transform_lng(lng - 105.0, lat - 35.0)
  rad_lat = lat / 180.0 * pi
  m = math.sin(rad_lat)
  m = 1 - ee * m * m
  sqrt_m = math.sqrt(m)
  # scale the offsets from meters-ish units into degrees on the ellipsoid
  lat_off = (lat_off * 180.0) / ((a * (1 - ee)) / (m * sqrt_m) * pi)
  lng_off = (lng_off * 180.0) / (a / sqrt_m * math.cos(rad_lat) * pi)
  return lng + lng_off, lat + lat_off
'''
write logs to a gpx file and zip it
'''
def to_gpx(logs, timestamp):
  """Write buffered track points to a deflate-compressed .gpx inside a .zip.

  logs: list of [iso_time, lat, lng, ele] rows appended by main().
  timestamp: ISO start time; ':' is replaced so it is filename-safe.

  Fixes over the previous version: no longer shadows the builtin `str`,
  builds the document with a list + join instead of quadratic `+=`, and
  uses `with` so the ZipFile is closed even if writestr fails.
  """
  if len(logs) > 0:
    if not os.path.exists(GPX_LOG_PATH):
      os.makedirs(GPX_LOG_PATH)
    filename = timestamp.replace(':','-')
    parts = [
      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>\n",
      "<gpx xmlns=\"http://www.topografix.com/GPX/1/1\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd\" version=\"1.1\">\n",
      "\t<trk>\n",
      "\t\t<trkseg>\n",
    ]
    for trkpt in logs:
      parts.append("\t\t\t<trkpt time=\"%sZ\" lat=\"%s\" lon=\"%s\" ele=\"%s\" />\n" % (trkpt[0], trkpt[1], trkpt[2], trkpt[3]))
    parts.append("\t\t</trkseg>\n")
    parts.append("\t</trk>\n")
    parts.append("</gpx>\n")
    content = "".join(parts)
    try:
      zi = zipfile.ZipInfo('%sZ.gpx' % filename, time.localtime())
      zi.compress_type = zipfile.ZIP_DEFLATED
      with zipfile.ZipFile('%s%sZ.zip' % (GPX_LOG_PATH, filename), mode='w') as zf:
        zf.writestr(zi, content)
    except Exception:
      # best-effort, matching previous behavior: a write failure (e.g. the
      # sdcard is missing) must never crash the GPS logging loop
      pass
# entry point when run as a standalone logging daemon
if __name__ == "__main__":
  main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,508
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/test/process_replay/inject_model.py
|
#!/usr/bin/env python3
import time
from tqdm import tqdm
import selfdrive.manager as manager
from cereal.messaging import PubMaster, recv_one, sub_sock
from tools.lib.framereader import FrameReader
def rreplace(s, old, new, occurrence):
  """Replace up to `occurrence` rightmost occurrences of `old` in `s` with `new`."""
  return new.join(s.rsplit(old, occurrence))
def regen_model(msgs, pm, frame_reader, model_sock):
  """Replay logged camera frames through a live modeld and collect its output.

  msgs: logged capnp events for the segment.
  pm: PubMaster publishing 'liveCalibration' and 'frame'.
  frame_reader: supplies raw frames by index.
  model_sock: subscription socket receiving modeld output.
  Returns a list with one fresh model message per replayed frame.
  """
  # Send some livecalibration messages to initalize visiond
  for msg in msgs:
    if msg.which() == 'liveCalibration':
      pm.send('liveCalibration', msg.as_builder())
  out_msgs = []
  fidx = 0
  for msg in tqdm(msgs):
    w = msg.which()
    if w == 'frame':
      msg = msg.as_builder()
      # channel reversal ([:,:,::-1]) -- presumably RGB -> BGR; confirm what
      # modeld expects in frame.image
      img = frame_reader.get(fidx, pix_fmt="rgb24")[0][:,:,::-1]
      msg.frame.image = img.flatten().tobytes()
      pm.send(w, msg)
      model = recv_one(model_sock)  # blocks until modeld produces a result
      fidx += 1
      out_msgs.append(model)
    elif w == 'liveCalibration':
      pm.send(w, msg.as_builder())
  return out_msgs
def inject_model(msgs, segment_name):
  """Re-run the model over a segment's camera footage and splice the fresh
  model messages into `msgs` in place of the logged ones.

  segment_name: route/segment identifier ('|' or '--' separated).
  Returns a new list of messages; non-model messages pass through untouched.

  Fix: the camerad/modeld teardown was duplicated in the success path and
  the except handler -- it now lives in a single try/finally, which also
  covers exception types the old `except (KeyboardInterrupt, SystemExit,
  Exception)` clause missed, while preserving exception propagation.
  """
  if segment_name.count('--') == 2:
    # normalize 'route--seg--n' into 'route--seg/n'
    segment_name = rreplace(segment_name, '--', '/', 1)
  frame_reader = FrameReader('cd:/'+segment_name.replace("|", "/") + "/fcamera.hevc")
  manager.start_managed_process('camerad')
  manager.start_managed_process('modeld')
  # TODO do better than just wait for modeld to boot
  time.sleep(5)
  pm = PubMaster(['liveCalibration', 'frame'])
  model_sock = sub_sock('model')
  try:
    out_msgs = regen_model(msgs, pm, frame_reader, model_sock)
  finally:
    # tear down in reverse dependency order: modeld first, then camerad
    # after a grace period
    manager.kill_managed_process('modeld')
    time.sleep(2)
    manager.kill_managed_process('camerad')
  new_msgs = []
  midx = 0
  for msg in msgs:
    if (msg.which() == 'model') and (midx < len(out_msgs)):
      # keep the original timestamp so downstream ordering is preserved
      model = out_msgs[midx].as_builder()
      model.logMonoTime = msg.logMonoTime
      model = model.as_reader()
      new_msgs.append(model)
      midx += 1
    else:
      new_msgs.append(msg)
  print(len(new_msgs), len(list(msgs)))
  assert abs(len(new_msgs) - len(list(msgs))) < 2
  return new_msgs
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,509
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/dragonpilot/dashcam.py
|
#!/usr/bin/env python3.7
import os
import datetime
from common.realtime import sec_since_boot
# recording parameters for the screenrecord-based dashcam
DASHCAM_VIDEOS_PATH = '/sdcard/dashcam/'
DASHCAM_DURATION = 180 # max is 180
DASHCAM_BIT_RATES = 4000000 # max is 4000000
DASHCAM_MAX_SIZE_PER_FILE = DASHCAM_BIT_RATES/8*DASHCAM_DURATION # 4Mbps / 8 * 180 = 90MB per 180 seconds
DASHCAM_FREESPACE_LIMIT = 0.15 # we start cleaning up footage when freespace is below 15%
DASHCAM_KEPT = DASHCAM_MAX_SIZE_PER_FILE * 240 # 12 hrs of video = 21GB
class Dashcam():
  """Records the device screen via `screenrecord` in fixed-length clips
  and prunes the oldest footage when storage runs low."""

  def __init__(self):
    self.dashcam_folder_exists = False
    self.dashcam_mkdir_retry = 0
    self.dashcam_next_time = 0   # monotonic time when the next clip should start
    self.started = False
    self.free_space = 1.

  def run(self, started, free_space):
    """One tick: remember state, ensure the folder exists, then record/clean."""
    self.started = started
    self.free_space = free_space
    self.make_folder()
    if not self.dashcam_folder_exists:
      return
    self.record()
    self.clean_up()

  def make_folder(self):
    """Create the dashcam directory, giving up after the retry budget is spent."""
    if self.dashcam_folder_exists or self.dashcam_mkdir_retry > 5:
      return
    try:
      if os.path.exists(DASHCAM_VIDEOS_PATH):
        # the directory created on an earlier attempt is now visible
        self.dashcam_folder_exists = True
      else:
        os.makedirs(DASHCAM_VIDEOS_PATH)
    except OSError:
      self.dashcam_folder_exists = False
    self.dashcam_mkdir_retry += 1

  def record(self):
    """Kick off a new screenrecord clip when the previous one is ending."""
    if not self.started:
      self.dashcam_next_time = 0
      return
    now_mono = sec_since_boot()
    if now_mono < self.dashcam_next_time:
      return
    stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    os.system("screenrecord --bit-rate %s --time-limit %s %s%s.mp4 &" % (DASHCAM_BIT_RATES, DASHCAM_DURATION, DASHCAM_VIDEOS_PATH, stamp))
    # schedule the next clip 1s before this one's time limit expires
    self.dashcam_next_time = now_mono + DASHCAM_DURATION - 1

  def clean_up(self):
    """Delete the oldest clip when free space or the size budget is exceeded."""
    if self.free_space >= DASHCAM_FREESPACE_LIMIT and self.get_used_spaces() <= DASHCAM_KEPT:
      return
    try:
      clips = [f for f in sorted(os.listdir(DASHCAM_VIDEOS_PATH)) if os.path.isfile(DASHCAM_VIDEOS_PATH + f)]
      os.system("rm -fr %s &" % (DASHCAM_VIDEOS_PATH + clips[0]))
    except (IndexError, FileNotFoundError, OSError):
      pass

  def get_used_spaces(self):
    """Total bytes of files in the dashcam directory (0 on any FS error)."""
    try:
      return sum(os.path.getsize(DASHCAM_VIDEOS_PATH + f) for f in os.listdir(DASHCAM_VIDEOS_PATH) if os.path.isfile(DASHCAM_VIDEOS_PATH + f))
    except (IndexError, FileNotFoundError, OSError):
      return 0
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,510
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/dragonpilot/systemd.py
|
#!/usr/bin/env python3
#pylint: disable=W0105
'''
This is a service that broadcast dp config values to openpilot's messaging queues
'''
import cereal.messaging as messaging
import time
from math import floor
from common.dp_conf import confs, get_struct_name, to_struct_val
from common.params import Params, put_nonblocking
import subprocess
import re
import os
from typing import Dict, Any
from selfdrive.thermald.power_monitoring import set_battery_charging, get_battery_charging
params = Params()  # module-level handle shared by set_message/update_conf below
from common.realtime import sec_since_boot
from common.i18n import get_locale
from common.dp_common import param_get, get_last_modified
from common.dp_time import LAST_MODIFIED_SYSTEMD
from selfdrive.dragonpilot.dashcam import Dashcam
from common.travis_checker import travis
# params live on the device filesystem; under CI (travis) use a home-relative path
if travis:
  PARAM_PATH = str(os.environ.get('HOME')) + "/.comma/params/d/"
else:
  PARAM_PATH = '/data/params/d/'
# startup debug: dump the names of all existing params
files = os.listdir(PARAM_PATH)
for file in files:
  print(file)
# seed dp_last_modified so change detection has a baseline on first boot
if not os.path.exists(PARAM_PATH + "dp_last_modified"):
  params.put('dp_last_modified',str(floor(time.time())))
print("dp_last_modified read from file is " + str(params.get("dp_last_modified")))
if os.path.exists(PARAM_PATH + "dp_last_modified"):
  print("dp_last_modified created succesfully")
DELAY = 0.5 # 2hz
HERTZ = 1/DELAY
# per-conf mtime cache used by update_conf() to skip unchanged param files
last_modified_confs: Dict[str, Any] = {}
def confd_thread():
  """Broadcast dragonpilot config on the 'dragonConf' queue at ~2 Hz.

  Params are re-read only when dp_last_modified changes or `started` flips;
  otherwise the previously published message is re-sent unchanged.
  """
  sm = messaging.SubMaster(['thermal'])
  pm = messaging.PubMaster(['dragonConf'])
  last_dp_msg = None        # last published dragonConf struct
  frame = 0                 # loop counter; one frame == DELAY seconds
  update_params = False     # True when params must be re-read this frame
  modified = None
  last_modified = None
  last_modified_check = None
  started = False           # onroad state mirrored from thermald
  free_space = 1
  battery_percent = 0
  overheat = False
  last_charging_ctrl = False
  last_started = False
  dashcam = Dashcam()
  while True:
    start_sec = sec_since_boot()
    msg = messaging.new_message('dragonConf')
    # start from the last published values so fields persist between updates
    if last_dp_msg is not None:
      msg.dragonConf = last_dp_msg
    '''
    ===================================================
    load thermald data every 3 seconds
    ===================================================
    '''
    if frame % (HERTZ * 3) == 0:
      started, free_space, battery_percent, overheat = pull_thermald(frame, sm, started, free_space, battery_percent, overheat)
      setattr(msg.dragonConf, get_struct_name('dp_thermal_started'), started)
      setattr(msg.dragonConf, get_struct_name('dp_thermal_overheat'), overheat)
    '''
    ===================================================
    hotspot on boot
    we do it after 30 secs just in case
    ===================================================
    '''
    if frame == (HERTZ * 30) and param_get("dp_hotspot_on_boot", "bool", False):
      os.system("service call wifi 37 i32 0 i32 1 &")
    '''
    ===================================================
    check dp_last_modified every second
    ===================================================
    '''
    if not update_params:
      last_modified_check, modified = get_last_modified(LAST_MODIFIED_SYSTEMD, last_modified_check, modified)
      if last_modified != modified:
        update_params = True
      last_modified = modified
    '''
    ===================================================
    conditionally set update_params to true
    ===================================================
    '''
    # force updating param when `started` changed
    if last_started != started:
      update_params = True
      last_started = started
    if frame == 0:
      update_params = True
    '''
    ===================================================
    conditionally update dp param base on stock param
    ===================================================
    '''
    if update_params and params.get("LaneChangeEnabled") == b"1":
      params.put("dp_steering_on_signal", "0")
    '''
    ===================================================
    push param vals to message
    ===================================================
    '''
    if update_params:
      msg = update_conf_all(confs, msg, frame == 0)
      update_params = False
    '''
    ===================================================
    push once
    ===================================================
    '''
    if frame == 0:
      setattr(msg.dragonConf, get_struct_name('dp_locale'), get_locale())
      put_nonblocking('dp_is_updating', '0')
    '''
    ===================================================
    push ip addr every 10 secs
    ===================================================
    '''
    if frame % (HERTZ * 10) == 0:
      msg = update_ip(msg)
    '''
    ===================================================
    push is_updating status every 5 secs
    ===================================================
    '''
    if frame % (HERTZ * 5) == 0:
      msg = update_updating(msg)
    '''
    ===================================================
    update msg based on some custom logic
    ===================================================
    '''
    msg = update_custom_logic(msg)
    '''
    ===================================================
    battery ctrl every 30 secs
    PowerMonitor in thermald turns back on every mins
    so lets turn it off more frequent
    ===================================================
    '''
    if frame % (HERTZ * 30) == 0:
      last_charging_ctrl = process_charging_ctrl(msg, last_charging_ctrl, battery_percent)
    '''
    ===================================================
    dashcam
    ===================================================
    '''
    if msg.dragonConf.dpDashcam and frame % HERTZ == 0:
      dashcam.run(started, free_space)
    '''
    ===================================================
    finalise
    ===================================================
    '''
    last_dp_msg = msg.dragonConf
    pm.send('dragonConf', msg)
    frame += 1
    # sleep only the remainder of the period to hold ~2 Hz
    sleep = DELAY-(sec_since_boot() - start_sec)
    if sleep > 0:
      time.sleep(sleep)
def update_conf(msg, conf, first_run = False):
  """Refresh one conf entry's struct field on `msg` when its backing param
  file changed since the last check; returns the (possibly updated) msg."""
  conf_type = conf.get('conf_type')
  # skip checking since modified date time hasn't been changed.
  # NOTE(review): os.stat here raises if the param file disappears while an
  # mtime is cached -- presumably param files are never deleted; confirm.
  if (last_modified_confs.get(conf['name'])) is not None and last_modified_confs.get(conf['name']) == os.stat(PARAM_PATH + conf['name']).st_mtime:
    return msg
  if 'param' in conf_type and 'struct' in conf_type:
    update_this_conf = True
    if not first_run:
      # entries flagged update_once are only read on the first pass
      update_once = conf.get('update_once')
      if update_once is not None and update_once is True:
        return msg
    if update_this_conf:
      update_this_conf = check_dependencies(msg, conf)
    if update_this_conf:
      msg = set_message(msg, conf)
      if os.path.isfile(PARAM_PATH + conf['name']):
        # cache the mtime so an unchanged param is skipped next time
        last_modified_confs[conf['name']] = os.stat(PARAM_PATH + conf['name']).st_mtime
  return msg
def update_conf_all(confs, msg, first_run = False):
  """Fold every conf entry into `msg` via update_conf and return the result."""
  for entry in confs:
    msg = update_conf(msg, entry, first_run)
  return msg
def process_charging_ctrl(msg, last_charging_ctrl, battery_percent):
  """Apply dp battery-charging control and return the current toggle state.

  When the toggle flips (either direction) charging is re-enabled as a safe
  default. While enabled, charging stops at dpDischargingAt and resumes at
  dpChargingAt (simple hysteresis).
  """
  charging_ctrl = msg.dragonConf.dpChargingCtrl
  if last_charging_ctrl != charging_ctrl:
    set_battery_charging(True)
  if charging_ctrl:
    if battery_percent >= msg.dragonConf.dpDischargingAt and get_battery_charging():
      set_battery_charging(False)
    elif battery_percent <= msg.dragonConf.dpChargingAt and not get_battery_charging():
      set_battery_charging(True)
  return charging_ctrl
def pull_thermald(frame, sm, started, free_space, battery_percent, overheat):
  """Non-blocking poll of the 'thermal' socket.

  Returns (started, free_space, battery_percent, overheat), unchanged when
  no fresh thermal message arrived.
  NOTE(review): `frame` is unused here; kept for call-site compatibility.
  """
  sm.update(0)  # timeout 0: never block the publish loop
  if sm.updated['thermal']:
    started = sm['thermal'].started
    free_space = sm['thermal'].freeSpace
    battery_percent = sm['thermal'].batteryPercent
    # thermalStatus >= 2 treated as overheat -- confirm against the enum
    overheat = sm['thermal'].thermalStatus >= 2
  return started, free_space, battery_percent, overheat
def update_custom_logic(msg):
  """Enforce cross-field invariants on the dragonConf message in place."""
  # auto lane change threshold must be at least the assisted LC threshold;
  # persist the correction back to params
  if msg.dragonConf.dpAssistedLcMinMph > msg.dragonConf.dpAutoLcMinMph:
    put_nonblocking('dp_auto_lc_min_mph', str(msg.dragonConf.dpAssistedLcMinMph))
    msg.dragonConf.dpAutoLcMinMph = msg.dragonConf.dpAssistedLcMinMph
  if msg.dragonConf.dpAtl:
    # dpAtl forces these overrides -- see dp_conf for the toggle's semantics
    msg.dragonConf.dpAllowGas = True
    msg.dragonConf.dpDynamicFollow = 0
    msg.dragonConf.dpAccelProfile = 0
    msg.dragonConf.dpGearCheck = False
  if msg.dragonConf.dpAppWaze or msg.dragonConf.dpAppHr:
    # full-screen apps replace the driving UI
    msg.dragonConf.dpDrivingUi = False
  if not msg.dragonConf.dpDriverMonitor:
    msg.dragonConf.dpUiFace = False
  return msg
def update_updating(msg):
  """Mirror the dp_is_updating param into the message's struct field."""
  setattr(msg.dragonConf, get_struct_name('dp_is_updating'), to_struct_val('dp_is_updating', param_get("dp_is_updating", "bool", False)))
  return msg
def update_ip(msg):
  """Best-effort read of wlan0's IPv4 address into the message ('N/A' on failure)."""
  val = 'N/A'
  try:
    result = subprocess.check_output(["ifconfig", "wlan0"], encoding='utf8')
    # first "inet addr:x.x.x.x" match; outer group of the first tuple is the IP
    val = re.findall(r"inet addr:((\d+\.){3}\d+)", result)[0][0]
  except Exception:
    # ifconfig missing, wlan0 down, or no address -- all collapse to 'N/A'
    pass
  setattr(msg.dragonConf, get_struct_name('dp_ip_addr'), val)
  return msg
def set_message(msg, conf):
    """Read one param, clamp it to its conf min/max and store it on *msg*.

    A missing param is (re)seeded with the conf default.  When clamping
    changes the value, the clamped value is written back to the param store
    so storage and message stay consistent.
    """
    val = params.get(conf['name'], encoding='utf8')
    if val is not None:
        # Param values may be NUL-padded on disk; strip trailing NULs.
        val = val.rstrip('\x00')
    else:
        val = conf.get('default')
        # NOTE(review): when the conf has no 'default', str(val) stores the
        # literal string 'None' -- confirm this is intended.
        params.put(conf['name'], str(val))
    struct_val = to_struct_val(conf['name'], val)
    orig_val = struct_val
    if struct_val is not None:
        # Clamp to the conf's declared bounds, when present.
        if conf.get('min') is not None:
            struct_val = max(struct_val, conf.get('min'))
        if conf.get('max') is not None:
            struct_val = min(struct_val, conf.get('max'))
    # Persist only if clamping actually changed the value.
    if orig_val != struct_val:
        params.put(conf['name'], str(struct_val))
    setattr(msg.dragonConf, get_struct_name(conf['name']), struct_val)
    return msg
def check_dependencies(msg, conf):
    """Return True when every dependency of *conf* holds on *msg*.

    A conf without a 'depends' entry always passes.  Checking dependencies
    first lets the caller avoid reading params that are currently inactive,
    reducing unnecessary param reads.
    """
    dependencies = conf.get('depends')
    if dependencies is None:
        return True
    return all(
        getattr(msg.dragonConf, get_struct_name(dep['name'])) in dep['vals']
        for dep in dependencies
    )
def main():
    # Entry point: run the conf daemon loop (blocks until process exit).
    confd_thread()
if __name__ == "__main__":
    main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,511
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/tools/livedm/helpers.py
|
import numpy as np
import cv2 # pylint: disable=import-error
def rot_matrix(roll, pitch, yaw):
    """Build the 3x3 rotation matrix for the given roll, pitch, yaw angles.

    Composed as R = Rz(yaw) @ (Ry(pitch) @ Rx(roll)), angles in radians.
    """
    c_r, s_r = np.cos(roll), np.sin(roll)
    c_p, s_p = np.cos(pitch), np.sin(pitch)
    c_y, s_y = np.cos(yaw), np.sin(yaw)
    rot_x = np.array([[1, 0, 0],
                      [0, c_r, -s_r],
                      [0, s_r, c_r]])
    rot_y = np.array([[c_p, 0, s_p],
                      [0, 1, 0],
                      [-s_p, 0, c_p]])
    rot_z = np.array([[c_y, -s_y, 0],
                      [s_y, c_y, 0],
                      [0, 0, 1]])
    # Same grouping as the original ry.dot(rp.dot(rr)).
    return rot_z @ (rot_y @ rot_x)
def draw_pose(img, pose, loc, W=160, H=320, xyoffset=(0, 0), faceprob=0):
    """Draw the head-pose 'nose' vector for a detected face onto *img*.

    pose[0:3] are roll/pitch/yaw; *loc* is the face position in normalized
    (-0.5..0.5) coordinates, scaled by W/H into pixels.  A confident
    detection (faceprob > 0.4) is drawn as a yellow line; otherwise only a
    grey dot marks the position.
    """
    # 3x4 matrix: rotation (scaled) plus pixel-space translation column.
    rcmat = np.zeros((3, 4))
    rcmat[:, :3] = rot_matrix(*pose[0:3]) * 0.5
    rcmat[0, 3] = (loc[0]+0.5) * W
    rcmat[1, 3] = (loc[1]+0.5) * H
    rcmat[2, 3] = 1.0
    # Project the nose origin and a point 100 units along the nose axis.
    p1 = np.dot(rcmat, [0, 0, 0, 1])[0:2]
    p2 = np.dot(rcmat, [0, 0, 100, 1])[0:2]
    tr = tuple([int(round(x + xyoffset[i])) for i, x in enumerate(p1)])
    pr = tuple([int(round(x + xyoffset[i])) for i, x in enumerate(p2)])
    if faceprob > 0.4:
        color = (255, 255, 0)
        # FIX: use the `color` variable instead of re-hard-coding the tuple,
        # matching the low-confidence branch below.
        cv2.line(img, tr, pr, color=color, thickness=3)
    else:
        color = (64, 64, 64)
        cv2.circle(img, tr, 7, color=color)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,512
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/common/hardware_base.py
|
from abc import abstractmethod
class HardwareBase:
    """Abstract interface a hardware platform must implement.

    Concrete platforms (e.g. tici) subclass this and fill in the abstract
    accessors.
    """

    @staticmethod
    def get_cmdline():
        """Parse /proc/cmdline into a {key: value} dict.

        Tokens that do not contain exactly one '=' are ignored.
        """
        with open('/proc/cmdline') as f:
            cmdline = f.read()
        parsed = {}
        for token in cmdline.split(' '):
            parts = token.split('=')
            if len(parts) == 2:
                parsed[parts[0]] = parts[1]
        return parsed

    @abstractmethod
    def get_sound_card_online(self):
        """Report whether the sound card is available."""

    @abstractmethod
    def get_imei(self, slot):
        """Return the IMEI for the given SIM slot."""

    @abstractmethod
    def get_serial(self):
        """Return the device serial number."""

    @abstractmethod
    def get_subscriber_info(self):
        """Return carrier subscriber information."""

    @abstractmethod
    def reboot(self, reason=None):
        """Reboot the device, optionally with a reason."""

    @abstractmethod
    def get_network_type(self):
        """Return the current network type."""

    @abstractmethod
    def get_sim_info(self):
        """Return SIM card information."""

    @abstractmethod
    def get_network_strength(self, network_type):
        """Return signal strength for *network_type*."""
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,513
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/interbridge/unisocket.py
|
#!/usr/bin/env python
#pylint: skip-file
# flake8: noqa
import struct, pickle, json
from base64 import b64encode
from hashlib import sha1
from socketserver import ThreadingTCPServer, StreamRequestHandler
from socket import timeout as SocketTimeoutError
# import ssl
class MixedSocketServer(ThreadingTCPServer):
    """Threaded TCP server accepting both raw-pickle and WebSocket clients.

    Tracks connected clients and offers unicast/broadcast helpers; the
    new_client/client_left callbacks are stubs meant to be overridden or
    reassigned by users of the class.
    """
    allow_reuse_address = True
    request_queue_size = 128
    daemon_threads = True

    def __init__(self, addr):
        """*addr* is a 'host:port' string; port 0 picks a free port."""
        host, port = addr.split(':')
        self.addr = (host, int(port))
        self.allow_reuse_address = True
        # FIX: `clients` and `id_counter` used to be mutable class attributes,
        # so every server instance shared a single client list.  They are now
        # per-instance state, initialized before the base class starts
        # accepting connections.
        self.clients = []
        self.id_counter = 0
        ThreadingTCPServer.__init__(self, self.addr, MixedSocketHandler)
        self.port = self.socket.getsockname()[1]

    # For SSL (wss://) WebSocket use:
    # def server_bind(self):
    #     ThreadingTCPServer.server_bind(self)
    #     self.socket = ssl.wrap_socket(
    #         self.socket, server_side=True,
    #         certfile='certPlusKey.pem',
    #         do_handshake_on_connect=False)
    # def get_request(self):
    #     (socket, addr) = ThreadingTCPServer.get_request(self)
    #     socket.do_handshake()
    #     return (socket, addr)

    def _msg_received_(self, handler, msg):
        # NOTE(review): `msg_received` is not defined on this class; it is
        # expected to be attached by the code using the server -- confirm
        # against callers.
        self.msg_received(self.handler_to_client(handler), self, msg)

    def _client_add_(self, handler):
        """Register a newly-handshaked connection and fire new_client."""
        self.id_counter += 1
        client = {
            'id': self.id_counter,
            'handler': handler,
            'addr': handler.client_address  # (ip, port) tuple
        }
        self.clients.append(client)
        self.new_client(client, self)

    def _client_remove_(self, handler):
        """Unregister a finished connection and fire client_left."""
        client = self.handler_to_client(handler)
        self.client_left(client, self)
        if client in self.clients:
            self.clients.remove(client)

    def unicast(self, to_client, msg):
        """Send *msg* to a single client record."""
        to_client['handler'].write_msg(msg)

    def broadcast(self, msg):
        """Send *msg* to every connected client."""
        for client in self.clients:
            self.unicast(client, msg)

    def handler_to_client(self, handler):
        """Return the client record for *handler*, or None if unknown."""
        for client in self.clients:
            if client['handler'] == handler:
                return client
        return None

    def count_clients(self):
        """Number of currently registered clients."""
        return len(self.clients)

    # Callback dummy functions, could be replaced with needed callbacks
    def service_actions(self):  # Additional actions for main server loop
        pass

    def new_client(self, client, server):
        pass

    def client_left(self, client, server):
        pass
# WebSocket frame-format constants (RFC 6455).
MASK = 0x80               # bit 7 of the 2nd header byte: payload is masked
FIN = 0x80                # bit 7 of the 1st header byte: final fragment
OPCODE = 0x0f             # low nibble of the 1st header byte: frame opcode
OPCODE_TEXT = 0x1         # text frame
OPCODE_CLOSE_REQ = 0x8    # connection-close request
PAYLOAD_LEN = 0x7f        # low 7 bits of the 2nd header byte: payload length
PAYLOAD_LEN_EXT16 = 0x7e  # marker: next 2 bytes hold the real length
PAYLOAD_LEN_EXT64 = 0x7f  # marker: next 8 bytes hold the real length
# Fixed GUID appended to the client key when computing Sec-WebSocket-Accept.
GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'.encode()
class MixedSocketHandler(StreamRequestHandler):
    """Per-connection handler speaking either WebSocket or a raw
    length-prefixed pickle protocol, auto-detected during the handshake."""

    def __init__(self, socket, addr, server):
        self.server = server
        self.timeout = 5  # read timeout in seconds
        StreamRequestHandler.__init__(self, socket, addr, server)

    def setup(self):
        StreamRequestHandler.setup(self)
        self.is_connected = True
        self.handshaked = False
        # Assume WebSocket until the handshake proves otherwise.
        self.is_websocket = True

    def handle(self):
        """Per-connection loop: handshake once, then read messages until drop."""
        while self.is_connected:
            if not self.handshaked:
                self.handshake()
            else:
                self.read_msg()

    def read_msg(self):
        # Dispatch on the framing negotiated for this connection.
        if self.is_websocket:
            self.read_websocket()
        else:
            self.read_socket()

    def read_websocket(self):
        """Read one WebSocket frame and deliver its payload to the server.

        Drops the connection on read errors, unmasked frames, close
        requests, or unknown opcodes.
        """
        try:
            b1, b2 = self.rfile.read(2)
        except (ConnectionResetError, ValueError, TimeoutError, SocketTimeoutError):
            self.is_connected = False
            self.handshaked = False
            return
        fin = b1 & FIN
        opcode = b1 & OPCODE
        masked = b2 & MASK
        payload_len = b2 & PAYLOAD_LEN
        if not masked:
            # Client must mask messages and server is not
            self.is_connected = False
            return
        if opcode == OPCODE_TEXT:
            pass
        elif opcode == OPCODE_CLOSE_REQ:
            self.is_connected = False
            return
        else:
            # Unknown opcode received
            self.is_connected = False
            return
        # Extended payload lengths (RFC 6455): 126 -> 16-bit, 127 -> 64-bit.
        if payload_len == 126:
            payload_len = struct.unpack(">H", self.rfile.read(2))[0]
        elif payload_len == 127:
            payload_len = struct.unpack(">Q", self.rfile.read(8))[0]
        masks = self.rfile.read(4)
        msg_bytes = bytearray()
        # Unmask: each payload byte XORed with the 4-byte mask key, cycling.
        for msg_byte in self.rfile.read(payload_len):
            msg_byte ^= masks[len(msg_bytes) % 4]
            msg_bytes.append(msg_byte)
        try:
            self.server._msg_received_(self, json.loads(msg_bytes))
        except json.decoder.JSONDecodeError:
            # Not JSON: deliver as plain decoded text.
            self.server._msg_received_(self, msg_bytes.decode())

    def read_socket(self):
        """Read one length-prefixed pickle message from a raw socket client."""
        try:
            payload = self.rfile.read(4)
            # 4-byte big-endian length prefix.
            payload_len = struct.unpack('!I', payload)[0]
        except ConnectionResetError: #(socket.error, struct.error):
            self.is_connected = False
            self.handshaked = False
            return
        payload = bytearray()
        payload += self.rfile.read(payload_len)
        # NOTE(review): pickle.loads on network data is unsafe with untrusted
        # peers -- confirm this only ever talks to trusted local clients.
        self.server._msg_received_(self, pickle.loads(payload))

    def write_msg(self, msg, opcode=OPCODE_TEXT):
        # Dispatch on the framing negotiated for this connection.
        if self.is_websocket:
            self.write_websocket(msg, opcode)
        else:
            self.write_socket(msg)

    def write_websocket(self, msg, opcode=OPCODE_TEXT):
        """Frame *msg* as a single unmasked WebSocket frame and send it.

        Non-bytes messages are JSON-encoded first.  Empty messages are
        silently ignored.
        """
        if not msg:
            return
        # Validate message
        if not isinstance(msg, (bytes, bytearray)):
            msg = json.dumps(msg, skipkeys=True).encode()
        header = bytearray()
        payload = msg
        payload_len = len(payload)
        if payload_len <= 125:
            header.append(FIN | opcode)
            header.append(payload_len)
        elif payload_len >= 126 and payload_len <= 65535:
            header.append(FIN | opcode)
            header.append(PAYLOAD_LEN_EXT16)
            header.extend(struct.pack(">H", payload_len))
        elif payload_len < 18446744073709551616:
            # 64-bit extended length (upper bound is 2**64).
            header.append(FIN | opcode)
            header.append(PAYLOAD_LEN_EXT64)
            header.extend(struct.pack(">Q", payload_len))
        try:
            #self.request.send(header + payload)
            self.wfile.write(header + payload)
            return
        except (ConnectionAbortedError, OSError):
            self.is_connected = False
            self.handshaked = False
            return

    def write_socket(self, msg):
        """Send *msg* to a raw socket client as length-prefixed pickle."""
        if not msg:
            return
        if isinstance(msg, str):
            msg = msg.encode()
        payload = pickle.dumps(msg)
        payload_len = struct.pack('!I', len(payload))
        try:
            #self.request.send(payload_len + payload)
            self.wfile.write(payload_len + payload)
            return
        except (ConnectionResetError, BrokenPipeError):
            self.is_connected = False
            self.handshaked = False
            return

    def get_headers(self):
        """Read an HTTP GET request and return its headers (lower-cased keys).

        Returns an empty dict when the first line is not a GET request,
        which signals a raw (non-WebSocket) client.
        """
        headers = {}
        http_get = self.rfile.readline().decode().strip()
        if http_get.upper()[:3] == 'GET':
            while True:
                header = self.rfile.readline().decode().strip()
                if not header:
                    break
                k, v = header.split(':', 1)
                headers[k.lower().strip()] = v.strip()
        return headers

    def handshake(self):
        """Negotiate the connection type on first contact.

        No HTTP GET -> raw socket client; a GET carrying Sec-WebSocket-Key
        -> perform the WebSocket upgrade; anything else is dropped.
        """
        headers = self.get_headers()
        if not headers:
            self.is_websocket = False
            self.handshaked = True
            self.server._client_add_(self)
            return
        if 'sec-websocket-key' not in headers:
            self.is_connected = False
            return
        key = headers['sec-websocket-key']
        res = self.handshake_res(key)
        #self.handshaked = self.request.send(res.encode())
        self.handshaked = self.wfile.write(res.encode())
        self.server._client_add_(self)

    def handshake_res(self, key):
        """Build the HTTP 101 Switching Protocols response for *key*."""
        res = 'HTTP/1.1 101 Switching Protocols\r\n'\
              'Upgrade: websocket\r\n'\
              'Connection: Upgrade\r\n'\
              f'Sec-WebSocket-Accept: {self.calc_key(key)}\r\n\r\n'
        return res

    def calc_key(self, key):
        """Sec-WebSocket-Accept value: base64(sha1(client_key + GUID))."""
        hash = sha1(key.encode() + GUID)
        res_key = b64encode(hash.digest()).strip()
        return res_key.decode('ASCII')

    def finish(self):
        # Always drop the client from the server registry on disconnect.
        self.server._client_remove_(self)
        StreamRequestHandler.finish(self)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,514
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/livedash/served.py
|
import http.server
import socketserver
PORT = 89          # NOTE(review): ports < 1024 normally need elevated privileges -- confirm deployment runs privileged
HOST = "0.0.0.0"   # listen on all interfaces
DIRECTORY = 'livedash'  # web root, relative to the current working directory
class Handler(http.server.SimpleHTTPRequestHandler):
    """Static-file handler rooted at DIRECTORY instead of the CWD."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, directory=DIRECTORY, **kwargs)
def main():
    """Serve DIRECTORY over HTTP on (HOST, PORT) until the process exits."""
    with socketserver.ThreadingTCPServer((HOST, PORT), Handler) as httpd:
        print(f"Serving at host {HOST} port {PORT}")
        # serve_forever() only returns after shutdown() is called, so the
        # original `while True:` wrapper around it was dead code -- removed.
        httpd.serve_forever()
if __name__ == "__main__":
    main()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,515
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/op_edit.py
|
#!/usr/bin/env python3
# flake8: noqa
#pylint: skip-file
import time
import ast
import difflib
from common.op_params import opParams
from common.colors import COLORS
from collections import OrderedDict
class opEdit: # use by running `python /data/openpilot/op_edit.py`
def __init__(self):
    # Handle to the persistent opParams store.
    self.op_params = opParams()
    self.params = None  # refreshed each loop with the current param dict
    self.sleep_time = 0.75  # default pause (seconds) after printed messages
    # Whether to show only live-tunable params; persisted across runs.
    self.live_tuning = self.op_params.get('op_edit_live_mode')
    self.username = self.op_params.get('username')
    # Display color per value type; bools map to a value-dependent color.
    self.type_colors = {int: COLORS.BASE(179), float: COLORS.BASE(179),
                        bool: {False: COLORS.RED, True: COLORS.OKGREEN},
                        type(None): COLORS.BASE(177),
                        str: COLORS.BASE(77)}
    self.last_choice = None  # index of the most recently edited param
    self.run_init()
def run_init(self):
    """Startup flow: optionally prompt for a Discord username once, then
    enter the main edit loop."""
    if self.username is None:
        self.success('\nWelcome to the opParams command line editor!', sleep_time=0)
        self.prompt('Parameter \'username\' is missing! Would you like to add your Discord username for easier crash debugging?')
        username_choice = self.input_with_options(['Y', 'n', 'don\'t ask again'], default='n')[0]
        if username_choice == 0:  # chose 'Y'
            self.prompt('Please enter your Discord username so the developers can reach out if a crash occurs:')
            username = ''
            # Re-prompt until a non-empty username is entered.
            while username == '':
                username = input('>> ').strip()
            self.success('Thanks! Saved your Discord username\n'
                         'Edit the \'username\' parameter at any time to update', sleep_time=3.0)
            self.op_params.put('username', username)
            self.username = username
        elif username_choice == 2:  # chose "don't ask again"
            # Storing False (instead of None) suppresses the prompt next run.
            self.op_params.put('username', False)
            self.info('Got it, bringing you into opEdit\n'
                      'Edit the \'username\' parameter at any time to update', sleep_time=3.0)
    else:
        self.success('\nWelcome to the opParams command line editor, {}!'.format(self.username), sleep_time=0)
    self.run_loop()
def run_loop(self):
    """Main interactive loop: render the parameter list, read a command,
    and dispatch to add/change/delete/live-toggle/exit."""
    while True:
        if not self.live_tuning:
            self.info('Here are your parameters:', end='\n', sleep_time=0)
        else:
            self.info('Here are your live parameters:', sleep_time=0)
            self.info('(changes take effect within {} seconds)'.format(self.op_params.read_frequency), end='\n', sleep_time=0)
        self.params = self.op_params.get(force_live=True)
        if self.live_tuning:  # only display live tunable params
            self.params = {k: v for k, v in self.params.items() if self.op_params.param_info(k).live}
        self.params = OrderedDict(sorted(self.params.items(), key=self.sort_params))
        values_list = []
        for k, v in self.params.items():
            # Short values are colored by type (bools by their value); long
            # values are elided to 'head ... tail'.
            if len(str(v)) < 30 or len(str(v)) <= len('{} ... {}'.format(str(v)[:30], str(v)[-15:])):
                v_color = ''
                if type(v) in self.type_colors:
                    v_color = self.type_colors[type(v)]
                    if isinstance(v, bool):
                        v_color = v_color[v]
                v = '{}{}{}'.format(v_color, v, COLORS.ENDC)
            else:
                v = '{} ... {}'.format(str(v)[:30], str(v)[-15:])
            values_list.append(v)
        live = [COLORS.INFO + '(live!)' + COLORS.ENDC if self.op_params.param_info(k).live else '' for k in self.params]
        to_print = []
        blue_gradient = [33, 39, 45, 51, 87]
        last_key = ''
        last_info = None
        shown_dots = False
        for idx, param in enumerate(self.params):
            info = self.op_params.param_info(param)
            # Child params are indented one dot per level of nesting.
            indent = self.get_sort_key(param).count(',')
            line = ''
            # NOTE(review): `or` binds looser than `and`, so this shows the
            # param when it has no dependency, OR when it is a child of the
            # previous top-level param and both dependency values are truthy
            # -- confirm this precedence is intended.
            if not info.depends_on or param in last_info.children and \
                    self.op_params.get(last_key) and self.op_params.get(info.depends_on):
                line = '{}. {}: {} {}'.format(idx + 1, param, values_list[idx], live[idx])
                line = indent * '.' + line
            elif not shown_dots and last_info and param in last_info.children:
                # Hidden (inactive) children collapse into a single '...' row.
                line = '...'
                shown_dots = True
            if line:
                # Highlight the most recently edited param in green; otherwise
                # fade through a blue gradient down the list.
                if idx == self.last_choice and self.last_choice is not None:
                    line = COLORS.OKGREEN + line
                else:
                    _color = blue_gradient[min(round(idx / len(self.params) * len(blue_gradient)), len(blue_gradient) - 1)]
                    line = COLORS.BASE(_color) + line
                if last_info and len(last_info.children) and indent == 0:
                    # Blank line separates a finished parent group.
                    line = '\n' + line
                    shown_dots = False
                to_print.append(line)
            if indent == 0:
                last_key = param
                last_info = info
        extras = {'a': ('Add new parameter', COLORS.OKGREEN),
                  'd': ('Delete parameter', COLORS.FAIL),
                  'l': ('Toggle live tuning', COLORS.WARNING),
                  'e': ('Exit opEdit', COLORS.PINK)}
        to_print += ['---'] + ['{}. {}'.format(ext_col + e, ext_txt + COLORS.ENDC) for e, (ext_txt, ext_col) in extras.items()]
        print('\n'.join(to_print))
        self.prompt('\nChoose a parameter to edit (by index or name):')
        choice = input('>> ').strip().lower()
        parsed, choice = self.parse_choice(choice, len(self.params) + len(extras))
        if parsed == 'continue':
            continue
        elif parsed == 'add':
            self.add_parameter()
        elif parsed == 'change':
            self.last_choice = choice
            self.change_parameter(choice)
        elif parsed == 'delete':
            self.delete_parameter()
        elif parsed == 'live':
            self.last_choice = None
            self.live_tuning = not self.live_tuning
            self.op_params.put('op_edit_live_mode', self.live_tuning)  # for next opEdit startup
        elif parsed == 'exit':
            return
def parse_choice(self, choice, opt_len):
    """Translate raw menu input into an (action, payload) pair.

    Returns one of 'change', 'add', 'delete', 'live', 'exit', 'continue'
    together with either a parameter index or the raw input string.
    """
    # Numeric input selects a menu entry by 1-based index.
    if choice.isdigit():
        idx = int(choice) - 1
        if 0 <= idx < opt_len:  # number of options to choose from
            return 'change', idx
        self.error('Not in range!')
        return 'continue', idx
    # Single-letter shortcuts for the extra menu actions.
    letter_actions = {'a': 'add', 'add': 'add',
                      'd': 'delete', 'delete': 'delete', 'del': 'delete',
                      'l': 'live', 'live': 'live'}
    if choice in letter_actions:
        return letter_actions[choice], choice
    if choice in ('exit', 'e', ''):
        self.error('Exiting opEdit!', sleep_time=0)
        return 'exit', choice
    # Otherwise fuzzy-match the input against known parameter names.
    scored = [(i, self.str_sim(choice, name.lower())) for i, name in enumerate(self.params)]
    close_matches = [entry for entry in scored if entry[1] > 0.5]
    if close_matches:
        best = max(close_matches, key=lambda entry: entry[1])
        return 'change', best[0]  # return idx
    self.error('Invalid choice!')
    return 'continue', choice
def str_sim(self, a, b):
    """Return the similarity ratio (0.0..1.0) between two strings."""
    matcher = difflib.SequenceMatcher(a=a, b=b)
    return matcher.ratio()
def change_parameter(self, choice):
    """Interactively edit the parameter at index `choice` of self.params."""
    while True:
        chosen_key = list(self.params)[choice]
        param_info = self.op_params.param_info(chosen_key)
        old_value = self.params[chosen_key]
        self.info('Chosen parameter: {}'.format(chosen_key), sleep_time=0)
        # Build an informational banner (description, allowed types, live-tune hint).
        to_print = []
        if param_info.has_description:
            to_print.append(COLORS.OKGREEN + '>> Description: {}'.format(param_info.description.replace('\n', '\n  > ')) + COLORS.ENDC)
        if param_info.has_allowed_types:
            to_print.append(COLORS.RED + '>> Allowed types: {}'.format(', '.join([at.__name__ if isinstance(at, type) else at for at in param_info.allowed_types])) + COLORS.ENDC)
        if param_info.live:
            live_msg = '>> This parameter supports live tuning!'
            if not self.live_tuning:
                live_msg += ' Updates should take effect within {} seconds'.format(self.op_params.read_frequency)
            to_print.append(COLORS.YELLOW + live_msg + COLORS.ENDC)
        if to_print:
            print('\n{}\n'.format('\n'.join(to_print)))
        # List-valued parameters get their own editor.
        if param_info.is_list:
            self.change_param_list(old_value, param_info, chosen_key)  # TODO: need to merge the code in this function with the below to reduce redundant code
            return
        self.info('Current value: {} (type: {})'.format(old_value, type(old_value).__name__), sleep_time=0)
        while True:
            if param_info.live:
                self.prompt('\nEnter your new value or [Enter] to exit:')
            else:
                self.prompt('\nEnter your new value:')
            new_value = input('>> ').strip()
            if new_value == '':
                self.info('Exiting this parameter...', 0.5)
                return
            new_value = self.str_eval(new_value)
            # Allow 0/1 as shorthand for booleans.
            if param_info.is_bool and type(new_value) is int:
                new_value = bool(new_value)
            if not param_info.is_valid(new_value):
                self.error('The type of data you entered ({}) is not allowed with this parameter!'.format(type(new_value).__name__))
                continue
            if param_info.live:  # stay in live tuning interface
                self.op_params.put(chosen_key, new_value)
                self.success('Saved {} with value: {}! (type: {})'.format(chosen_key, new_value, type(new_value).__name__))
            else:  # else ask to save and break
                print('\nOld value: {} (type: {})'.format(old_value, type(old_value).__name__))
                print('New value: {} (type: {})'.format(new_value, type(new_value).__name__))
                self.prompt('\nDo you want to save this?')
                # input_with_options returns the matched option index; 0 == 'Y'.
                if self.input_with_options(['Y', 'n'], 'n')[0] == 0:
                    self.op_params.put(chosen_key, new_value)
                    self.success('Saved!')
                else:
                    self.info('Not saved!', sleep_time=0)
                return
def change_param_list(self, old_value, param_info, chosen_key):
    """Interactive editor for list-valued parameters.

    Supports editing one index, removing an index with '-i', appending a
    value with '+value', or replacing the whole list by entering a list.
    Saves via self.op_params.put on each accepted change.
    """
    while True:
        self.info('Current value: {} (type: {})'.format(old_value, type(old_value).__name__), sleep_time=0)
        self.prompt('\nEnter index to edit (0 to {}), or -i to remove index, or +value to append value:'.format(len(old_value) - 1))
        append_val = False
        remove_idx = False
        choice_idx = input('>> ')
        if choice_idx == '':
            self.info('Exiting this parameter...', 0.5)
            return
        # A leading '-' or '+' selects remove/append mode; strip the sigil.
        if isinstance(choice_idx, str):
            if choice_idx[0] == '-':
                remove_idx = True
            if choice_idx[0] == '+':
                append_val = True
            if append_val or remove_idx:
                choice_idx = choice_idx[1::]
        choice_idx = self.str_eval(choice_idx)
        is_list = isinstance(choice_idx, list)
        # Plain edits/removals need an in-range integer index.
        if not append_val and not (is_list or (isinstance(choice_idx, int) and choice_idx in range(len(old_value)))):
            # Fixed typo: was "integar".
            self.error('Must be an integer within list range!')
            continue
        while True:
            if append_val or remove_idx or is_list:
                new_value = choice_idx
            else:
                self.info('Chosen index: {}'.format(choice_idx), sleep_time=0)
                self.info('Value: {} (type: {})'.format(old_value[choice_idx], type(old_value[choice_idx]).__name__), sleep_time=0)
                self.prompt('\nEnter your new value:')
                new_value = input('>> ').strip()
                new_value = self.str_eval(new_value)
                if new_value == '':
                    self.info('Exiting this list item...', 0.5)
                    break
            if not param_info.is_valid(new_value):
                self.error('The type of data you entered ({}) is not allowed with this parameter!'.format(type(new_value).__name__))
                break
            if append_val:
                old_value.append(new_value)
            elif remove_idx:
                del old_value[choice_idx]
            elif is_list:
                old_value = new_value
            else:
                old_value[choice_idx] = new_value
            self.op_params.put(chosen_key, old_value)
            self.success('Saved {} with value: {}! (type: {})'.format(chosen_key, new_value, type(new_value).__name__), end='\n')
            break
def cyan(self, msg, end=''):
    """Return *msg* wrapped in the cyan style (does not print)."""
    return self.str_color(msg, style='cyan')
def prompt(self, msg, end=''):
    # Print *msg* styled as a user prompt; unlike info/error/success there is no sleep.
    msg = self.str_color(msg, style='prompt')
    print(msg, flush=True, end='\n' + end)
def info(self, msg, sleep_time=None, end=''):
    # Print *msg* in the info style, then pause so the user can read it.
    # sleep_time=None falls back to the instance-wide default.
    if sleep_time is None:
        sleep_time = self.sleep_time
    msg = self.str_color(msg, style='info')
    print(msg, flush=True, end='\n' + end)
    time.sleep(sleep_time)
def error(self, msg, sleep_time=None, end='', surround=True):
    # Print *msg* in the failure style (framed with dashes when surround=True), then pause.
    if sleep_time is None:
        sleep_time = self.sleep_time
    msg = self.str_color(msg, style='fail', surround=surround)
    print(msg, flush=True, end='\n' + end)
    time.sleep(sleep_time)
def success(self, msg, sleep_time=None, end=''):
    # Print *msg* in the success style, then pause so the user can read it.
    if sleep_time is None:
        sleep_time = self.sleep_time
    msg = self.str_color(msg, style='success')
    print(msg, flush=True, end='\n' + end)
    time.sleep(sleep_time)
def str_color(self, msg, style, surround=False):
    """Wrap *msg* in the ANSI escape sequence for the named style.

    Unknown style names are used verbatim as the escape prefix, matching
    the original if/elif fall-through behavior.
    """
    palette = {'success': COLORS.SUCCESS,
               'fail': COLORS.FAIL,
               'prompt': COLORS.PROMPT,
               'info': COLORS.INFO,
               'cyan': COLORS.CYAN}
    style = palette.get(style, style)
    if surround:
        return '{}--------\n{}\n{}--------{}'.format(style, msg, COLORS.ENDC + style, COLORS.ENDC)
    return '{}{}{}'.format(style, msg, COLORS.ENDC)
def input_with_options(self, options, default=None):
    """
    Takes in a list of options and asks user to make a choice.
    The most similar option list index is returned along with the similarity percentage from 0 to 1
    """
    # NOTE(review): on empty input the raw `default` argument (not an index) is
    # returned as the first element; callers compare element 0 against an index
    # (== 0), so a string default like 'n' never matches -- confirm intended.
    user_input = input('[{}]: '.format('/'.join(options))).lower().strip()
    if not user_input:
        return default, 0.0
    sims = [self.str_sim(i.lower().strip(), user_input) for i in options]
    argmax = sims.index(max(sims))
    return argmax, sims[argmax]
def str_eval(self, dat):
    """Parse user text into a Python value.

    Tries ast.literal_eval first; otherwise maps the common shorthands
    'none', 'false'/'f', 'true'/'t' (case-insensitive), and falls back to
    returning the stripped string itself.
    """
    dat = dat.strip()
    try:
        return ast.literal_eval(dat)
    except Exception:
        pass
    lowered = dat.lower()
    if lowered == 'none':
        return None
    if lowered in ('false', 'f'):
        return False
    if lowered in ('true', 't'):  # else, assume string
        return True
    return dat
def delete_parameter(self):
    """Prompt for a parameter name and delete it after confirmation."""
    while True:
        self.prompt('Enter the name of the parameter to delete:')
        key = self.str_eval(input('>> '))
        if key == '':
            return
        if not isinstance(key, str):
            self.error('Input must be a string!')
            continue
        if key not in self.params:
            self.error("Parameter doesn't exist!")
            continue
        value = self.params.get(key)
        print('Parameter name: {}'.format(key))
        print('Parameter value: {} (type: {})'.format(value, type(value).__name__))
        self.prompt('Do you want to delete this?')
        # input_with_options returns the matched option index; 0 == 'Y'.
        if self.input_with_options(['Y', 'n'], default='n')[0] == 0:
            self.op_params.delete(key)
            self.success('Deleted!')
        else:
            self.info('Not deleted!')
        return
def add_parameter(self):
    """Prompt for a new parameter name and value, then save after confirmation."""
    while True:
        self.prompt('Type the name of your new parameter:')
        key = self.str_eval(input('>> '))
        if key == '':
            return
        if not isinstance(key, str):
            self.error('Input must be a string!')
            continue
        self.prompt("Enter the data you'd like to save with this parameter:")
        value = input('>> ').strip()
        value = self.str_eval(value)
        print('Parameter name: {}'.format(key))
        print('Parameter value: {} (type: {})'.format(value, type(value).__name__))
        self.prompt('Do you want to save this?')
        # input_with_options returns the matched option index; 0 == 'Y'.
        if self.input_with_options(['Y', 'n'], default='n')[0] == 0:
            self.op_params.put(key, value)
            self.success('Saved!')
        else:
            self.info('Not saved!')
        return
def sort_params(self, kv):
    """Sort key for (param_name, value) items; delegates to get_sort_key."""
    name, _ = kv
    return self.get_sort_key(name)
def get_sort_key(self, k):
    """Build a sortable key for parameter `k`.

    Params with children (or with dependencies) get a '0' prefix so they sort
    before independent leaf params ('1' prefix); dependent params sort under
    their full dependency chain, outermost ancestor first.
    """
    info = self.op_params.param_info(k)
    if not info.depends_on:
        prefix = '0' if info.children else '1'
        return prefix + k
    chain = ''
    node = info
    while node.depends_on:
        # Prepend each ancestor so the outermost dependency comes first.
        chain = '{},{}'.format(node.depends_on, chain)
        node = self.op_params.param_info(node.depends_on)
    return '0' + chain + k
opEdit()
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,516
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/external/simpleperf/utils.py
|
#!/usr/bin/env python3
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""utils.py: export utility functions.
"""
from __future__ import print_function
import logging
import os
import os.path
import re
import shutil
import subprocess
import sys
import time
def get_script_dir():
    """Return the absolute directory containing this script (symlinks resolved)."""
    script_path = os.path.realpath(__file__)
    return os.path.dirname(script_path)
def is_windows():
    """Return True when running on native Windows or Cygwin."""
    return sys.platform in ('win32', 'cygwin')
def is_darwin():
    # True when running on macOS.
    return sys.platform == 'darwin'
def get_platform():
    """Return 'windows', 'darwin' or 'linux' for the current host OS."""
    if sys.platform in ('win32', 'cygwin'):
        return 'windows'
    if sys.platform == 'darwin':
        return 'darwin'
    return 'linux'
def is_python3():
    """Return True when the interpreter is Python 3 or newer."""
    return sys.version_info[0] >= 3
def log_debug(msg):
    # Thin wrapper over the root logger's debug level.
    logging.debug(msg)

def log_info(msg):
    logging.info(msg)

def log_warning(msg):
    logging.warning(msg)

def log_fatal(msg):
    # Raises instead of logging, so callers can catch or let it abort the run.
    raise Exception(msg)

def log_exit(msg):
    # Terminates the process; sys.exit prints `msg` to stderr and exits non-zero.
    sys.exit(msg)

def disable_debug_log():
    # Raise the root logger threshold so debug/info messages are suppressed.
    logging.getLogger().setLevel(logging.WARN)
def str_to_bytes(str):
    """Encode text to UTF-8 bytes on Python 3; Python 2 strings pass through.

    NOTE: the parameter shadows the builtin `str`; kept for API compatibility.
    """
    if sys.version_info < (3, 0):
        return str
    # In python 3, str are wide strings whereas the C api expects 8 bit strings,
    # hence we have to convert. For now using utf-8 as the encoding.
    return str.encode('utf-8')
def bytes_to_str(bytes):
    """Decode UTF-8 bytes to text on Python 3; Python 2 strings pass through.

    NOTE: the parameter shadows the builtin `bytes`; kept for API compatibility.
    """
    if sys.version_info < (3, 0):
        return bytes
    return bytes.decode('utf-8')
def get_target_binary_path(arch, binary_name):
    """Return the path of a device (Android) binary bundled under bin/android/<arch>."""
    # Normalize the kernel's arch name to the bundled directory name.
    if arch == 'aarch64':
        arch = 'arm64'
    arch_dir = os.path.join(get_script_dir(), "bin", "android", arch)
    if not os.path.isdir(arch_dir):
        log_fatal("can't find arch directory: %s" % arch_dir)
    binary_path = os.path.join(arch_dir, binary_name)
    if not os.path.isfile(binary_path):
        log_fatal("can't find binary: %s" % binary_path)
    return binary_path
def get_host_binary_path(binary_name):
    """Return the path of a host-side binary bundled next to this script.

    Adjusts the file extension for the host OS (.so -> .dll/.dylib, adds .exe
    on Windows) and picks the matching bin/<os>/<bitness> directory.
    """
    base_dir = os.path.join(get_script_dir(), 'bin')
    if is_windows():
        if binary_name.endswith('.so'):
            binary_name = binary_name[0:-3] + '.dll'
        elif '.' not in binary_name:
            binary_name += '.exe'
        base_dir = os.path.join(base_dir, 'windows')
    elif sys.platform == 'darwin':  # OSX
        if binary_name.endswith('.so'):
            binary_name = binary_name[0:-3] + '.dylib'
        base_dir = os.path.join(base_dir, 'darwin')
    else:
        base_dir = os.path.join(base_dir, 'linux')
    # sys.maxsize > 2**32 distinguishes a 64-bit interpreter from a 32-bit one.
    base_dir = os.path.join(base_dir, 'x86_64' if sys.maxsize > 2 ** 32 else 'x86')
    binary_path = os.path.join(base_dir, binary_name)
    if not os.path.isfile(binary_path):
        log_fatal("can't find binary: %s" % binary_path)
    return binary_path
def is_executable_available(executable, option='--help'):
    """ Run an executable to see if it exists.

    Returns True only when the process launches and exits with status 0.
    """
    try:
        subproc = subprocess.Popen([executable, option], stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        subproc.communicate()
        return subproc.returncode == 0
    except (OSError, ValueError):
        # OSError: executable missing / not runnable; ValueError: bad Popen args.
        # Narrowed from a bare `except:` which also swallowed KeyboardInterrupt.
        return False
# Default NDK install location relative to the user's home directory, per host OS.
DEFAULT_NDK_PATH = {
    'darwin': 'Library/Android/sdk/ndk-bundle',
    'linux': 'Android/Sdk/ndk-bundle',
    'windows': 'AppData/Local/Android/sdk/ndk-bundle',
}

# Tools find_tool_path() knows how to locate.
#   is_binutils: tool lives in the NDK binutils toolchains (arch-prefixed name).
#   test_option: flag used to probe the tool (defaults to '--help').
#   path_in_ndk: relative location for non-binutils tools.
#   accept_tool_without_arch: fall back to the unprefixed name on $PATH.
EXPECTED_TOOLS = {
    'adb': {
        'is_binutils': False,
        'test_option': 'version',
        'path_in_ndk': '../platform-tools/adb',
    },
    'readelf': {
        'is_binutils': True,
        'accept_tool_without_arch': True,
    },
    'addr2line': {
        'is_binutils': True,
        'accept_tool_without_arch': True
    },
    'objdump': {
        'is_binutils': True,
    },
}
def _get_binutils_path_in_ndk(toolname, arch, platform):
if not arch:
arch = 'arm64'
if arch == 'arm64':
name = 'aarch64-linux-android-' + toolname
path = 'toolchains/aarch64-linux-android-4.9/prebuilt/%s-x86_64/bin/%s' % (platform, name)
elif arch == 'arm':
name = 'arm-linux-androideabi-' + toolname
path = 'toolchains/arm-linux-androideabi-4.9/prebuilt/%s-x86_64/bin/%s' % (platform, name)
elif arch == 'x86_64':
name = 'x86_64-linux-android-' + toolname
path = 'toolchains/x86_64-4.9/prebuilt/%s-x86_64/bin/%s' % (platform, name)
elif arch == 'x86':
name = 'i686-linux-android-' + toolname
path = 'toolchains/x86-4.9/prebuilt/%s-x86_64/bin/%s' % (platform, name)
else:
log_fatal('unexpected arch %s' % arch)
return (name, path)
def find_tool_path(toolname, ndk_path=None, arch=None):
    """Locate one of EXPECTED_TOOLS, searching in order: the given NDK, the
    NDK containing these scripts, the default NDK install path, then $PATH
    (arch-prefixed name first, plain name as a last resort).
    Returns the usable path/name, or None when the tool can't be found."""
    if toolname not in EXPECTED_TOOLS:
        return None
    tool_info = EXPECTED_TOOLS[toolname]
    is_binutils = tool_info['is_binutils']
    test_option = tool_info.get('test_option', '--help')
    platform = get_platform()
    if is_binutils:
        toolname_with_arch, path_in_ndk = _get_binutils_path_in_ndk(toolname, arch, platform)
    else:
        toolname_with_arch = toolname
        path_in_ndk = tool_info['path_in_ndk']
    # Convert the NDK-relative POSIX path to the host's separator.
    path_in_ndk = path_in_ndk.replace('/', os.sep)
    # 1. Find tool in the given ndk path.
    if ndk_path:
        path = os.path.join(ndk_path, path_in_ndk)
        if is_executable_available(path, test_option):
            return path
    # 2. Find tool in the ndk directory containing simpleperf scripts.
    path = os.path.join('..', path_in_ndk)
    if is_executable_available(path, test_option):
        return path
    # 3. Find tool in the default ndk installation path.
    home = os.environ.get('HOMEPATH') if is_windows() else os.environ.get('HOME')
    if home:
        default_ndk_path = os.path.join(home, DEFAULT_NDK_PATH[platform].replace('/', os.sep))
        path = os.path.join(default_ndk_path, path_in_ndk)
        if is_executable_available(path, test_option):
            return path
    # 4. Find tool in $PATH.
    if is_executable_available(toolname_with_arch, test_option):
        return toolname_with_arch
    # 5. Find tool without arch in $PATH.
    if is_binutils and tool_info.get('accept_tool_without_arch'):
        if is_executable_available(toolname, test_option):
            return toolname
    return None
class AdbHelper(object):
    """Thin wrapper around the `adb` command-line tool."""

    def __init__(self, enable_switch_to_root=True):
        # Fails the whole run early when adb can't be located.
        adb_path = find_tool_path('adb')
        if not adb_path:
            log_exit("Can't find adb in PATH environment.")
        self.adb_path = adb_path
        self.enable_switch_to_root = enable_switch_to_root

    def run(self, adb_args):
        # Run adb and report success/failure only.
        return self.run_and_return_output(adb_args)[0]

    def run_and_return_output(self, adb_args, stdout_file=None, log_output=True):
        """Run adb with `adb_args`; return (success, stdout_text).
        When `stdout_file` is given, stdout is streamed to that file and the
        returned text is empty."""
        adb_args = [self.adb_path] + adb_args
        log_debug('run adb cmd: %s' % adb_args)
        if stdout_file:
            with open(stdout_file, 'wb') as stdout_fh:
                returncode = subprocess.call(adb_args, stdout=stdout_fh)
            stdoutdata = ''
        else:
            subproc = subprocess.Popen(adb_args, stdout=subprocess.PIPE)
            (stdoutdata, _) = subproc.communicate()
            returncode = subproc.returncode
        result = (returncode == 0)
        # adb_args[1] is the adb subcommand (index 0 is the adb binary itself);
        # push/pull output is left as raw bytes, everything else is decoded.
        if stdoutdata and adb_args[1] != 'push' and adb_args[1] != 'pull':
            stdoutdata = bytes_to_str(stdoutdata)
            if log_output:
                log_debug(stdoutdata)
        log_debug('run adb cmd: %s [result %s]' % (adb_args, result))
        return (result, stdoutdata)

    def check_run(self, adb_args):
        # Like run(), but exits the process on failure.
        self.check_run_and_return_output(adb_args)

    def check_run_and_return_output(self, adb_args, stdout_file=None, log_output=True):
        # Like run_and_return_output(), but exits the process on failure.
        result, stdoutdata = self.run_and_return_output(adb_args, stdout_file, log_output)
        if not result:
            log_exit('run "adb %s" failed' % adb_args)
        return stdoutdata

    def _unroot(self):
        # Drop root if the adb daemon is currently running as root.
        result, stdoutdata = self.run_and_return_output(['shell', 'whoami'])
        if not result:
            return
        if 'root' not in stdoutdata:
            return
        log_info('unroot adb')
        self.run(['unroot'])
        self.run(['wait-for-device'])
        time.sleep(1)

    def switch_to_root(self):
        """Try to restart adbd as root; return True when root was obtained.
        When enable_switch_to_root is False, actively unroots instead."""
        if not self.enable_switch_to_root:
            self._unroot()
            return False
        result, stdoutdata = self.run_and_return_output(['shell', 'whoami'])
        if not result:
            return False
        if 'root' in stdoutdata:
            return True
        # `adb root` is not available on user builds.
        build_type = self.get_property('ro.build.type')
        if build_type == 'user':
            return False
        self.run(['root'])
        time.sleep(1)
        self.run(['wait-for-device'])
        result, stdoutdata = self.run_and_return_output(['shell', 'whoami'])
        return result and 'root' in stdoutdata

    def get_property(self, name):
        # Returns the property value as text, or None when getprop fails.
        result, stdoutdata = self.run_and_return_output(['shell', 'getprop', name])
        return stdoutdata if result else None

    def set_property(self, name, value):
        return self.run(['shell', 'setprop', name, value])

    def get_device_arch(self):
        # Map `uname -m` output to simpleperf's arch names.
        output = self.check_run_and_return_output(['shell', 'uname', '-m'])
        if 'aarch64' in output:
            return 'arm64'
        if 'arm' in output:
            return 'arm'
        if 'x86_64' in output:
            return 'x86_64'
        if '86' in output:
            return 'x86'
        log_fatal('unsupported architecture: %s' % output.strip())

    def get_android_version(self):
        """Return the major Android version as an int (0 when unknown).
        Pre-release letter builds are mapped by position: L=5, M=6, ..."""
        build_version = self.get_property('ro.build.version.release')
        android_version = 0
        if build_version:
            if not build_version[0].isdigit():
                c = build_version[0].upper()
                if c.isupper() and c >= 'L':
                    android_version = ord(c) - ord('L') + 5
            else:
                strs = build_version.split('.')
                if strs:
                    android_version = int(strs[0])
        return android_version
def flatten_arg_list(arg_list):
    """Flatten argparse's list-of-lists (from repeated nargs options) into one list.

    A falsy `arg_list` (None or empty) yields an empty list.
    """
    if not arg_list:
        return []
    return [item for group in arg_list for item in group]
def remove(dir_or_file):
    """Delete a file or an entire directory tree; silently ignore missing paths."""
    if os.path.isdir(dir_or_file):
        shutil.rmtree(dir_or_file, ignore_errors=True)
    elif os.path.isfile(dir_or_file):
        os.remove(dir_or_file)
def open_report_in_browser(report_path):
    """Open the given report in a browser, preferring Chrome when registered."""
    if is_darwin():
        # On darwin 10.12.6, webbrowser can't open browser, so try `open` cmd first.
        try:
            subprocess.check_call(['open', report_path])
            return
        except:
            pass
    import webbrowser
    try:
        # Try to open the report with Chrome
        browser_key = ''
        # NOTE(review): webbrowser._browsers is a private attribute and may
        # change between Python versions -- the except below covers that.
        for key, _ in webbrowser._browsers.items():
            if 'chrome' in key:
                browser_key = key
        browser = webbrowser.get(browser_key)
        browser.open(report_path, new=0, autoraise=True)
    except:
        # webbrowser.get() doesn't work well on darwin/windows.
        webbrowser.open_new_tab(report_path)
def find_real_dso_path(dso_path_in_record_file, binary_cache_path):
    """ Given the path of a shared library in perf.data, find its real path in the file system.

    Checks the binary cache first, then the literal path; returns None for
    anonymous maps, non-absolute paths, or when no file exists.
    """
    # Guard empty input (indexing [0] previously raised IndexError) and skip
    # the anonymous-map pseudo path and anything not absolute.
    if not dso_path_in_record_file or dso_path_in_record_file[0] != '/' \
            or dso_path_in_record_file == '//anon':
        return None
    if binary_cache_path:
        # The cache mirrors the device's filesystem layout under its root.
        tmp_path = os.path.join(binary_cache_path, dso_path_in_record_file[1:])
        if os.path.isfile(tmp_path):
            return tmp_path
    if os.path.isfile(dso_path_in_record_file):
        return dso_path_in_record_file
    return None
class Addr2Nearestline(object):
    """ Use addr2line to convert (dso_path, func_addr, addr) to (source_file, line) pairs.
        For instructions generated by C++ compilers without a matching statement in source code
        (like stack corruption check, switch optimization, etc.), addr2line can't generate
        line information. However, we want to assign the instruction to the nearest line before
        the instruction (just like objdump -dl). So we use below strategy:
        Instead of finding the exact line of the instruction in an address, we find the nearest
        line to the instruction in an address. If an address doesn't have a line info, we find
        the line info of address - 1. If still no line info, then use address - 2, address - 3,
        etc.
        The implementation steps are as below:
        1. Collect all (dso_path, func_addr, addr) requests before converting. This saves the
        times to call addr2line.
        2. Convert addrs to (source_file, line) pairs for each dso_path as below:
          2.1 Check if the dso_path has .debug_line. If not, omit its conversion.
          2.2 Get arch of the dso_path, and decide the addr_step for it. addr_step is the step we
          change addr each time. For example, since instructions of arm64 are all 4 bytes long,
          addr_step for arm64 can be 4.
          2.3 Use addr2line to find line info for each addr in the dso_path.
          2.4 For each addr without line info, use addr2line to find line info for
          range(addr - addr_step, addr - addr_step * 4 - 1, -addr_step).
          2.5 For each addr without line info, use addr2line to find line info for
          range(addr - addr_step * 5, addr - addr_step * 128 - 1, -addr_step).
          (128 is a guess number. A nested switch statement in
          system/core/demangle/Demangler.cpp has >300 bytes without line info in arm64.)
    """
    class Dso(object):
        """ Info of a dynamic shared library.
            addrs: a map from address to Addr object in this dso.
        """
        def __init__(self):
            self.addrs = {}

    class Addr(object):
        """ Info of an addr request.
            func_addr: start_addr of the function containing addr.
            source_lines: a list of [file_id, line_number] for addr.
                          source_lines[:-1] are all for inlined functions.
        """
        def __init__(self, func_addr):
            self.func_addr = func_addr
            self.source_lines = None

    def __init__(self, ndk_path, binary_cache_path):
        self.addr2line_path = find_tool_path('addr2line', ndk_path)
        if not self.addr2line_path:
            log_exit("Can't find addr2line. Please set ndk path with --ndk-path option.")
        self.readelf = ReadElf(ndk_path)
        self.dso_map = {}  # map from dso_path to Dso.
        self.binary_cache_path = binary_cache_path
        # Saving file names for each addr takes a lot of memory. So we store file ids in Addr,
        # and provide data structures connecting file id and file name here.
        self.file_name_to_id = {}
        self.file_id_to_name = []

    def add_addr(self, dso_path, func_addr, addr):
        # Queue a conversion request; duplicates of the same addr are merged.
        dso = self.dso_map.get(dso_path)
        if dso is None:
            dso = self.dso_map[dso_path] = self.Dso()
        if addr not in dso.addrs:
            dso.addrs[addr] = self.Addr(func_addr)

    def convert_addrs_to_lines(self):
        # Resolve all queued requests, one dso at a time.
        for dso_path in self.dso_map:
            self._convert_addrs_in_one_dso(dso_path, self.dso_map[dso_path])

    def _convert_addrs_in_one_dso(self, dso_path, dso):
        real_path = find_real_dso_path(dso_path, self.binary_cache_path)
        if not real_path:
            if dso_path not in ['//anon', 'unknown', '[kernel.kallsyms]']:
                log_debug("Can't find dso %s" % dso_path)
            return
        if not self._check_debug_line_section(real_path):
            log_debug("file %s doesn't contain .debug_line section." % real_path)
            return
        addr_step = self._get_addr_step(real_path)
        # Three passes with widening backward shifts (see class docstring 2.3-2.5).
        self._collect_line_info(dso, real_path, [0])
        self._collect_line_info(dso, real_path, range(-addr_step, -addr_step * 4 - 1, -addr_step))
        self._collect_line_info(dso, real_path,
                                range(-addr_step * 5, -addr_step * 128 - 1, -addr_step))

    def _check_debug_line_section(self, real_path):
        return '.debug_line' in self.readelf.get_sections(real_path)

    def _get_addr_step(self, real_path):
        # Minimum instruction size per arch: 4 bytes on arm64, 2 on arm (thumb), 1 otherwise.
        arch = self.readelf.get_arch(real_path)
        if arch == 'arm64':
            return 4
        if arch == 'arm':
            return 2
        return 1

    def _collect_line_info(self, dso, real_path, addr_shifts):
        """ Use addr2line to get line info in a dso, with given addr shifts. """
        # 1. Collect addrs to send to addr2line.
        addr_set = set()
        for addr in dso.addrs:
            addr_obj = dso.addrs[addr]
            if addr_obj.source_lines:  # already has source line, no need to search.
                continue
            for shift in addr_shifts:
                # The addr after shift shouldn't change to another function.
                shifted_addr = max(addr + shift, addr_obj.func_addr)
                addr_set.add(shifted_addr)
                if shifted_addr == addr_obj.func_addr:
                    break
        if not addr_set:
            return
        addr_request = '\n'.join(['%x' % addr for addr in sorted(addr_set)])
        # 2. Use addr2line to collect line info.
        try:
            # -a echoes each address back (used as a record separator below),
            # -i also prints inlined call chains.
            subproc = subprocess.Popen([self.addr2line_path, '-ai', '-e', real_path],
                                       stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            (stdoutdata, _) = subproc.communicate(str_to_bytes(addr_request))
            stdoutdata = bytes_to_str(stdoutdata)
        except:
            return
        addr_map = {}
        cur_line_list = None
        for line in stdoutdata.strip().split('\n'):
            if line[:2] == '0x':
                # a new address
                cur_line_list = addr_map[int(line, 16)] = []
            else:
                # a file:line.
                if cur_line_list is None:
                    continue
                # Handle lines like "C:\Users\...\file:32".
                items = line.rsplit(':', 1)
                if len(items) != 2:
                    continue
                if '?' in line:
                    # if ? in line, it doesn't have a valid line info.
                    # An addr can have a list of (file, line), when the addr belongs to an inlined
                    # function. Sometimes only part of the list has ? mark. In this case, we think
                    # the line info is valid if the first line doesn't have ? mark.
                    if not cur_line_list:
                        cur_line_list = None
                    continue
                (file_path, line_number) = items
                line_number = line_number.split()[0]  # Remove comments after line number
                try:
                    line_number = int(line_number)
                except ValueError:
                    continue
                file_id = self._get_file_id(file_path)
                cur_line_list.append((file_id, line_number))
        # 3. Fill line info in dso.addrs.
        for addr in dso.addrs:
            addr_obj = dso.addrs[addr]
            if addr_obj.source_lines:
                continue
            for shift in addr_shifts:
                shifted_addr = max(addr + shift, addr_obj.func_addr)
                lines = addr_map.get(shifted_addr)
                if lines:
                    addr_obj.source_lines = lines
                    break
                if shifted_addr == addr_obj.func_addr:
                    break

    def _get_file_id(self, file_path):
        # Intern the file path, assigning a new sequential id on first sight.
        file_id = self.file_name_to_id.get(file_path)
        if file_id is None:
            file_id = self.file_name_to_id[file_path] = len(self.file_id_to_name)
            self.file_id_to_name.append(file_path)
        return file_id

    def get_dso(self, dso_path):
        return self.dso_map.get(dso_path)

    def get_addr_source(self, dso, addr):
        # Expand interned file ids back to (file_name, line) pairs for callers.
        source = dso.addrs[addr].source_lines
        if source is None:
            return None
        return [(self.file_id_to_name[file_id], line) for (file_id, line) in source]
class Objdump(object):
    """ A wrapper of objdump to disassemble code. """
    def __init__(self, ndk_path, binary_cache_path):
        self.ndk_path = ndk_path
        self.binary_cache_path = binary_cache_path
        self.readelf = ReadElf(ndk_path)
        # Cache of located objdump binaries, keyed by target arch.
        self.objdump_paths = {}

    def disassemble_code(self, dso_path, start_addr, addr_len):
        """ Disassemble [start_addr, start_addr + addr_len] of dso_path.
            Return a list of pair (disassemble_code_line, addr).
        """
        # 1. Find real path.
        real_path = find_real_dso_path(dso_path, self.binary_cache_path)
        if real_path is None:
            return None
        # 2. Get path of objdump.
        arch = self.readelf.get_arch(real_path)
        if arch == 'unknown':
            return None
        objdump_path = self.objdump_paths.get(arch)
        if not objdump_path:
            objdump_path = find_tool_path('objdump', self.ndk_path, arch)
            if not objdump_path:
                log_exit("Can't find objdump. Please set ndk path with --ndk_path option.")
            self.objdump_paths[arch] = objdump_path
        # 3. Run objdump (-d disassemble, -l source lines, -C demangle C++).
        args = [objdump_path, '-dlC', '--no-show-raw-insn',
                '--start-address=0x%x' % start_addr,
                '--stop-address=0x%x' % (start_addr + addr_len),
                real_path]
        try:
            subproc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            (stdoutdata, _) = subproc.communicate()
            stdoutdata = bytes_to_str(stdoutdata)
        except:
            return None
        if not stdoutdata:
            return None
        result = []
        for line in stdoutdata.split('\n'):
            line = line.rstrip()  # Remove '\r' on Windows.
            # Instruction lines start with "hexaddr:"; anything else gets addr 0.
            items = line.split(':', 1)
            try:
                addr = int(items[0], 16)
            except ValueError:
                addr = 0
            result.append((line, addr))
        return result
class ReadElf(object):
    """ A wrapper of readelf. """
    def __init__(self, ndk_path):
        self.readelf_path = find_tool_path('readelf', ndk_path)
        if not self.readelf_path:
            log_exit("Can't find readelf. Please set ndk path with --ndk_path option.")

    def get_arch(self, elf_file_path):
        """ Get arch of an elf file ('arm64', 'arm', 'x86_64', 'x86' or 'unknown'). """
        try:
            output = subprocess.check_output([self.readelf_path, '-h', elf_file_path])
            # BUG FIX: check_output returns bytes on Python 3 and bytes.find(str)
            # raises TypeError, so decode first — get_build_id/get_sections below
            # already did this.
            output = bytes_to_str(output)
            # Check 'AArch64' before 'ARM': only the latter matches 32-bit arm.
            if output.find('AArch64') != -1:
                return 'arm64'
            if output.find('ARM') != -1:
                return 'arm'
            if output.find('X86-64') != -1:
                return 'x86_64'
            if output.find('80386') != -1:
                return 'x86'
        except subprocess.CalledProcessError:
            pass
        return 'unknown'

    def get_build_id(self, elf_file_path):
        """ Get build id of an elf file, normalized to '0x' + 40 hex chars ('' on failure). """
        try:
            output = subprocess.check_output([self.readelf_path, '-n', elf_file_path])
            output = bytes_to_str(output)
            result = re.search(r'Build ID:\s*(\S+)', output)
            if result:
                build_id = result.group(1)
                # Pad/truncate to exactly 40 characters so ids compare consistently.
                if len(build_id) < 40:
                    build_id += '0' * (40 - len(build_id))
                else:
                    build_id = build_id[:40]
                build_id = '0x' + build_id
                return build_id
        except subprocess.CalledProcessError:
            pass
        return ""

    def get_sections(self, elf_file_path):
        """ Get section names of an elf file. """
        section_names = []
        try:
            output = subprocess.check_output([self.readelf_path, '-SW', elf_file_path])
            output = bytes_to_str(output)
            for line in output.split('\n'):
                # Parse line like:" [ 1] .note.android.ident NOTE 0000000000400190 ...".
                result = re.search(r'^\s+\[\s*\d+\]\s(.+?)\s', line)
                if result:
                    section_name = result.group(1).strip()
                    if section_name:
                        section_names.append(section_name)
        except subprocess.CalledProcessError:
            pass
        return section_names
def extant_dir(arg):
    """ArgumentParser type that only accepts extant directories.
    Args:
        arg: The string argument given on the command line.
    Returns: The argument as a realpath.
    Raises:
        argparse.ArgumentTypeError: The given path isn't a directory.
    """
    real_path = os.path.realpath(arg)
    if os.path.isdir(real_path):
        return real_path
    # Imported lazily, only on the error path, as in the original.
    import argparse
    raise argparse.ArgumentTypeError('{} is not a directory.'.format(real_path))
# Enable debug-level output on the root logger for this tool.
logging.getLogger().setLevel(logging.DEBUG)
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
16,594,517
|
rav4kumar/openpilot
|
refs/heads/DP08
|
/selfdrive/test/profiling/profiler.py
|
#!/usr/bin/env python3
import os
import sys
import cProfile # pylint: disable=import-error
import pprofile # pylint: disable=import-error
import pyprof2calltree # pylint: disable=import-error
from common.params import Params
from tools.lib.logreader import LogReader
from selfdrive.test.profiling.lib import SubMaster, PubMaster, SubSocket, ReplayDone
from selfdrive.test.process_replay.process_replay import CONFIGS
# Base URL of the CI blob store that hosts the recorded drive logs.
BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/"
# Car name -> (route segment id, fingerprint string) used to pick replay input.
CARS = {
    'toyota': ("77611a1fac303767|2020-02-29--13-29-33/3", "TOYOTA COROLLA TSS2 2019"),
    'honda': ("99c94dc769b5d96e|2019-08-03--14-19-59/2", "HONDA CIVIC 2016 TOURING"),
}
def get_inputs(msgs, process):
    """Build the replay harness (SubMaster, PubMaster, can socket) for a process.

    Args:
      msgs: list of log messages to replay as the process's input.
      process: proc_name to look up in CONFIGS.
    Returns:
      (sm, pm, can_sock) where can_sock is None if the process does not read CAN.
    Raises:
      ValueError: if no entry in CONFIGS matches `process`.
    """
    sub_socks = None
    trigger = None
    for config in CONFIGS:
        if config.proc_name == process:
            sub_socks = list(config.pub_sub.keys())
            trigger = sub_socks[0]
            break
    if sub_socks is None:
        # Previously fell through to an opaque UnboundLocalError on sub_socks;
        # fail fast with a clear message instead.
        raise ValueError(f"no process config found for '{process}'")

    # some procs block on CarParams
    for msg in msgs:
        if msg.which() == 'carParams':
            Params().put("CarParams", msg.as_builder().to_bytes())
            break

    sm = SubMaster(msgs, trigger, sub_socks)
    pm = PubMaster()
    can_sock = SubSocket(msgs, 'can') if 'can' in sub_socks else None
    return sm, pm, can_sock
def profile(proc, func, car='toyota'):
    """Profile `func`, the entry point of process `proc`, over a replayed route.

    The same replayed messages are run twice: once under pprofile's statistical
    profiler and once under cProfile (deterministic), each dumped in callgrind
    format as 'cachegrind.out.<proc>_<mode>'.
    """
    segment, fingerprint = CARS[car]
    segment = segment.replace('|', '/')
    rlog_url = f"{BASE_URL}{segment}/rlog.bz2"
    # LOOP lets the caller replay the same segment several times for longer runs.
    msgs = list(LogReader(rlog_url)) * int(os.getenv("LOOP", "1"))
    os.environ['FINGERPRINT'] = fingerprint

    def replay(sm, pm, can_sock):
        # Drive the process until the replayed messages are exhausted.
        try:
            if can_sock is None:
                func(sm, pm)
            else:
                func(sm, pm, can_sock)
        except ReplayDone:
            pass

    # Statistical
    sm, pm, can_sock = get_inputs(msgs, proc)
    with pprofile.StatisticalProfile()(period=0.00001) as pr:
        replay(sm, pm, can_sock)
    pr.dump_stats(f'cachegrind.out.{proc}_statistical')

    # Deterministic
    sm, pm, can_sock = get_inputs(msgs, proc)
    with cProfile.Profile() as pr:
        replay(sm, pm, can_sock)
    pyprof2calltree.convert(pr.getstats(), f'cachegrind.out.{proc}_deterministic')
if __name__ == '__main__':
    # Imported here so merely importing this module doesn't pull in every process.
    from selfdrive.controls.controlsd import main as controlsd_thread
    from selfdrive.controls.radard import radard_thread
    from selfdrive.locationd.locationd import locationd_thread
    from selfdrive.locationd.paramsd import main as paramsd_thread

    # Profileable process name -> entry point.
    procs = {
        'radard': radard_thread,
        'controlsd': controlsd_thread,
        'locationd': locationd_thread,
        'paramsd': paramsd_thread,
    }

    if len(sys.argv) < 2:
        # Previously crashed with IndexError when invoked without an argument.
        print(f"usage: {sys.argv[0]} <{'|'.join(procs)}>")
        sys.exit(1)

    proc = sys.argv[1]
    if proc not in procs:
        print(f"{proc} not available")
        # Exit non-zero: this is an error, not success (was sys.exit(0)).
        sys.exit(1)
    profile(proc, procs[proc])
|
{"/selfdrive/debug/internal/sounds/test_sounds.py": ["/common/basedir.py"], "/selfdrive/debug/internal/sounds/test_sound_stability.py": ["/common/basedir.py"], "/selfdrive/test/process_replay/model_replay.py": ["/tools/lib/logreader.py"], "/selfdrive/controls/lib/dynamic_follow/__init__.py": ["/common/params.py", "/common/dp_time.py", "/common/dp_common.py", "/selfdrive/controls/lib/dynamic_follow/auto_df.py", "/selfdrive/controls/lib/dynamic_follow/support.py"], "/selfdrive/test/test_cpu_usage.py": ["/common/basedir.py", "/common/params.py"], "/common/i18n.py": ["/common/hardware.py"], "/laika_repo/tests/test_fetch_sat_info.py": ["/laika/__init__.py"], "/common/hardware.py": ["/common/hardware_tici.py", "/common/hardware_base.py"], "/selfdrive/interbridge/interbridged.py": ["/selfdrive/interbridge/unisocket.py"], "/common/dp_common.py": ["/common/params.py", "/common/travis_checker.py"], "/tools/carcontrols/debug_controls.py": ["/common/params.py"], "/common/op_params.py": ["/common/travis_checker.py", "/common/colors.py"], "/laika_repo/laika/astro_dog.py": ["/laika_repo/laika/helpers.py", "/laika_repo/laika/downloader.py"], "/laika_repo/tests/test_positioning.py": ["/laika/__init__.py", "/laika/downloader.py"], "/selfdrive/test/test_openpilot.py": ["/common/params.py"], "/tools/replay/camera.py": ["/common/basedir.py"], "/common/hardware_tici.py": ["/common/hardware_base.py"], "/laika_repo/laika/raw_gnss.py": ["/laika_repo/laika/helpers.py"], "/selfdrive/test/process_replay/update_model.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/inject_model.py"], "/selfdrive/car/volkswagen/carstate.py": ["/selfdrive/car/volkswagen/values.py"], "/selfdrive/controls/lib/dynamic_follow/auto_df.py": ["/common/travis_checker.py"], "/selfdrive/test/process_replay/camera_replay.py": ["/common/hardware.py", "/tools/lib/logreader.py"], "/selfdrive/data_collection/gps_uploader.py": ["/common/params.py", "/common/op_params.py"], "/common/travis_checker.py": 
["/common/basedir.py"], "/tools/replay/sensorium.py": ["/tools/replay/lib/ui_helpers.py"], "/tools/replay/unlogger.py": ["/tools/lib/logreader.py", "/tools/lib/route_framereader.py"], "/selfdrive/dragonpilot/appd.py": ["/common/params.py", "/common/dp_conf.py"], "/selfdrive/crash.py": ["/common/params.py", "/common/hardware.py", "/common/op_params.py"], "/laika_repo/tests/test_ephemerides.py": ["/laika/__init__.py"], "/selfdrive/test/testing_closet_client.py": ["/common/params.py"], "/selfdrive/test/model_replay.py": ["/tools/lib/logreader.py", "/selfdrive/test/process_replay/camera_replay.py"], "/tools/webcam/accept_terms.py": ["/common/params.py"], "/laika/astro_dog.py": ["/laika/helpers.py", "/laika/downloader.py", "/laika/__init__.py"], "/selfdrive/mapd/mapd.py": ["/selfdrive/crash.py", "/common/params.py"], "/selfdrive/dragonpilot/systemd.py": ["/common/dp_conf.py", "/common/params.py", "/common/i18n.py", "/common/dp_common.py", "/common/dp_time.py", "/selfdrive/dragonpilot/dashcam.py", "/common/travis_checker.py"], "/op_edit.py": ["/common/op_params.py", "/common/colors.py"], "/selfdrive/test/profiling/profiler.py": ["/common/params.py", "/tools/lib/logreader.py"], "/selfdrive/controls/lib/planner.py": ["/common/params.py", "/common/op_params.py"], "/common/basedir.py": ["/common/hardware.py"], "/selfdrive/car/toyota/carstate.py": ["/common/params.py", "/common/travis_checker.py", "/common/op_params.py"], "/laika_repo/tests/test_prns.py": ["/laika/helpers.py"], "/selfdrive/golden/can_bridge.py": ["/common/params.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.