text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
"""
Utility functions for the RBM
Created on Fri May 10 2019
Adapted from pytorch-rbm project on GitHub
@author: João Henrique Rodrigues, IST
version: 1.0
"""
import torch
import numpy as np
class CategoricalRBM():
    '''Restricted Boltzmann machine for categorical (one-hot encoded) data.

    Visible units are grouped per feature: each group is a softmax over that
    feature's possible values. Training uses (optionally persistent)
    contrastive divergence with momentum and L2 weight decay.
    '''

    def __init__(self, n_features, n_hidden, n_diff, sum_data, cd=1, persistent=False, learning_rate=1e-3, momentum_coefficient=0.5, weight_decay=1e-4):
        '''Build the RBM.

        Parameters
        ----------
        n_features : int
            Number of features.
        n_hidden : int
            Number of hidden units.
        n_diff : list
            Number of possible values for the different features.
        sum_data : numpy.ndarray
            Ratio of the feature values on the dataset (values in (0, 1)).
        cd : int, default 1
            Number of steps in the contrastive divergence (CD) algorithm.
        persistent : bool, default False
            Usage of persistent CD.
        learning_rate : float, default 1e-3
            Learning rate of the model.
        momentum_coefficient : float, default 0.5
            Momentum coefficient, important for faster learning.
        weight_decay : float, default 1e-4
            Weight decay, preventing the weights from growing too much.
        '''
        self.n_features = n_features
        self.n_hidden = n_hidden
        self.cd_k = cd
        self.learning_rate = learning_rate
        self.momentum_coefficient = momentum_coefficient
        self.weight_decay = weight_decay
        self.n_diff = n_diff
        # one visible unit per (feature, value) pair
        self.n_visible = sum(n_diff)
        self.last_chain = None  # fantasy chain kept for persistent CD
        self.persistent = persistent
        self.expanded = False
        # Initialize visible biases to the log-odds of the empirical value
        # ratios (standard RBM initialization trick).
        self.visible_bias = torch.from_numpy(np.log(sum_data / (1 - sum_data))).float()
        self.hidden_bias = torch.zeros(self.n_hidden)
        self.weights = torch.randn(self.n_visible, self.n_hidden) * 0.01
        self.weights_momentum = torch.zeros(self.n_visible, self.n_hidden)
        self.visible_bias_momentum = torch.zeros(self.n_visible)
        self.hidden_bias_momentum = torch.zeros(self.n_hidden)

    def free_energy(self, visible_values):
        '''Free energy F(v) for a batch of visible configurations.'''
        wx_b = torch.matmul(visible_values, self.weights) + self.hidden_bias
        vbias_term = torch.matmul(visible_values, self.visible_bias)
        # BUGFIX: use torch ops instead of np.log/np.exp on tensors — the
        # numpy ufuncs convert to ndarray on some torch versions, which then
        # breaks torch.sum(..., dim=1).
        hidden_term = torch.sum(torch.log(1 + torch.exp(wx_b)), dim=1)
        return -hidden_term - vbias_term

    def sample_hidden(self, visible_values):
        '''Return P(h=1 | v) for each hidden unit (batch-wise).'''
        hidden_activations = torch.matmul(visible_values, self.weights) + self.hidden_bias
        hidden_probabilities = self._sigmoid(hidden_activations)
        return hidden_probabilities

    def sample_hidden_state(self, visible_values):
        '''Sample binary hidden states from P(h=1 | v).'''
        hidden_probabilities = self.sample_hidden(visible_values)
        hidden_activations = (hidden_probabilities >= self._random_probabilities(self.n_hidden)).float()
        return hidden_activations

    def sample_visible(self, hidden_values):
        '''Sample one-hot visible values given hidden states.

        For each feature, computes a softmax over the feature's value group
        and samples one value via the cumulative distribution.
        '''
        visible_activations = torch.zeros(hidden_values.shape[0], self.n_visible)
        aux_index = 0
        # for each feature, compute cumulative probability of assuming each
        # of its possible values
        for i in range(self.n_features):
            visible_probabilities = torch.zeros(hidden_values.shape[0], self.n_diff[i])
            visible_com_probabilities = torch.zeros(hidden_values.shape[0], self.n_diff[i])  # cumulative probabilities
            sum_exp = 0
            for j in range(self.n_diff[i]):
                sum_exp += torch.exp(self.visible_bias[aux_index + j] + torch.matmul(hidden_values, self.weights[aux_index + j]))
            for j in range(self.n_diff[i]):
                # softmax term for value j of feature i (torch ops, see free_energy)
                visible_probabilities[:, j] = torch.exp(self.visible_bias[aux_index + j] + torch.matmul(hidden_values, self.weights[aux_index + j])) / sum_exp
                if j == 0:
                    visible_com_probabilities[:, j] = visible_probabilities[:, j]
                else:
                    visible_com_probabilities[:, j] = visible_com_probabilities[:, j - 1] + visible_probabilities[:, j]
            # NOTE(review): a single random draw is shared by every row of the
            # batch for this feature — confirm this is intended (a commented
            # per-row alternative existed in the original).
            aux_rand = self._random_probabilities(1).float()
            for k in range(hidden_values.shape[0]):
                for j in range(self.n_diff[i]):
                    if j == 0 and aux_rand < visible_com_probabilities[k, j]:
                        visible_activations[k, aux_index + j] = 1
                    elif aux_rand > visible_com_probabilities[k, j - 1] and aux_rand <= visible_com_probabilities[k, j]:
                        visible_activations[k, aux_index + j] = 1
                        break
            aux_index += self.n_diff[i]
        return visible_activations

    def contrastive_divergence(self, input_data):
        '''Run one CD-k parameter update; returns the reconstruction error.'''
        # Positive phase: start from the data, or reuse the persistent chain
        # when its batch size matches the incoming batch.
        if self.last_chain is None or input_data.shape[0] != self.last_chain.shape[0]:
            positive_hidden_probabilities = self.sample_hidden(input_data)
            positive_hidden_activations = (positive_hidden_probabilities >= self._random_probabilities(self.n_hidden)).float()
        else:
            positive_hidden_probabilities = self.last_chain
            positive_hidden_activations = (positive_hidden_probabilities >= self._random_probabilities(self.n_hidden)).float()
        positive_associations = torch.matmul(input_data.t(), positive_hidden_activations)
        # Negative phase: Gibbs chain for cd_k steps
        hidden_activations = positive_hidden_activations
        for step in range(self.cd_k):
            visible_activations = self.sample_visible(hidden_activations)
            hidden_probabilities = self.sample_hidden(visible_activations)
            hidden_activations = (hidden_probabilities >= self._random_probabilities(self.n_hidden)).float()
        negative_visible_activations = visible_activations
        negative_hidden_probabilities = hidden_probabilities
        if self.persistent:
            self.last_chain = negative_hidden_probabilities
        # negative statistics use sampled activations (a probabilities-based
        # variant was present but commented out in the original)
        negative_associations = torch.matmul(negative_visible_activations.t(), hidden_activations)
        # Update parameters via momentum buffers
        self.weights_momentum *= self.momentum_coefficient
        self.weights_momentum += (positive_associations - negative_associations)
        self.visible_bias_momentum *= self.momentum_coefficient
        self.visible_bias_momentum += torch.sum(input_data - negative_visible_activations, dim=0)
        self.hidden_bias_momentum *= self.momentum_coefficient
        # NOTE(review): the hidden-bias gradient is accumulated twice — once
        # from probabilities and once from activations; preserved as-is, but
        # confirm it is intentional.
        self.hidden_bias_momentum += torch.sum(positive_hidden_probabilities - negative_hidden_probabilities, dim=0)
        self.hidden_bias_momentum += torch.sum(positive_hidden_activations - hidden_activations, dim=0)
        batch_size = input_data.shape[0]
        self.weights += self.weights_momentum * self.learning_rate / batch_size
        self.visible_bias += self.visible_bias_momentum * self.learning_rate / batch_size
        self.hidden_bias += self.hidden_bias_momentum * self.learning_rate / batch_size
        self.weights -= self.weights * self.weight_decay  # L2 weight decay
        # Compute reconstruction error (squared difference, summed)
        error = torch.sum((input_data - negative_visible_activations)**2)
        return error

    def update_momentum(self, value):
        '''Set a new momentum coefficient and halve the learning rate.'''
        self.momentum_coefficient = value
        self.learning_rate = self.learning_rate / 2

    def expand_hidden(self):
        '''Add one new hidden unit per feature (once only).

        Returns the new number of hidden units; subsequent calls are no-ops.
        '''
        if self.expanded:
            print('already expanded')
            return self.n_hidden
        else:
            # BUGFIX: was `self.expanded == True` (a no-op comparison), so
            # every call kept growing the network.
            self.expanded = True
            avg_weight = abs(self.weights).max()
            avg_hidden_bias = np.average(abs(self.hidden_bias))
            avg_weights_momentum = np.average(abs(self.weights_momentum))
            avg_hidden_momentum = np.average(abs(self.hidden_bias_momentum))
            aux = sum(self.n_diff)
            for i in range(self.n_features - 1, -1, -1):
                aux = aux - self.n_diff[i]
                self.hidden_bias = torch.cat((torch.tensor([avg_hidden_bias]), self.hidden_bias))
                # BUGFIX: was assigned to `self.hidden_momentum`, leaving the
                # real momentum buffer out of sync with the expanded biases.
                self.hidden_bias_momentum = torch.cat((torch.tensor([avg_hidden_momentum]), self.hidden_bias_momentum))
                tensor_weights = torch.zeros(sum(self.n_diff), 1)
                tensor_weights_momentum = torch.zeros(sum(self.n_diff), 1)
                # the new unit is connected only to the visible group of feature i
                tensor_weights[aux:aux + self.n_diff[i]] = float(avg_weight)
                tensor_weights_momentum[aux:aux + self.n_diff[i]] = float(avg_weights_momentum)
                self.weights = torch.cat((tensor_weights, self.weights), 1)
                self.weights_momentum = torch.cat((tensor_weights_momentum, self.weights_momentum), 1)
            self.n_hidden = self.n_hidden + self.n_features
            return self.n_hidden

    def _sigmoid(self, x):
        '''Logistic sigmoid, element-wise.'''
        return 1 / (1 + torch.exp(-x))

    def _random_probabilities(self, num):
        '''Uniform(0, 1) tensor of length `num`.'''
        random_probabilities = torch.rand(num)
        return random_probabilities
|
{"hexsha": "22d0fd80aed7a267904a62d89984a7919f607e3b", "size": 9746, "ext": "py", "lang": "Python", "max_stars_repo_path": "rbm_categorical.py", "max_stars_repo_name": "joaor96/BLADE", "max_stars_repo_head_hexsha": "6f0cd0e7e5fe8f7d200a63719ecb347987fd9a1e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-04-12T22:05:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-29T08:52:05.000Z", "max_issues_repo_path": "rbm_categorical.py", "max_issues_repo_name": "joaor96/RBM-tDBN", "max_issues_repo_head_hexsha": "6f0cd0e7e5fe8f7d200a63719ecb347987fd9a1e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rbm_categorical.py", "max_forks_repo_name": "joaor96/RBM-tDBN", "max_forks_repo_head_hexsha": "6f0cd0e7e5fe8f7d200a63719ecb347987fd9a1e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-24T15:57:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-24T15:57:15.000Z", "avg_line_length": 45.1203703704, "max_line_length": 162, "alphanum_fraction": 0.6549353581, "include": true, "reason": "import numpy", "num_tokens": 2007}
|
from OpenGL.GL import *
from OpenGL.GL.shaders import compileProgram, compileShader
import json
import pyrr
import numpy as np
from utils.picker import Picker
from utils.text import TextDrawer
from utils.window import Window
class Drawer:
    """Top-level visualizer: loads a recorded HTM-cell data base (JSON) and
    renders the cells with OpenGL, handling mouse picking and the on-screen
    legend. Comments only — code behavior unchanged.
    """

    def __init__(self, path, win_params):
        # data — path points at a JSON dump with a 'params' section plus one
        # entry per timestep; win_params looks like (width, height) — TODO confirm
        with open(path, "r") as outfile:
            self.data_base = json.load(outfile)
        self.active_cell = -1        # picked cell id; -1 means none
        self.active_connection = -1  # picked synapse-cell id; -1 means none
        self.current = 0             # current timestep key into data_base
        self.cells_in_column = self.data_base['params']['cells_per_column']
        self.num_of_cells = self.data_base['params']['num_cells']
        self.start_point = [0, 0, 0]
        # assumes the columns form a square grid — TODO confirm
        self.size = int(np.sqrt(self.num_of_cells / self.cells_in_column))
        distance = 5.
        # camera look-at target near the middle of the cell volume
        target = (distance * self.size / 2., 3 * distance * self.cells_in_column / 2., distance * self.size / 2.)
        self.window = Window(win_params[0], win_params[1], target)
        self.pick = Picker(self.window.width, self.window.height, self.num_of_cells)
        # color settings (RGB in 0..1)
        self.background_color = [0.98, 1.0, 0.98]
        self.active_cells_color = [0.9, 0., 0.]
        self.predictive_cells_color = [0., 0.9, 0.]
        self.winner_cells_color = [0.99, 0.06, 0.75]
        self.active_and_predictive_cells_color = [0.9, 0.9, 0.]
        self.active_segments_color = [0., 0., 0.]
        self.picked_color = [0., 0., 0.9]
        self.default_color = [0.8, 0.8, 0.8]
        self.bright_off = 2
        self.bright_less = 0.4
        self.textDrawer = TextDrawer()
        self.textDrawer.load_font('fonts/calibri.ttf', 40 * 64, 1 * 64)
        # shared parameter bundle handed to every Cells/Primitives instance
        params = {
            'data_base': self.data_base,
            'cells_in_column': self.cells_in_column,
            'num_of_cells': self.num_of_cells,
            'start_point': self.start_point,
            'size': self.size,
            'background_color': self.background_color,
            'active_cells_color': self.active_cells_color,
            'predictive_cells_color': self.predictive_cells_color,
            'winner_cells_color': self.winner_cells_color,
            'active_and_predictive_cells_color': self.active_and_predictive_cells_color,
            'active_segments_color': self.active_segments_color,
            'picked_color': self.picked_color,
            'default_color': self.default_color,
            'bright_off': self.bright_off,
            'bright_less': self.bright_less,
            'draw_cells': [None],
            'distance': distance,
            'win_width': self.window.width,
            'win_height': self.window.height
        }
        # primitives: one mesh per cell state (cube = inactive, pyramids =
        # active/predictive, sphere = active+predictive)
        cube_buffer = np.array([-0.5, -0.5, 0.5,
                                0.5, -0.5, 0.5,
                                0.5, 0.5, 0.5,
                                -0.5, 0.5, 0.5,
                                -0.5, -0.5, -0.5,
                                0.5, -0.5, -0.5,
                                0.5, 0.5, -0.5,
                                -0.5, 0.5, -0.5,
                                0.5, -0.5, -0.5,
                                0.5, 0.5, -0.5,
                                0.5, 0.5, 0.5,
                                0.5, -0.5, 0.5,
                                -0.5, 0.5, -0.5,
                                -0.5, -0.5, -0.5,
                                -0.5, -0.5, 0.5,
                                -0.5, 0.5, 0.5,
                                -0.5, -0.5, -0.5,
                                0.5, -0.5, -0.5,
                                0.5, -0.5, 0.5,
                                -0.5, -0.5, 0.5,
                                0.5, 0.5, -0.5,
                                -0.5, 0.5, -0.5,
                                -0.5, 0.5, 0.5,
                                0.5, 0.5, 0.5, ], dtype=np.float32)
        cube_indices = np.array([0, 1, 2, 2, 3, 0,
                                 4, 5, 6, 6, 7, 4,
                                 8, 9, 10, 10, 11, 8,
                                 12, 13, 14, 14, 15, 12,
                                 16, 17, 18, 18, 19, 16,
                                 20, 21, 22, 22, 23, 20], dtype=np.uint32)
        sphere_buffer, sphere_indices = self.create_sphere()
        pyramid_buffer = np.array([-0.5, -0.5, -0.5,
                                   0.5, -0.5, -0.5,
                                   0.5, -0.5, 0.5,
                                   -0.5, -0.5, 0.5,
                                   0.5, -0.5, -0.5,
                                   0.5, -0.5, 0.5,
                                   0.0, 0.5, 0.0,
                                   -0.5, -0.5, 0.5,
                                   -0.5, -0.5, -0.5,
                                   0.0, 0.5, 0.0,
                                   -0.5, -0.5, -0.5,
                                   0.5, -0.5, -0.5,
                                   0.0, 0.5, 0.0,
                                   0.5, -0.5, 0.5,
                                   -0.5, -0.5, 0.5,
                                   0.0, 0.5, 0.0], dtype=np.float32)
        pyramid_indices = np.array([0, 1, 2, 2, 3, 0,
                                    4, 5, 6,
                                    7, 8, 9,
                                    10, 11, 12,
                                    13, 14, 15], dtype=np.uint32)
        pyramid2_buffer = np.array([0.5, 0.0, -0.5,
                                    0.5, 0.0, 0.5,
                                    0.0, -0.5, 0.0,
                                    -0.5, 0.0, 0.5,
                                    -0.5, 0.0, -0.5,
                                    0.0, -0.5, 0.0,
                                    -0.5, 0.0, -0.5,
                                    0.5, 0.0, -0.5,
                                    0.0, -0.5, 0.0,
                                    0.5, 0.0, 0.5,
                                    -0.5, 0.0, 0.5,
                                    0.0, -0.5, 0.0,
                                    0.5, 0.0, -0.5,
                                    0.5, 0.0, 0.5,
                                    0.0, 0.5, 0.0,
                                    -0.5, 0.0, 0.5,
                                    -0.5, 0.0, -0.5,
                                    0.0, 0.5, 0.0,
                                    -0.5, 0.0, -0.5,
                                    0.5, 0.0, -0.5,
                                    0.0, 0.5, 0.0,
                                    0.5, 0.0, 0.5,
                                    -0.5, 0.0, 0.5,
                                    0.0, 0.5, 0.0], dtype=np.float32)
        pyramid2_indices = np.array([0, 1, 2,
                                     3, 4, 5,
                                     6, 7, 8,
                                     9, 10, 11,
                                     12, 13, 14,
                                     15, 16, 17,
                                     18, 19, 20,
                                     21, 22, 23], dtype=np.uint32)
        self.cells_active = Cells(params, pyramid_indices, pyramid_buffer)
        self.cells_predictive = Cells(params, pyramid2_indices, pyramid2_buffer)
        self.cells_active_predictive = Cells(params, sphere_indices, sphere_buffer)
        self.cells_not = Cells(params, cube_indices, cube_buffer)
        # flat-colored duplicates used only for the picking pass
        self.pick_cells = Primitives(params, cube_indices, cube_buffer)
        self.pick_cells.create_instance_cubes_array()
        # self.window.proj_loc = [self.cubes.proj_loc, self.pick_cells.proj_loc]

    @staticmethod
    def create_sphere():
        """Build a UV-sphere mesh (radius 0.5) as flat vertex/index arrays."""
        arcs1 = 40  # longitudinal segments
        arcs2 = 20  # latitudinal rings
        r = 0.5
        buffer = []
        indices = []
        for j in range(arcs2):
            for i in range(arcs1 + 1):
                buffer.append(r * np.sin(np.pi * j / arcs2) * np.cos(2 * np.pi * i / arcs1))
                buffer.append(r * np.sin(np.pi * j / arcs2) * np.sin(2 * np.pi * i / arcs1))
                buffer.append(r * np.cos(np.pi * j / arcs2))
                # two triangles per quad, skipping the poles' degenerate rows
                if j != 0 and i != arcs1:
                    indices.append(j * (arcs1 + 1) + i)
                    indices.append((j + 1) * (arcs1 + 1) + i + 1)
                    indices.append(j * (arcs1 + 1) + i + 1)
                if j != arcs2 - 1 and i != arcs1:
                    indices.append(j * (arcs1 + 1) + i)
                    indices.append((j + 1) * (arcs1 + 1) + i)
                    indices.append((j + 1) * (arcs1 + 1) + i + 1)
        # closing ring so the last row's indices stay in range
        for i in range(arcs1 + 1):
            buffer.append(0)
            buffer.append(0)
            buffer.append(r)
        return np.around(np.array(buffer, dtype=np.float32), decimals=2), np.array(indices, dtype=np.uint32)

    def cells_type(self):
        """Partition cell ids by state code |St|: 1=active, 2=predictive,
        3=active+predictive, otherwise uncolored. The sign of 'St' appears to
        flag winner cells (see Cells) and is ignored here via abs().
        """
        active = []
        predictive = []
        active_predictive = []
        not_colored = []
        for cell in range(self.num_of_cells):
            if abs(self.data_base[str(self.current)][str(cell)]['St']) == 1:
                active.append(cell)
            elif abs(self.data_base[str(self.current)][str(cell)]['St']) == 2:
                predictive.append(cell)
            elif abs(self.data_base[str(self.current)][str(cell)]['St']) == 3:
                active_predictive.append(cell)
            else:
                not_colored.append(cell)
        return active, predictive, active_predictive, not_colored

    def update(self):
        """Re-partition cells for the current timestep and refresh colors."""
        self.cells_active.draw_cells, self.cells_predictive.draw_cells, self.cells_active_predictive.draw_cells, self.cells_not.draw_cells = self.cells_type()
        self.update_color()

    def update_color(self):
        """Push current pick/segment state into all four cell groups."""
        self.cells_active.update(self.current,
                                 self.window.show_segments,
                                 self.window.show_active_segments,
                                 self.active_cell,
                                 self.active_connection)
        self.cells_predictive.update(self.current,
                                     self.window.show_segments,
                                     self.window.show_active_segments,
                                     self.active_cell,
                                     self.active_connection)
        self.cells_active_predictive.update(self.current,
                                            self.window.show_segments,
                                            self.window.show_active_segments,
                                            self.active_cell,
                                            self.active_connection)
        self.cells_not.update(self.current,
                              self.window.show_segments,
                              self.window.show_active_segments,
                              self.active_cell,
                              self.active_connection)

    def move(self, velocity=0.1):
        """Apply currently-held movement keys to the camera."""
        if self.window.left:
            self.window.cam.process_keyboard("LEFT", velocity)
        if self.window.right:
            self.window.cam.process_keyboard("RIGHT", velocity)
        if self.window.forward:
            self.window.cam.process_keyboard("FORWARD", velocity)
        if self.window.backward:
            self.window.cam.process_keyboard("BACKWARD", velocity)

    def draw_legend(self):
        """Draw the timestep caption, the color legend and (when a synapse is
        picked) its permanence value."""
        left = 325  # legend right-margin offset in pixels
        top = 100   # legend top offset in pixels
        text = self.data_base[str(self.current)]['text']
        scale = (.8, 0.8)
        self.textDrawer.draw_text(text, (self.window.width/2 - 20 * len(text)/2, self.window.height-40),
                                  (self.window.width, self.window.height), scale=(1.2, 1.2),
                                  foreColor=[0, 0, 0],
                                  backColor=self.background_color)
        if self.window.show_segments and not self.window.not_show_segments:
            self.textDrawer.draw_text('Active segments', (self.window.width - left, self.window.height - top),
                                      (self.window.width, self.window.height), scale=scale,
                                      foreColor=self.active_segments_color,
                                      backColor=self.background_color)
        else:
            # only relevant when a cell is picked: dimmed "not connected" entry
            if self.active_cell >= 0:
                self.textDrawer.draw_text('Not connected cells', (self.window.width - left, self.window.height - top - 200),
                                          (self.window.width, self.window.height), scale=scale,
                                          foreColor=np.array(self.default_color) * self.bright_less,
                                          backColor=self.background_color)
            self.textDrawer.draw_text('Active cells', (self.window.width - left, self.window.height - top),
                                      (self.window.width, self.window.height), scale=scale,
                                      foreColor=self.active_cells_color,
                                      backColor=self.background_color)
            self.textDrawer.draw_text('Predictive cells', (self.window.width - left, self.window.height - top - 40),
                                      (self.window.width, self.window.height), scale=scale,
                                      foreColor=self.predictive_cells_color,
                                      backColor=self.background_color)
            self.textDrawer.draw_text('Winner cells', (self.window.width - left, self.window.height - top - 80),
                                      (self.window.width, self.window.height), scale=scale,
                                      foreColor=self.winner_cells_color,
                                      backColor=self.background_color)
            self.textDrawer.draw_text('Active-predictive cells', (self.window.width - left, self.window.height - top - 120),
                                      (self.window.width, self.window.height), scale=scale,
                                      foreColor=self.active_and_predictive_cells_color,
                                      backColor=self.background_color)
            self.textDrawer.draw_text('Picked out cell', (self.window.width - left, self.window.height - top - 160),
                                      (self.window.width, self.window.height), scale=scale,
                                      foreColor=self.picked_color,
                                      backColor=self.background_color)
        if self.active_connection > 0:
            # find the picked synapse among the picked cell's segments and
            # display its permanence
            dict_segments = self.data_base[str(self.current)][str(self.active_cell)]['Se']
            for segment in dict_segments.keys():
                if str(self.active_connection) in dict_segments[segment]['Ce'].keys():
                    permanence = str(round(dict_segments[segment]['Ce'][str(self.active_connection)], 3))
                    permanence = 'permanenceForSynapse: ' + permanence
                    self.textDrawer.draw_text(str(permanence), (10, self.window.height - top),
                                              (self.window.width, self.window.height), scale=scale,
                                              foreColor=[0., 0., 0.],
                                              backColor=self.background_color)

    def process(self):
        """One frame: poll input, move camera, draw cells/legend, resolve
        left/right mouse picks via the off-screen picking pass, swap buffers."""
        self.window.poll_events()
        self.move(self.window.velocity)
        view = self.window.cam.get_view_matrix()
        glClearColor(*self.background_color, 1)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.cells_active.draw(view)
        self.cells_predictive.draw(view)
        self.cells_active_predictive.draw(view)
        self.cells_not.draw(view)
        self.draw_legend()
        if self.window.picker:
            # left-click: decode picked cell id from the picking framebuffer color
            self.pick_cells.draw(view, self.pick.EnableWriting)
            active_ind = self.pick.read(self.window.mouseX, self.window.height - self.window.mouseY)
            if active_ind[0] == 255 and active_ind[1] == 255 and active_ind[2] == 255:
                # white background = nothing picked
                self.active_cell = -1
                self.active_connection = -1
            else:
                # id is encoded base-256 in the RGB channels
                self.active_cell = active_ind[2] + 256 * active_ind[1] + 256 * 256 * active_ind[0]
            self.pick.DisableWriting()
            self.window.picker = False
            self.update_color()
        if self.window.picker_right:
            # right-click: same decoding, but selects a connection target
            self.pick_cells.draw(view, self.pick.EnableWriting)
            active_ind = self.pick.read(self.window.mouseX, self.window.height - self.window.mouseY)
            if active_ind[0] == 255 and active_ind[1] == 255 and active_ind[2] == 255:
                self.active_connection = -1
            else:
                self.active_connection = active_ind[2] + 256 * active_ind[1] + 256 * 256 * active_ind[0]
            self.pick.DisableWriting()
            self.window.picker_right = False
            self.update_color()
        # the not_show_* flags act as one-shot "turn off" requests from Window
        if self.window.show_segments and self.window.not_show_segments:
            self.window.show_segments = False
            self.update_color()
        if self.window.show_segments and not self.window.not_show_segments:
            if self.window.show_active_segments and self.window.not_show_active_segments:
                self.window.show_active_segments = False
                self.update_color()
        self.window.swap_buffers()
class Primitives:
    """Base class for instanced geometry: owns the shader program, the
    VAO/VBO/EBO for one primitive mesh, and the per-instance translation
    buffer. Comments only — code behavior unchanged.
    """

    def __init__(self, params, indices, buffer):
        # which cell ids to draw; [None] means "draw all cells"
        self.draw_cells = params['draw_cells']
        self.instance_array_cube = []
        self.instance_array_cube_color = []
        self.distance = params['distance']
        self.num_of_draw_cells = params['num_of_cells']
        self.win_width = params['win_width']
        self.win_height = params['win_height']
        self.start_point = params['start_point']
        self.num_of_cells = params['num_of_cells']
        self.cells_in_column = params['cells_in_column']
        self.size = params['size']
        # shaders: per-vertex position + per-instance offset and color
        vertex_src = """
        # version 330
        layout(location = 0) in vec3 a_position;
        layout(location = 1) in vec3 a_offset;
        layout(location = 2) in vec3 a_color;
        uniform mat4 model;
        uniform mat4 projection;
        uniform mat4 view;
        out vec3 v_color;
        void main()
        {
            vec3 final_pos = a_position + a_offset;
            gl_Position = projection * view * model * vec4(final_pos, 1.0f);
            v_color = a_color;
        }
        """
        fragment_src = """
        # version 330
        in vec3 v_color;
        out vec4 out_color;
        void main()
        {
            out_color = vec4(v_color, 0.8);
        }
        """
        self.shader = compileProgram(compileShader(vertex_src, GL_VERTEX_SHADER),
                                     compileShader(fragment_src, GL_FRAGMENT_SHADER))
        self.indices = indices
        # VAO, VBO and EBO
        self.vao = glGenVertexArrays(1)
        vbo = glGenBuffers(1)
        ebo = glGenBuffers(1)
        # cube Buffer: upload mesh vertices and triangle indices
        glBindVertexArray(self.vao)
        glBindBuffer(GL_ARRAY_BUFFER, vbo)
        glBufferData(GL_ARRAY_BUFFER, buffer.nbytes, buffer, GL_STATIC_DRAW)
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo)
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, self.indices.nbytes, self.indices, GL_STATIC_DRAW)
        glEnableVertexAttribArray(0)
        # NOTE(review): ctypes is not imported at the top of this file —
        # presumably re-exported by `from OpenGL.GL import *`; verify.
        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, buffer.itemsize * 3, ctypes.c_void_p(0))
        projection = pyrr.matrix44.create_perspective_projection_matrix(45,
                                                                        self.win_width / self.win_height,
                                                                        0.1,
                                                                        2000)
        cube_pos = pyrr.matrix44.create_from_translation(pyrr.Vector3([0.0, 0.0, 0.0]))
        glUseProgram(self.shader)
        model_loc = glGetUniformLocation(self.shader, "model")
        self.proj_loc = glGetUniformLocation(self.shader, "projection")
        self.view_loc = glGetUniformLocation(self.shader, "view")
        glUniformMatrix4fv(self.proj_loc, 1, GL_FALSE, projection)
        glUniformMatrix4fv(model_loc, 1, GL_FALSE, cube_pos)

    def create_instance_cubes_array(self):
        """Rebuild and upload per-instance translations for the cells in
        self.draw_cells (all cells when it contains None)."""
        self.instance_cube = glGenBuffers(1)
        glBindVertexArray(self.vao)
        if not (None in self.draw_cells):
            self.num_of_draw_cells = len(self.draw_cells)
        self.instance_array_cube = []
        x, y, z = self.start_point
        for cell in range(self.num_of_cells):
            if cell in self.draw_cells or None in self.draw_cells:
                translation = pyrr.Vector3([0.0, 0.0, 0.0])
                translation.x = self.distance * x
                translation.y = 3 * self.distance * y  # columns are stretched vertically
                translation.z = self.distance * z
                self.instance_array_cube.append(translation)
            # advance the grid cursor for EVERY cell so positions stay stable
            # regardless of which subset is drawn
            if y < self.cells_in_column - 1:
                y += 1
            else:
                y = 0
                if x < self.size - 1:
                    x += 1
                else:
                    x = 0
                    z += 1
        self.instance_array_cube = np.array(self.instance_array_cube, np.float32).flatten()
        glBindBuffer(GL_ARRAY_BUFFER, self.instance_cube)
        glBufferData(GL_ARRAY_BUFFER, self.instance_array_cube.nbytes, self.instance_array_cube, GL_STATIC_DRAW)
        glEnableVertexAttribArray(1)
        glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p(0))
        glVertexAttribDivisor(1, 1)  # 1 means, every instance will have it's own translate

    def draw(self, view, enable):
        """Picking pass: `enable` switches to the picking framebuffer, then
        all instances are drawn (colors encode ids, set up elsewhere)."""
        glUseProgram(self.shader)
        glBindVertexArray(self.vao)
        enable()
        glClearColor(1.0, 1.0, 1.0, 1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glUniformMatrix4fv(self.view_loc, 1, GL_FALSE, view)
        glDrawElementsInstanced(GL_TRIANGLES, len(self.indices), GL_UNSIGNED_INT, None, self.num_of_draw_cells)
class Cells(Primitives):
    """Instanced cell geometry with per-state coloring.

    Extends Primitives with a second per-instance attribute (color) derived
    from each cell's recorded state 'St' and, when a cell is picked, from its
    segment connectivity.
    """

    def __init__(self, params, indices, buffer):
        Primitives.__init__(self, params, indices, buffer)
        self.data_base = params['data_base']
        self.background_color = params['background_color']
        self.default_color = params['default_color']
        self.active_cells_color = params['active_cells_color']
        self.predictive_cells_color = params['predictive_cells_color']
        self.winner_cells_color = params['winner_cells_color']
        self.active_and_predictive_cells_color = params['active_and_predictive_cells_color']
        self.active_segments_color = params['active_segments_color']
        self.picked_color = params['picked_color']
        self.bright_less = params['bright_less']

    def update(self, current, show_segments, show_active_segments, active_cell, connection_cell):
        """Rebuild instance offsets and colors for the current frame.

        NOTE(review): a fresh color VBO is generated on every call and never
        deleted — looks like a GL buffer leak; confirm before changing.
        """
        self.create_instance_cubes_array()
        self.instance_cube_color = glGenBuffers(1)
        self.update_instance_cubes_color_array(current, show_segments, show_active_segments, active_cell, connection_cell)

    def update_instance_cubes_color_array(self, current, show_segments, show_active_segments, active_cell, connection_cell):
        """Compute one RGB color per drawn instance and upload as attribute 2.

        State codes in 'St': |1|=active, |2|=predictive, |3|=both; a negative
        value marks a winner cell. When a cell is picked (active_cell >= 0),
        unrelated cells are dimmed/hidden and connected cells are highlighted
        per segment.
        """
        glBindVertexArray(self.vao)
        if active_cell >= 0:
            dict_segments = self.data_base[str(current)][str(active_cell)]['Se']
        self.instance_array_cube_color = []
        for cell in range(self.num_of_cells):
            if cell in self.draw_cells or None in self.draw_cells:
                cell_key = str(cell)
                default_color = pyrr.Vector3(self.default_color)
                if abs(self.data_base[str(current)][cell_key]['St']) == 1:
                    default_color = pyrr.Vector3(self.active_cells_color)
                if abs(self.data_base[str(current)][cell_key]['St']) == 2:
                    default_color = pyrr.Vector3(self.predictive_cells_color)
                if abs(self.data_base[str(current)][cell_key]['St']) == 3:
                    default_color = pyrr.Vector3(self.active_and_predictive_cells_color)
                if self.data_base[str(current)][cell_key]['St'] < 0:
                    # BUGFIX: was pyrr.Vector3([self.winner_cells_color]) —
                    # the extra list nesting produced a malformed vector for
                    # winner cells (all sibling branches pass the flat list).
                    default_color = pyrr.Vector3(self.winner_cells_color)
                if active_cell == cell or connection_cell == cell:
                    default_color = pyrr.Vector3(self.picked_color)
                if active_cell >= 0:
                    # remember the full-brightness color, then dim/hide by default
                    color = default_color
                    if show_segments:
                        default_color = self.background_color
                    else:
                        default_color = default_color * self.bright_less
                    if len(dict_segments.keys()) != 0:
                        counter = 0
                        base0 = 50
                        for segment in dict_segments.keys():
                            state = dict_segments[segment]['St']
                            connected_cells = dict_segments[segment]['Ce']
                            connected_cells = [int(cell) for cell in connected_cells.keys()]
                            if cell in connected_cells:
                                if show_segments:
                                    # one distinct (clamped) color per segment index
                                    base = base0 + (counter // 6) * 40
                                    if counter % 3 == 0: default_color = pyrr.Vector3([0., 0., base])
                                    if counter % 3 == 1: default_color = pyrr.Vector3([0., base, 0.])
                                    if counter % 3 == 2: default_color = pyrr.Vector3([base, 0., 0.])
                                    if counter % 6 == 3: default_color = pyrr.Vector3([0., base, base])
                                    if counter % 6 == 4: default_color = pyrr.Vector3([base, base, 0.])
                                    if counter % 6 == 5: default_color = pyrr.Vector3([base, 0., base])
                                    if show_active_segments and state:
                                        default_color = pyrr.Vector3(self.active_segments_color)
                                else:
                                    # connected cell, segments hidden: restore brightness
                                    default_color = color
                            counter = counter + 1
                self.instance_array_cube_color.append(default_color)
        self.instance_array_cube_color = np.array(self.instance_array_cube_color, np.float32).flatten()
        glBindBuffer(GL_ARRAY_BUFFER, self.instance_cube_color)
        glBufferData(GL_ARRAY_BUFFER, self.instance_array_cube_color.nbytes, self.instance_array_cube_color,
                     GL_STATIC_DRAW)
        glEnableVertexAttribArray(2)
        glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p(0))
        glVertexAttribDivisor(2, 1)

    def draw(self, view):
        """Draw all instances with depth test and alpha blending enabled."""
        glUseProgram(self.shader)
        glBindVertexArray(self.vao)
        glBindBuffer(GL_ARRAY_BUFFER, self.instance_cube_color)
        glBufferData(GL_ARRAY_BUFFER, self.instance_array_cube_color.nbytes, self.instance_array_cube_color,
                     GL_STATIC_DRAW)
        glEnableVertexAttribArray(2)
        glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p(0))
        glVertexAttribDivisor(2, 1)
        glUniformMatrix4fv(self.view_loc, 1, GL_FALSE, view)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glDrawElementsInstanced(GL_TRIANGLES, len(self.indices), GL_UNSIGNED_INT, None, self.num_of_draw_cells)
        glDisable(GL_BLEND)
        glDisable(GL_DEPTH_TEST)
|
{"hexsha": "65c9487aebad8c7a7e661a860ac84b7cd70ef3bc", "size": 27531, "ext": "py", "lang": "Python", "max_stars_repo_path": "watcher/utils/drawer.py", "max_stars_repo_name": "cog-isa/htm-rl", "max_stars_repo_head_hexsha": "baf5b67a11283d37165bf6a29d6808a234d6d98c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-09T22:09:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T22:09:24.000Z", "max_issues_repo_path": "watcher/utils/drawer.py", "max_issues_repo_name": "cog-isa/htm-rl", "max_issues_repo_head_hexsha": "baf5b67a11283d37165bf6a29d6808a234d6d98c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "watcher/utils/drawer.py", "max_forks_repo_name": "cog-isa/htm-rl", "max_forks_repo_head_hexsha": "baf5b67a11283d37165bf6a29d6808a234d6d98c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-18T08:54:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-18T08:54:20.000Z", "avg_line_length": 46.8214285714, "max_line_length": 158, "alphanum_fraction": 0.5082270895, "include": true, "reason": "import numpy", "num_tokens": 6155}
|
from flask import Flask
from flask import jsonify
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import numpy as np
# setup db & reflect it: auto-map the existing sqlite schema into ORM classes
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
# our tables (reflected class names match table names)
m = Base.classes.measurement
s = Base.classes.station
# NOTE(review): a single module-level session is shared by all requests —
# fine for this single-threaded exercise, not for production use.
session = Session(engine)
# setup flask
app = Flask(__name__)
# define flask routes
# define flask routes
@app.route("/", methods=['GET'])
def hello():
    """Landing page: list the available API routes as an HTML snippet."""
    routes = [
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/start/end",
    ]
    return "Available Routes:<br/>" + "<br/>".join(routes) + "<br/>"
# get precipitation in json
# get precipitation in json
@app.route("/api/v1.0/precipitation", methods=['GET'])
def precip():
    """Return {date: precipitation} for dates after 2016-08-22 as JSON."""
    rows = session.query(m.date, m.prcp).filter(m.date > '2016-08-22').all()
    return jsonify({date: prcp for date, prcp in rows})
@app.route("/api/v1.0/stations", methods=['GET'])
def stations():
    """Return the list of station identifiers as JSON."""
    rows = session.query(s.station).all()
    return jsonify(list(np.ravel(rows)))
@app.route("/api/v1.0/tobs", methods=['GET'])
def tobs():
    """Return {date: temperature observation} after 2016-08-22 as JSON."""
    rows = session.query(m.date, m.tobs).filter(m.date > '2016-08-22').all()
    return jsonify({date: temp for date, temp in rows})
@app.route("/api/v1.0/<start>", methods=['GET'])
@app.route("/api/v1.0/<start>/<end>", methods=['GET'])
def ranged_temps(start = None, end = None):
    """Return [min, avg, max] temperature over an inclusive date range.

    `start` is required by the routes; `end` is optional — when omitted the
    range is open-ended. The two near-identical query branches of the
    original are deduplicated into one query with a conditional filter
    (same SQL, same JSON result).
    """
    query = session.query(
        func.min(m.tobs),
        func.avg(m.tobs),
        func.max(m.tobs)).filter(m.date >= start)
    if end:
        query = query.filter(m.date <= end)
    temps_list = list(np.ravel(query.all()))
    return jsonify(temps_list)
# Run the Flask development server when executed directly (debug mode).
if __name__ == '__main__':
    app.run(debug=True)
|
{"hexsha": "bbd62b7cf510c7cc1189a255ecfab71e8bc8b3b4", "size": 2226, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "nukemecha/sqlalchemy-challenge", "max_stars_repo_head_hexsha": "e7cfb226a5639ca6a605a880a52aaba113e7c59c", "max_stars_repo_licenses": ["ADSL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "nukemecha/sqlalchemy-challenge", "max_issues_repo_head_hexsha": "e7cfb226a5639ca6a605a880a52aaba113e7c59c", "max_issues_repo_licenses": ["ADSL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "nukemecha/sqlalchemy-challenge", "max_forks_repo_head_hexsha": "e7cfb226a5639ca6a605a880a52aaba113e7c59c", "max_forks_repo_licenses": ["ADSL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.26, "max_line_length": 78, "alphanum_fraction": 0.6154537287, "include": true, "reason": "import numpy", "num_tokens": 582}
|
# encoding: utf8
import csv
import string
import numpy as np
import math
def load_data(filename, train_ratio):
    """Load a two-column CSV and split it into train/test sets.

    Reads (label, text) pairs from *filename*, skipping the header row,
    shuffles them, and splits at ``len(dataset) * train_ratio``.

    Returns a ``(train, test)`` pair of lists of (label, text) tuples.
    """
    with open(filename, "r") as handle:
        reader = csv.reader(handle)
        next(reader, None)  # skip the header row
        samples = [(row[0], row[1]) for row in reader]
    np.random.shuffle(samples)
    split_at = int(len(samples) * train_ratio)
    return samples[:split_at], samples[split_at:]
def train(train_set):
    """Train a multinomial naive Bayes model with add-one smoothing.

    Args:
        train_set: iterable of (label, document-text) pairs.

    Returns:
        (log_priors, log_likelihoods, vocabulary) where log_priors maps
        label -> log P(label), log_likelihoods maps label -> list of
        log P(word | label) ordered like iteration over *vocabulary*,
        and vocabulary is the set of all seen words.
    """
    from collections import Counter

    total_doc_cnt = len(train_set)
    label_doc_cnt = {}
    bigdoc_words = {}
    for label, doc in train_set:
        if label not in label_doc_cnt:
            # init per-label accumulators
            label_doc_cnt[label] = 0
            bigdoc_words[label] = []
        label_doc_cnt[label] += 1
        bigdoc_words[label].extend(
            w.strip(string.punctuation) for w in doc.split())

    vocabulary = set()
    for words in bigdoc_words.values():
        vocabulary |= set(words)
    V = len(vocabulary)

    log_priors = {label: math.log(1.0 * cnt / total_doc_cnt)
                  for label, cnt in label_doc_cnt.items()}

    log_likelihoods = dict()
    for label, words in bigdoc_words.items():
        word_cnt = len(words) + V
        # PERF FIX: the original called words.count(w) for every vocabulary
        # word, O(V * len(words)); count each label's words once instead.
        counts = Counter(words)
        log_likelihoods[label] = [
            math.log(1.0 * (1 + counts[w]) / word_cnt) for w in vocabulary]
    return log_priors, log_likelihoods, vocabulary
def predict(log_priors, log_likelihoods, vocabulary, input_text, expect_label=None):
    """Classify *input_text* and return the most probable label.

    Scores each label as log-prior plus the summed log-likelihoods of the
    input's words; prints a short diagnostic when *expect_label* is given
    and the prediction disagrees with it.
    """
    words = {w.strip(string.punctuation) for w in input_text.split()}
    # BUG FIX: the original used prob_max = 0 with `if not prob_max`, which
    # misbehaves when a genuine log-probability is exactly 0.0; use a None
    # sentinel instead (log-probabilities are <= 0, so 0 is a valid score).
    prob_max = None
    label_max = None
    probs = {}  # tmp for log
    for label, likelihood in log_likelihoods.items():
        prob = log_priors[label] + sum(
            p for w, p in zip(vocabulary, likelihood) if w in words)
        probs[label] = prob
        if prob_max is None or prob > prob_max:
            prob_max = prob
            label_max = label
    if expect_label and expect_label != label_max:
        print('---')
        print('expect: %s, got: %s' % (expect_label, label_max))
        print(probs)
        print(input_text)
    return label_max
def main():
    """Load the spam dataset, train the classifier, and report metrics.

    Expects 'spam.csv' (label,text columns) in the working directory.
    """
    filename = 'spam.csv'
    train_ratio = 0.75
    train_data, test_data = load_data(filename, train_ratio)
    # BUG FIX: the original called .format() on print()'s return value
    # (None), raising AttributeError; format the string itself instead.
    print('data loaded. train: {}, test: {}'.format(
        len(train_data), len(test_data)))

    # train the model
    log_priors, log_likelihoods, vocabulary = train(train_data)
    print('model trained. log_priors: {}, V(vocabulary word count): {}'.format(log_priors, len(vocabulary)))

    # Confusion-matrix counts ("positive" == spam).
    pos_true = 0
    pos_false = 0
    neg_false = 0
    neg_true = 0
    for label, text in test_data:
        got = predict(log_priors, log_likelihoods, vocabulary, text, label)
        if label != got:
            if label == 'spam':
                pos_false += 1
            else:
                neg_false += 1
        else:
            if label == 'spam':
                pos_true += 1
            else:
                neg_true += 1

    print('positive(spam) true: %s, false: %s' % (pos_true, pos_false))
    print('negative true: %s, false: %s' % (neg_true, neg_false))
    # NOTE(review): divides by zero when the test split contains no spam
    # or none is detected -- confirm the dataset always has both classes.
    print('Precision: %.2f%%, Recall: %.2f%%' % (
        100.0 * pos_true / (pos_true + pos_false),
        100.0 * pos_true / (pos_true + neg_false),
    ))


if __name__ == '__main__':
    main()
|
{"hexsha": "ccadb929de3505d54c219d8bfc9ef214b9f235e0", "size": 3235, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/kaggle-uciml-sms-spam/spam-detection-using-naive-bayes.py", "max_stars_repo_name": "JackonYang/paper-reading", "max_stars_repo_head_hexsha": "330c6724dc1e3a56d40de4c19b9965810698b60d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 85, "max_stars_repo_stars_event_min_datetime": "2020-04-02T12:21:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T03:47:51.000Z", "max_issues_repo_path": "datasets/kaggle-uciml-sms-spam/spam-detection-using-naive-bayes.py", "max_issues_repo_name": "renyi533/paper-reading", "max_issues_repo_head_hexsha": "5b15132725dba27efc889bdd061a2c3d7f3c22e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-08T01:49:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T01:49:27.000Z", "max_forks_repo_path": "datasets/kaggle-uciml-sms-spam/spam-detection-using-naive-bayes.py", "max_forks_repo_name": "renyi533/paper-reading", "max_forks_repo_head_hexsha": "5b15132725dba27efc889bdd061a2c3d7f3c22e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2020-04-02T07:49:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T08:52:45.000Z", "avg_line_length": 28.1304347826, "max_line_length": 108, "alphanum_fraction": 0.6015455951, "include": true, "reason": "import numpy", "num_tokens": 844}
|
import sys, os, argparse
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.backends.cudnn as cudnn
import torchvision
import torch.nn.functional as F
from PIL import Image
import datasets, hopenet, utils
from skimage import io
import dlib
def parse_args():
    """Parse and return command-line arguments for the head-pose demo."""
    parser = argparse.ArgumentParser(
        description='Head pose estimation using the Hopenet network.')
    parser.add_argument('--gpu', dest='gpu_id',
                        help='GPU device id to use [0]', default=0, type=int)
    parser.add_argument('--snapshot', dest='snapshot',
                        help='Path of model snapshot.', default='', type=str)
    parser.add_argument('--face_model', dest='face_model',
                        help='Path of DLIB face detection model.',
                        default='', type=str)
    parser.add_argument('--video', dest='video_path', help='Path of video')
    parser.add_argument('--output_string', dest='output_string',
                        help='String appended to output file')
    parser.add_argument('--n_frames', dest='n_frames',
                        help='Number of frames', type=int)
    parser.add_argument('--fps', dest='fps',
                        help='Frames per second of source video',
                        type=float, default=30.)
    return parser.parse_args()
if __name__ == '__main__':
    # Run Hopenet head-pose estimation over a video, drawing pose axes and
    # coarse Up/Down/Left/Right/Front labels onto each frame, and writing an
    # annotated AVI plus a per-frame yaw/pitch/roll text log.
    args = parse_args()

    font = cv2.FONT_HERSHEY_COMPLEX
    cudnn.enabled = True
    gpu = args.gpu_id
    snapshot_path = args.snapshot
    out_dir = 'output/video'
    video_path = args.video_path

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    if not os.path.exists(args.video_path):
        sys.exit('Video does not exist')

    # ResNet50 structure
    model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)

    # Dlib face detection model
    cnn_face_detector = dlib.cnn_face_detection_model_v1(args.face_model)

    print('Loading snapshot.')
    saved_state_dict = torch.load(snapshot_path)
    model.load_state_dict(saved_state_dict)

    print('Loading data.')
    # NOTE(review): transforms.Scale is removed in newer torchvision
    # (use transforms.Resize); kept for compatibility with the pinned version.
    transformations = transforms.Compose([transforms.Scale(224),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    model.cuda(gpu)

    print('Ready to test network.')
    model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).

    # Bin indices used to turn the 66-way classification into degrees.
    idx_tensor = [idx for idx in range(66)]
    idx_tensor = torch.FloatTensor(idx_tensor).cuda(gpu)

    video = cv2.VideoCapture(video_path)
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter('output/video/output-%s.avi' % args.output_string, fourcc, args.fps, (width, height))

    txt_out = open('output/video/output-%s.txt' % args.output_string, 'w')

    frame_num = 1
    # NOTE(review): --n_frames has no default; the comparison below raises
    # TypeError when it is omitted -- confirm callers always pass it.
    while frame_num <= args.n_frames:
        print(frame_num)

        ret, frame = video.read()
        if ret == False:
            break

        cv2_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Dlib detect
        dets = cnn_face_detector(cv2_frame, 1)
        for idx, det in enumerate(dets):
            # Get x_min, y_min, x_max, y_max, conf
            x_min = det.rect.left()
            y_min = det.rect.top()
            x_max = det.rect.right()
            y_max = det.rect.bottom()
            conf = det.confidence

            if conf > 1.0:
                bbox_width = abs(x_max - x_min)
                bbox_height = abs(y_max - y_min)
                # BUG FIX: use floor division so the bounds stay integers;
                # true division yields floats, and float slice indices
                # raise TypeError on Python 3.
                x_min -= 2 * bbox_width // 4
                x_max += 2 * bbox_width // 4
                y_min -= 3 * bbox_height // 4
                y_max += bbox_height // 4
                x_min = max(x_min, 0)
                y_min = max(y_min, 0)
                x_max = min(frame.shape[1], x_max)
                y_max = min(frame.shape[0], y_max)

                # Crop face and preprocess for the network.
                img = cv2_frame[y_min:y_max, x_min:x_max]
                img = Image.fromarray(img)
                img = transformations(img)
                img_shape = img.size()
                img = img.view(1, img_shape[0], img_shape[1], img_shape[2])
                img = Variable(img).cuda(gpu)

                yaw, pitch, roll = model(img)

                # BUG FIX: pass dim explicitly; implicit-dim softmax is
                # deprecated and ambiguous (output is (1, 66) -> dim=1).
                yaw_predicted = F.softmax(yaw, dim=1)
                pitch_predicted = F.softmax(pitch, dim=1)
                roll_predicted = F.softmax(roll, dim=1)

                # Get continuous predictions in degrees (expectation over bins).
                yaw_predicted = torch.sum(yaw_predicted.data[0] * idx_tensor) * 3 - 99
                pitch_predicted = torch.sum(pitch_predicted.data[0] * idx_tensor) * 3 - 99
                roll_predicted = torch.sum(roll_predicted.data[0] * idx_tensor) * 3 - 99

                # Log raw angles for this frame.
                txt_out.write(str(frame_num) + ' %f %f %f\n' % (yaw_predicted, pitch_predicted, roll_predicted))

                # Coarse pose labels for the on-frame overlay.
                if pitch_predicted > 25:
                    pitch_pose = "Up"
                    print("Up")
                elif pitch_predicted < -25:
                    pitch_pose = "Down"
                    print("Down")
                else:
                    pitch_pose = "Front"
                    print("Front")

                if roll_predicted > 35:
                    rool_pose = "Left"
                    print("Left")
                elif roll_predicted < -15:
                    rool_pose = "Right"
                    print("Right")
                else:
                    rool_pose = "Front"
                    print("Front")

                if yaw_predicted > 0:
                    pose = "Right"
                    print("Right")
                elif yaw_predicted < -45:
                    pose = "Left"
                    print("Left")
                else:
                    pose = "Front"
                    print("Front")

                utils.draw_axis(frame, yaw_predicted, pitch_predicted, roll_predicted, tdx = (x_min + x_max) / 2, tdy= (y_min + y_max) / 2, size = bbox_height/2)

                cv2.putText(frame, "HeadPitch:" + pitch_pose, (20, 20), font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
                cv2.putText(frame, "HeadRoll:" + rool_pose, (20, 50), font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
                cv2.putText(frame, "HeadYaw:" + pose, (20, 80), font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)

        out.write(frame)
        frame_num += 1

    out.release()
    video.release()
|
{"hexsha": "0a1052398089660f5ea90515870243d75dc721a2", "size": 10534, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/test_on_video_dlib.py", "max_stars_repo_name": "chenyeheng/SmartCar-HeadPose", "max_stars_repo_head_hexsha": "c804b5ea58929885c8dcc349194d785d66cc6443", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-05-27T13:22:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-17T01:15:00.000Z", "max_issues_repo_path": "code/test_on_video_dlib.py", "max_issues_repo_name": "yehengchen/HeadPoseEstimation", "max_issues_repo_head_hexsha": "c804b5ea58929885c8dcc349194d785d66cc6443", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/test_on_video_dlib.py", "max_forks_repo_name": "yehengchen/HeadPoseEstimation", "max_forks_repo_head_hexsha": "c804b5ea58929885c8dcc349194d785d66cc6443", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5860805861, "max_line_length": 161, "alphanum_fraction": 0.5277197646, "include": true, "reason": "import numpy", "num_tokens": 2796}
|
# Circular planar piston
# Evaluate the acoustic field generated by a circular planar piston
# Collocation ("discrete dipole") approximation of the volume integral
# equation for 3D acoustic scattering
import os
import sys
from IPython import embed
# FIXME: figure out how to avoid this sys.path stuff
sys.path.append(os.path.join(os.path.dirname(__file__),'../../'))
import numpy as np
# from piston import plane_circular_piston
from vines.geometry.geometry import generatedomain
from vines.fields.piston import plane_circular_piston
# Complex wavenumber of the medium; real part sets propagation, imaginary
# part sets absorption (re-derived from attenuation() further below).
k1 = (4225.410428500058 + 0.02498j)
# Pressure amplitude on the piston surface (Pa) and piston radius (m).
pressure_surface = 4*10**6
radius = 0.005
# n_elements = 2**12
# radius = 0.02 # 7.5cm
# aperture_radius = 0.00 # 2cm
# focal_length = 0.03 # 13cm
# focus = [0., 0., 0.]
# Wavelength from the real part of the wavenumber.
lam = 2 * np.pi / np.real(k1)
print('lam = ', lam)
def attenuation(f0):
    """Return the absorption coefficient for frequency *f0* in Hz.

    Empirical power law: 0.217 times the frequency in MHz, squared.
    """
    freq_mhz = f0 * 1e-6
    return 0.217 * freq_mhz ** 2
# Medium and nonlinearity parameters.
k1 = np.real(k1) + 1j * attenuation(1e6)  # re-attach absorption at 1 MHz
c0 = 1487.0  # wavespeed
beta = 3.5e0
rho = 998
nPerLam = 5  # number of voxels per interior wavelength
res = lam / nPerLam

# Dimension of computation domain
x_start = 0.0005
x_end = 0.06
wx = x_end - x_start
wy = 2 * radius
wz = wy

import time
start = time.time()
r, L, M, N = generatedomain(res, wx, wy, wz)
# Shift the grid so its first voxel starts at x_start.
r[:, :, :, 0] = r[:, :, :, 0] - r[0, 0, 0, 0] + x_start
end = time.time()
print('Mesh generation time:', end-start)

points = r.reshape(L*M*N, 3, order='F')

# Evaluate the incident piston field on every voxel centre.
start = time.time()
p = plane_circular_piston(radius, k1, points.T)
p *= pressure_surface * 2j * k1
P = p.reshape(L, M, N, order='F')
end = time.time()
print('Incident field evaluation time (s):', end-start)

# Array to be populated with different harmonics evaluated on central axis.
# BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int instead.
ny_centre = int(np.floor(M/2))
nz_centre = int(np.floor(N/2))
harmonics = np.zeros((4, L), dtype=np.complex128)
# First harmonic (i.e., incident field)
harmonics[0, :] = P[:, ny_centre, nz_centre]
import matplotlib
matplotlib.use('Agg')  # headless backend: write PNGs without a display
from matplotlib import pyplot as plt
matplotlib.rcParams.update({'font.size': 22})
# plt.rc('font', family='serif')

# Plot the real part of the incident field on the central z-slice.
fig = plt.figure(figsize=(15, 5))
ax = fig.gca()
# BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int instead.
plt.imshow(np.real(P[:, :, int(np.floor(N/2))].T),
           cmap=plt.cm.get_cmap('viridis'), interpolation='spline16')
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.colorbar()
fig.savefig('VIE_bowl_transducer_1p5cm.png')
plt.close()

# Voxel geometry used by the (commented-out) volume-potential code below.
dx = r[1, 0, 0, 0] - r[0, 0, 0, 0]
vol = (dx)**3  # voxel volume
a = (3/4 * vol / np.pi)**(1/3)  # radius of sphere of same volume
# ################# SECOND HARMONIC ########################
# # ko = 2 * k1
# ko = 2 * np.real(k1) + 1j * attenuation(2e6)
# # Create Toeplitz operator
# R0 = r[0, 0, 0, :]
# self = (1/ko**2 - 1j*a/ko) * np.exp(1j*ko*a) - 1/ko**2
# nearby_quad = 'off'
# n_quad = 10
# xG, wG = np.polynomial.legendre.leggauss(n_quad)
# XG, YG, ZG = np.meshgrid(xG, xG, xG)
# XW, YW, ZW = np.meshgrid(wG*0.5, wG*0.5, wG*0.5)
# # from operators import potential_fast
# from numba import jit, njit, prange
# @njit(parallel=True)
# def potential_fast(ko):
# toep = np.zeros((L, M, N), dtype=np.complex128)
# for i in prange(0, L):
# for j in range(0, M):
# for k in range(0, N):
# R1 = r[i,j,k,:]
# rk_to_rj = R1-R0
# rjk = np.linalg.norm(rk_to_rj)
# if nearby_quad in 'on':
# if rjk < 5 * dx and rjk > 1e-15:
# x_grid = R1[0] + dx/2 * XG
# y_grid = R1[1] + dx/2 * YG
# z_grid = R1[2] + dx/2 * ZG
# temp = 0.0+0.0j
# for iQ in range(0, n_quad):
# for jQ in range(0, n_quad):
# for kQ in range(0, n_quad):
# RQ = np.array([x_grid[iQ, jQ, kQ],
# y_grid[iQ, jQ, kQ],z_grid[iQ, jQ, kQ]])
# rk_to_rj = RQ - R0
# rjk = np.linalg.norm(rk_to_rj)
# rjk_hat = rk_to_rj / rjk
# rjkrjk = np.outer(rjk_hat, rjk_hat)
# Ajk = np.exp(1j * ko * rjk) / (4 * np.pi * rjk) * dx**3
# # Draine & Flatau
# temp = temp + Ajk * XW[iQ, jQ, kQ] * YW[iQ, jQ, kQ] * ZW[iQ, jQ, kQ]
# # from IPython import embed; embed()
# toep[i, j, k] = temp
# else:
# if np.abs(rjk) > 1e-15:
# toep[i, j, k] = \
# np.exp(1j * ko * rjk) / (4 * np.pi * rjk) * dx**3
# else:
# toep[i, j, k] = self
# else:
# if np.abs(rjk) > 1e-15:
# toep[i, j, k] = \
# np.exp(1j * ko * rjk) / (4 * np.pi * rjk) * dx**3
# else:
# toep[i, j, k] = self
# return toep
# start = time.time()
# toep = potential_fast(ko)
# end = time.time()
# print('Operator assembly time:', end-start)
# # toep = ko**2 * toep
# toep = toep
# start = time.time()
# from operators import circulant_embed
# circ_op = circulant_embed(toep, L, M, N)
# end = time.time()
# print('Time for circulant embedding and FFT:', end-start)
# from matvecs_acoustic import mvp_vec
# xIn = np.zeros((L, M, N), dtype=np.complex128)
# xIn = P
# xInVec = xIn.reshape((L*M*N, 1), order='F')
# idx = np.ones((L, M, N), dtype=bool)
# mvp = lambda x: mvp_vec(x, circ_op, idx, Mr)
# # Voxel permittivities
# Mr = np.ones((L, M, N), dtype=np.complex128)
# start = time.time()
# xOut = mvp(2 * beta * np.real(k1)**2 / (rho * c0**2) * xInVec * xInVec)
# end = time.time()
# print('Time for MVP:', end-start)
# # from IPython import embed; embed()
# P2 = xOut.reshape(L, M, N, order='F')
# #-------------- Third harmonic ----------------#
# # Create volume potential to evaluate next harmonic
# # ko = 3 * k1
# ko = 3 * np.real(k1) + 1j * attenuation(3e6)
# start = time.time()
# toep = potential_fast(ko)
# end = time.time()
# print('Operator assembly time:', end-start)
# start = time.time()
# from operators import circulant_embed
# circ_op = circulant_embed(toep, L, M, N)
# end = time.time()
# print('Time for circulant embedding and FFT:', end-start)
# xIn = np.zeros((L, M, N), dtype=np.complex128)
# f_rhs = P * P2
# xInVec = f_rhs.reshape((L*M*N, 1), order='F')
# start = time.time()
# xOut = mvp(9 * beta * np.real(k1)**2 / (rho * c0**2) * xInVec)
# end = time.time()
# print('Time for MVP:', end-start)
# # from IPython import embed; embed()
# P3 = xOut.reshape(L, M, N, order='F')
# total = P + P2 + P3
# # Plot harmonics along central axis
# x_line = (r[:, ny_centre, nz_centre, 0]) * 100
# fig = plt.figure(figsize=(14, 8))
# ax = fig.gca()
# plt.plot(x_line, np.abs(P[:, ny_centre, nz_centre])/1e6,'k-', linewidth=2)
# plt.plot(x_line, np.abs(P2[:, ny_centre, nz_centre])/1e6,'r-', linewidth=2)
# plt.plot(x_line, np.abs(P3[:, ny_centre, nz_centre])/1e6,'b-', linewidth=2)
# plt.plot(x_line, np.abs(total[:, ny_centre, nz_centre])/1e6,'g-', linewidth=2)
# plt.grid(True)
# # plt.xlim([1, 7])
# # plt.ylim([0, 9])
# plt.xlabel(r'Axial distance (cm)')
# plt.ylabel(r'Pressure (MPa)')
# fig.savefig('images/VIE_piston_harms_axis.png')
# plt.close()
# exit(1)
# # Right-hand side for computation of next harmonic
# f_rhs = P2 * P2 + 2 * P * P3
# # Plot f_rhs
# rel_p = np.log10(np.abs(f_rhs) / np.max(np.abs(f_rhs)))
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# matplotlib.rcParams.update({'font.size': 26})
# # matplotlib.rc('font',**{'family':'serif','serif':['Palatino']})
# # plt.rc('text', usetex=True)
# fig = plt.figure(figsize=(14, 8))
# ax = fig.gca()
# CS = plt.contourf(rel_p[:, :, np.int(np.round(N/2))].T,
# extent=[x_start, x_end, -wy/2, wy/2],
# levels=[-4, -3.5, -2.5, -2.0, -1.5, -1, -0.5, 0],
# cmap=plt.cm.viridis,
# extend='both')
# cbar = plt.colorbar(CS)
# CS.cmap.set_under('black')
# CS.changed()
# cbar.ax.set_ylabel('log$_{10}(|f_4|$/max$|f_4|)$')
# # labels
# plt.ylabel('$y$ (cm)')
# plt.xlabel('$z$ (cm)')
# fig.savefig('images/test_p4.png', dpi=300)
# plt.close()
# ################### P4 computation ######################
# # Create volume potential to evaluate next harmonic
# ko = 4 * k1
# start = time.time()
# toep = potential_fast(ko)
# end = time.time()
# print('Operator assembly time:', end-start)
# start = time.time()
# from operators import circulant_embed
# circ_op = circulant_embed(toep, L, M, N)
# end = time.time()
# print('Time for circulant embedding and FFT:', end-start)
# # For chopping of domain where f_rhs below a certain size
# rel_p = np.log10(np.abs(f_rhs)/np.max(np.abs(f_rhs)))
# # TOL = np.array([-0.5, -1, -1.5, -2, -2.5, -3, -3.5, -4])
# TOL = np.array([-0.5, -0.75, -1, -1.25, -1.5, -1.75, -2, -2.25, -2.5, -2.75,
# -3, -3.25, -3.5, -3.75, -4])
# line_harmonic = np.zeros((TOL.shape[0], L), dtype=np.complex128)
# xMinVals = np.zeros(TOL.shape[0])
# xMaxVals = np.zeros(TOL.shape[0])
# yMinVals = np.zeros(TOL.shape[0])
# yMaxVals = np.zeros(TOL.shape[0])
# for i_tol in range(TOL.shape[0]):
# where_bigger = np.argwhere(rel_p > TOL[i_tol])
# min_x_idx = np.min(where_bigger[:, 0])
# max_x_idx = np.max(where_bigger[:, 0])
# min_y_idx = np.min(where_bigger[:, 1])
# max_y_idx = np.max(where_bigger[:, 1])
# min_z_idx = np.min(where_bigger[:, 2])
# max_z_idx = np.max(where_bigger[:, 2])
# xMinVals[i_tol] = r[min_x_idx,0,0,0]
# xMaxVals[i_tol] = r[max_x_idx,0,0,0]
# yMinVals[i_tol] = r[0,min_y_idx,0,1]
# yMaxVals[i_tol] = r[0,max_y_idx,0,1]
# print('Size x = ', (max_x_idx-min_x_idx)*dx)
# print('Size y,z = ', (max_y_idx-min_y_idx)*dx)
# P_trim = np.zeros((L, M, N), dtype=np.complex128)
# P_trim[min_x_idx:max_x_idx, min_y_idx:max_y_idx, min_z_idx:max_z_idx] = \
# f_rhs[min_x_idx:max_x_idx, min_y_idx:max_y_idx, min_z_idx:max_z_idx]
# xIn = P_trim
# xInVec = xIn.reshape((L*M*N, 1), order='F')
# idx = np.ones((L, M, N), dtype=bool)
# mvp = lambda x: mvp_vec(x, circ_op, idx, Mr)
# # Voxel permittivities
# Mr = np.ones((L, M, N), dtype=np.complex128)
# start = time.time()
# xOut = mvp(8 * beta * np.real(k1)**2 / (rho * c0**2) * xInVec)
# end = time.time()
# print('Time for MVP:', end-start)
# # from IPython import embed; embed()
# field = xOut.reshape(L, M, N, order='F')
# line = field[:, ny_centre, nz_centre]
# line_harmonic[i_tol, :] = line
# # filename = 'results/axial_x60lam_yz20lam_nPerLam10.npy'
# # filename = 'results/axial_x40lam_vary_yz30lam_nPerLam20.npy'
# import pickle
# with open('results/axial_p4_vary_domain_size_log_fine_20vpl1.pickle', 'wb') as f:
# pickle.dump([line_harmonic, xMinVals, xMaxVals, yMinVals, yMaxVals], f)
# # with open('results/axial_vary_domain_length_10vpl.pickle', 'wb') as f:
# # pickle.dump([line_harmonic, X_SUB], f)
# exit(1)
# filename = 'results/axial_vary_domain_size_absolute_5vpl.npy'
# np.save(filename, line_harmonic)
# #-------------- Next harmonic ----------------#
# # Create volume potential to evaluate next harmonic
# ko = 4 * k1
# start = time.time()
# toep = potential_fast(ko)
# end = time.time()
# print('Operator assembly time:', end-start)
# start = time.time()
# from operators import circulant_embed
# circ_op = circulant_embed(toep, L, M, N)
# end = time.time()
# print('Time for circulant embedding and FFT:', end-start)
# xIn = np.zeros((L, M, N), dtype=np.complex128)
# xIn = field3
# xInVec3 = xIn.reshape((L*M*N, 1), order='F')
# start = time.time()
# xOut = mvp(8 * beta * np.real(k1)**2 / (rho * c0**2) * \
# (xInVec2 * xInVec2 + 2 * xInVec * xInVec3))
# end = time.time()
# print('Time for MVP:', end-start)
# # from IPython import embed; embed()
# field4 = xOut.reshape(L, M, N, order='F')
# # Third harmonic
# harmonics[3, :] = field4[:, ny_centre, nz_centre]
# fig = plt.figure(figsize=(10, 10))
# ax = fig.gca()
# plt.imshow(np.abs(field4[:, :, np.int(np.floor(N/2))].T),
# cmap=plt.cm.get_cmap('RdBu_r'), interpolation='spline16')
# plt.xlabel(r'$x$')
# plt.ylabel(r'$y$')
# plt.colorbar()
# fig.savefig('images/VIE_bowl_transducer_harm4_1p5cm.png')
# plt.close()
# # Plot harmonics along central axis
# x_line = (r[:, ny_centre, nz_centre, 0] + 0.05) * 100
# fig = plt.figure(figsize=(14, 8))
# ax = fig.gca()
# plt.plot(x_line, np.abs(harmonics[0,:])/1e6,'k-', linewidth=2)
# plt.plot(x_line, np.abs(harmonics[1,:])/1e6,'r-', linewidth=2)
# plt.plot(x_line, np.abs(harmonics[2,:])/1e6,'b-', linewidth=2)
# plt.plot(x_line, np.abs(harmonics[3,:])/1e6,'g-', linewidth=2)
# plt.grid(True)
# plt.xlim([1, 7])
# plt.ylim([0, 9])
# plt.xlabel(r'Axial distance (cm)')
# plt.ylabel(r'Pressure (MPa)')
# fig.savefig('images/VIE_harms_axis.png')
# plt.close()
# # np.savez('results/VIE_harms_1p5cm.npy', harms=harmonics, x_line=x_line)
# np.save('results/VIE_harms_1p5cm.npy', np.array([harmonics, x_line]))
# embed()
|
{"hexsha": "549558e02960ab7a17164759e59f5b4d38969f83", "size": 13677, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/hifu/circular_planar_piston.py", "max_stars_repo_name": "AndrewGibbs/vines", "max_stars_repo_head_hexsha": "4c4f75adc8f601f06e2ab12fbaa95a047ef4354e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-07-05T19:01:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T18:27:21.000Z", "max_issues_repo_path": "demo/hifu/circular_planar_piston.py", "max_issues_repo_name": "AndrewGibbs/vines", "max_issues_repo_head_hexsha": "4c4f75adc8f601f06e2ab12fbaa95a047ef4354e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo/hifu/circular_planar_piston.py", "max_forks_repo_name": "AndrewGibbs/vines", "max_forks_repo_head_hexsha": "4c4f75adc8f601f06e2ab12fbaa95a047ef4354e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-07-31T13:57:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-02T08:49:03.000Z", "avg_line_length": 33.0362318841, "max_line_length": 106, "alphanum_fraction": 0.5768808949, "include": true, "reason": "import numpy,from numba", "num_tokens": 4582}
|
import numpy as np
import matplotlib.pyplot as plt
"""
PLOTS PROBABILITY DISTRIBUTION FUNCTIONS
"""
def main():
    """Draw Poisson samples and plot their empirical distribution."""
    # Dead `vals = []` assignment removed: it was immediately overwritten
    # by the Poisson draw below.
    mu = 0.3
    np.random.seed(1)  # reproducible draws
    vals = np.random.poisson(mu, size=1000)
    print(vals)

    hist_vals = np.histogram(vals, bins=np.arange(0, 100))
    print(hist_vals)

    plt.figure()
    plt.hist(vals, bins=np.arange(0, 100), density=True)
    plt.show()

if __name__ == "__main__":
    main()
|
{"hexsha": "324d8851745a167d9b99732e8e7ff66640bc972e", "size": 434, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/testscripts/probs.py", "max_stars_repo_name": "alexberndt/mobile-AGV-optimization", "max_stars_repo_head_hexsha": "76b97fd5aa3898fd6cb6f74f8d87140555c92af5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-22T03:07:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T09:41:29.000Z", "max_issues_repo_path": "python/testscripts/probs.py", "max_issues_repo_name": "alexberndt/mobile-AGV-optimization", "max_issues_repo_head_hexsha": "76b97fd5aa3898fd6cb6f74f8d87140555c92af5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/testscripts/probs.py", "max_forks_repo_name": "alexberndt/mobile-AGV-optimization", "max_forks_repo_head_hexsha": "76b97fd5aa3898fd6cb6f74f8d87140555c92af5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-22T10:58:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-22T10:58:38.000Z", "avg_line_length": 18.8695652174, "max_line_length": 57, "alphanum_fraction": 0.633640553, "include": true, "reason": "import numpy", "num_tokens": 120}
|
# Solver variant registration: trust-region with MA57 factorization and an
# absolute-value diagonal modification. `fname` names the combination;
# the shared Template.jl consumes `fname` and `c` to build the solver.
fname = :TRMA57_abs
c = Combi(hessian_sparse,PDataMA57,solve_modelTRDiagAbs,preprocessMA57,decreaseFact,Tparam())
include("Template.jl")
|
{"hexsha": "6f6602ed713bc13d8963f39f761820108ede45fd", "size": 137, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Solvers/old/TRMA57_abs.jl", "max_stars_repo_name": "Goysa2/ARCTR.jl", "max_stars_repo_head_hexsha": "527772d3d25f0c7f45380bedd3060d720f5d0809", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Solvers/old/TRMA57_abs.jl", "max_issues_repo_name": "Goysa2/ARCTR.jl", "max_issues_repo_head_hexsha": "527772d3d25f0c7f45380bedd3060d720f5d0809", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Solvers/old/TRMA57_abs.jl", "max_forks_repo_name": "Goysa2/ARCTR.jl", "max_forks_repo_head_hexsha": "527772d3d25f0c7f45380bedd3060d720f5d0809", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.25, "max_line_length": 93, "alphanum_fraction": 0.8248175182, "num_tokens": 44}
|
/* Generated from orogen/lib/orogen/templates/tasks/Task.cpp */
#include "Task.hpp"
#include <imu_kvh_1750/Driver.hpp>
#include <base/samples/IMUSensors.hpp>
#include <base-logging/Logging.hpp>
#include <Eigen/Geometry>
#include <boost/numeric/conversion/cast.hpp>
using namespace imu_kvh_1750;
// Default constructor; the device file descriptor starts at 0 and is set
// by configureHook() once the driver opens the device.
Task::Task(std::string const& name)
    : TaskBase(name),
      fd(0)
{
}
// Constructor with an explicit execution engine.
// BUG FIX: this overload left `fd` uninitialized; initialize it to 0 to
// match the other constructor so pre-configure reads are well-defined.
Task::Task(std::string const& name, RTT::ExecutionEngine* engine)
    : TaskBase(name, engine),
      fd(0)
{
}
// Destructor; driver and estimator are released by their smart pointers.
Task::~Task()
{
}
/// The following lines are template definitions for the various state machine
// hooks defined by Orocos::RTT. See Task.hpp for more detailed
// documentation about them.
// Open the KVH 1750 IMU device and set up the timestamp estimator.
// Returns false (leaving the task unconfigured) if the base hook fails,
// the device cannot be opened, or the sampling frequency is invalid.
bool Task::configureHook()
{
    if (! TaskBase::configureHook())
        return false;
    /************************************/
    /** Configure IMU KVH 1750 driver  **/
    /************************************/
    kvh_driver.reset(new imu_kvh_1750::Driver());
    kvh_driver->setReadTimeout(base::Time::fromSeconds(_timeout.value()));
    kvh_driver->open(_device.value());
    // The descriptor is also used by startHook() to drive the
    // FileDescriptorActivity, so a valid one is mandatory.
    fd = kvh_driver->getFileDescriptor();
    if(fd < 0)
    {
        RTT::log(RTT::Error) << "Failed to open device. No valid file descriptor." << RTT::endlog();
        return false;
    }
    /*************************************/
    /** Configuration of Time estimator **/
    /*************************************/
    if(_sampling_frequency.value() <= 0.0)
    {
        RTT::log(RTT::Error) << "The sampling frequency has to be a positive non-zero value." << RTT::endlog();
        return false;
    }
    // 20 s estimation window; expected period is 1/sampling_frequency.
    timestamp_estimator.reset(new aggregator::TimestampEstimator(
        base::Time::fromSeconds(20),
        base::Time::fromSeconds(1.0 / _sampling_frequency.value()),
        base::Time::fromSeconds(0),
        INT_MAX));
    return true;
}
// Register the driver's file descriptor with the task's activity so the
// component is triggered on incoming IMU data; the activity timeout is set
// to twice the driver read timeout (in milliseconds).
bool Task::startHook()
{
    if (! TaskBase::startHook())
        return false;
    RTT::extras::FileDescriptorActivity* activity =
        getActivity<RTT::extras::FileDescriptorActivity>();
    if (activity)
    {
        activity->watch(fd);
        base::Time activity_timeout = base::Time::fromSeconds(2.0*_timeout.value());
        activity->setTimeout(boost::numeric_cast<int>(activity_timeout.toMilliseconds()));
    }
    return true;
}
// Read one IMU sample when the watched file descriptor has data, rotate it
// to the local frame, convert units, stamp it, and publish on all outputs.
void Task::updateHook()
{
    TaskBase::updateHook();

    // FIX: getActivity<>() returns NULL when the task is not driven by a
    // FileDescriptorActivity; the original dereferenced the pointer
    // unconditionally, while startHook()/stopHook() both guard it.
    RTT::extras::FileDescriptorActivity* act = getActivity<RTT::extras::FileDescriptorActivity>();
    if (!act)
        return;
    if(act->hasError())
        RTT::log(RTT::Warning) << "File descriptor activity has an error." << RTT::endlog();
    if(act->hasTimeout())
        RTT::log(RTT::Warning) << "File descriptor activity timeout." << RTT::endlog();
    if(act->isUpdated(fd))
    {
        base::samples::IMUSensors imusamples;
        kvh_driver->read();
        imusamples = kvh_driver->getIMUReading();

        /** rotate measurments to the local frame */
        if(!_axes_orientation.value().isZero())
        {
            imusamples.acc = _axes_orientation.value() * imusamples.acc;
            imusamples.gyro = _axes_orientation.value() * imusamples.gyro;
        }

        /** acceleration in m/s^2 */
        imusamples.acc = imusamples.acc * GRAVITY_SI; //g to m/s^2, KVH puts out acceleration in g

        if(_gyroscope_delta_rotation.value())
            /** gyroscopes in rad/s, KVH puts out the integrated delta rotation */
            imusamples.gyro = imusamples.gyro * _sampling_frequency.value();

        /** Estimate the current timestamp */
        imusamples.time = timestamp_estimator->update(imusamples.time, kvh_driver->getCounter());

        /** Output information **/
        _raw_sensors.write(imusamples);
        _calibrated_sensors.write(imusamples);
        _device_temperature.write(base::Temperature::fromCelsius(boost::numeric_cast<double>(kvh_driver->getTemperature())));
        _timestamp_estimator_status.write(timestamp_estimator->getStatus());
    }
}
// No component-specific error recovery; defer to the generated base class.
void Task::errorHook()
{
    TaskBase::errorHook();
}
// Stop watching the device descriptor and reset the timestamp estimator so a
// subsequent start begins with a clean estimate.
void Task::stopHook()
{
    TaskBase::stopHook();
    RTT::extras::FileDescriptorActivity* activity =
        getActivity<RTT::extras::FileDescriptorActivity>();
    if (activity)
    {
        activity->clearAllWatches();
        //set timeout back so we don't timeout on the rtt's pipe
        activity->setTimeout(0);
    }
    timestamp_estimator->reset();
}
// Release the device and destroy the driver/estimator instances (the
// shared-pointer reset() calls free the objects created in configureHook()).
void Task::cleanupHook()
{
    TaskBase::cleanupHook();
    kvh_driver->close();
    fd = 0;
    timestamp_estimator.reset();
    kvh_driver.reset();
}
|
{"hexsha": "06b64aea2331a486a9075f7af5027a10c1d68281", "size": 4247, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tasks/Task.cpp", "max_stars_repo_name": "Brazilian-Institute-of-Robotics/orogen-imu_kvh_1750", "max_stars_repo_head_hexsha": "58e96581a770e9300fef4f7cb522fe7747cf5018", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-06-16T12:48:07.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-16T12:48:07.000Z", "max_issues_repo_path": "tasks/Task.cpp", "max_issues_repo_name": "Brazilian-Institute-of-Robotics/orogen-imu_kvh_1750", "max_issues_repo_head_hexsha": "58e96581a770e9300fef4f7cb522fe7747cf5018", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2016-05-17T13:27:39.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-07T22:20:49.000Z", "max_forks_repo_path": "tasks/Task.cpp", "max_forks_repo_name": "Brazilian-Institute-of-Robotics/orogen-imu_kvh_1750", "max_forks_repo_head_hexsha": "58e96581a770e9300fef4f7cb522fe7747cf5018", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.050955414, "max_line_length": 118, "alphanum_fraction": 0.6576406875, "num_tokens": 1041}
|
[STATEMENT]
lemma Pair_Agent: "Pair X Y \<noteq> Agent X'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Messages.Pair X Y \<noteq> Messages.Agent X'
[PROOF STEP]
by transfer auto
|
{"llama_tokens": 75, "file": "Key_Agreement_Strong_Adversaries_Messages", "length": 1}
|
"""
~~ bluestoned ~~
detect chroma keys in video and image files
(c) 2019 Nik Cubrilovic <git@nikcub.me>
"""
import argparse
import os
import sys
import time
import logging
import requests
import shutil
import tempfile
__version__ = '0.1.2'
try:
import cv2
except ImportError:
print("Error importing opencv make sure all requirements are satisfied.")
print("see OpenCV install docs for your platform")
import numpy as np
import verboselogs
from tqdm import tqdm
verboselogs.install()
# Module-level logger; configured by _setup_logger() at startup.
LOG = logging.getLogger("main")
# this is the upper and lower blue color bounds we are searching for
# opencv HSV values differ from most image editing programs hence the calcs
# BLUE_LOWER = np.array([232 / 2, 0.81 * 255, 0.75 * 255])
# BLUE_UPPER = np.array([240 / 2, 1 * 255, 1 * 255])
# OpenCV hue range is 0-179 (degrees / 2); saturation/value are 0-255
# (fraction * 255) -- hence the divisions/multiplications below.
BLUE_LOWER = np.array([240 / 2, 0.81 * 255, 0.75 * 255])
BLUE_UPPER = np.array([243 / 2, 1 * 255, 1 * 255])
# File extensions treated as video / image inputs by analyze_dir()/analyze_file().
VALID_VIDEO_EXT = set(
    [
        ".mp4",
        ".mkv",
        ".avi",
        ".webm"
    ]
)
VALID_IMAGE_EXT = set([".jpeg", ".jpg", ".png", ".gif", ".bmp", ".tiff"])
def analyze_image(
    img_file,
    threshold=0,
    show_detections=False,
    save_detections=False,
    save_detections_path=None,
):
    """Analyze a single image for chroma-key regions.

    Returns True when the chroma-key mask pixel count exceeds ``threshold``
    (after optionally showing and/or saving the annotated image), False
    otherwise.  Raises if ``img_file`` does not exist.
    """
    LOG.debug(
        "Running analyze_image with img_file=%s save_detections=%s save_detections_path=%s",
        img_file,
        save_detections,
        save_detections_path,
    )
    LOG.debug("Analyzing image %s", img_file)
    if not os.path.isfile(img_file):
        raise Exception("Not an image: {}".format(img_file))

    frame = cv2.imread(img_file)
    mask, mask_count = _get_mask_for_frame(frame)

    # Guard clause: nothing above the detection threshold means no annotation.
    if mask_count <= threshold:
        return False

    LOG.info("Found screen mask %d in image %s", mask_count, img_file)
    _draw_bounding_boxes(frame, mask, mask_count)

    out_dir = os.path.realpath(save_detections_path or "output")
    LOG.debug("Setting results directory to %s", out_dir)
    if not os.path.isdir(out_dir):
        LOG.debug("No results directory so creating")
        os.mkdir(out_dir)

    out_name = _get_result_filename(img_file)
    _draw_timestamp(frame, " {}".format(mask_count))

    if show_detections:
        cv2.imshow(out_name, frame)
        # keep the image open until the user hits q
        while cv2.waitKey(5) & 0xFF != ord("q"):
            pass

    if save_detections:
        out_path = os.path.join(out_dir, out_name)
        LOG.info("Saving result file to %s", out_path)
        cv2.imwrite(out_path, frame)

    cv2.destroyAllWindows()
    return True
def analyze_video(
    video_file,
    threshold=0,
    max_only=True,
    output_video=None,
    show_detections=False,
    save_detections=False,
    save_detections_path=None,
):
    """
    analyze a video to spot bluescreens/chroma keys and write the result

    Scans frames, tracking "runs" of consecutive frames whose chroma-key mask
    pixel count exceeds ``threshold``.  When a run ends, its peak frame is
    reported and optionally saved/shown.  When ``output_video`` is truthy an
    annotated copy of the video is written.

    Returns True when the video was processed; raises if the file is missing.
    """
    LOG.debug(
        "Running analyze_video with video_file=%s save_detections=%s save_detections_path=%s",
        video_file,
        save_detections,
        save_detections_path,
    )
    if not os.path.isfile(video_file):
        raise Exception("Invalid video file: {}".format(video_file))
    start_time = time.time()
    cap = cv2.VideoCapture(video_file)
    time.sleep(3)  # give the capture backend time to open the stream
    frame_count = 0
    frames_processed = 0
    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    frame_total_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    LOG.info(
        "Opened %s (%ffps %dx%d) with %s total frames",
        video_file,
        fps,
        frame_width,
        frame_height,
        frame_total_length,
    )
    out = None
    max_mask_count = 0  # peak mask pixel count within the current detection run
    max_mask_time = None
    max_mask_frame = None
    detections = 0
    result_directory = "outputs" if not save_detections_path else save_detections_path
    result_directory = os.path.realpath(os.path.expanduser(result_directory))
    LOG.debug("Setting results directory to %s", result_directory)
    if not os.path.isdir(result_directory):
        LOG.debug("No results directory so creating")
        os.mkdir(result_directory)
    if output_video:
        output_video_extension = "mp4"
        output_video_file = "{}-output.{}".format(
            os.path.splitext(video_file)[0], output_video_extension
        )
        output_video_path = os.path.join(result_directory, output_video_file)
        out = cv2.VideoWriter(
            output_video_path,
            cv2.VideoWriter_fourcc(
                "M", "J", "P", "G"
            ),  # @TODO this is a shit format find something better
            10,
            (frame_width, frame_height),
        )
        LOG.verbose("Saving output video to %s", output_video_path)
    pbar = tqdm(total=frame_total_length, unit="frames")
    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            LOG.verbose("End of video")
            pbar.update(frame_total_length)
            break
        frame_count += 1
        # this is a bit of a hack but we know the source is < 60fps
        if int(fps) == 60 and (frame_count % 2 == 0):
            pbar.update(1)
            continue
        frames_processed += 1
        pbar.update(1)
        mask, mask_count = _get_mask_for_frame(frame)
        if not mask_count and max_mask_count:
            # A detection run just ended: report and save/show its peak frame.
            LOG.info(
                "Mask count %d found in %s at %s",
                max_mask_count,
                video_file,
                _time_conver_ms_to_timestring(max_mask_time),
            )
            result_filename = _get_result_filename(video_file, "jpeg", detections)
            if save_detections:
                result_filepath = os.path.join(result_directory, result_filename)
                LOG.info("Saving result file to %s", result_filepath)
                cv2.imwrite(result_filepath, max_mask_frame)
            if show_detections:
                cv2.imshow(result_filename, max_mask_frame)
            detections += 1
        if not mask_count:
            max_mask_count = 0
        if mask_count > threshold:
            max_mask_time = cap.get(cv2.CAP_PROP_POS_MSEC)
            LOG.verbose(
                "Mask count %d found in %s at %s (%d)",
                mask_count,
                video_file,
                _time_conver_ms_to_timestring(max_mask_time),
                frame_count,
            )
            frame = _draw_bounding_boxes(frame, mask, mask_count)
            _draw_timestamp(
                frame,
                "{} ({})".format(
                    _time_conver_ms_to_timestring(max_mask_time), mask_count
                ),
            )
            # save the peak mask_count frame for this detection
            # FIX: the original never assigned mask_count to max_mask_count,
            # so the run-ended branch above could never fire and finished
            # detections were never reported or saved.
            if mask_count > max_mask_count:
                max_mask_count = mask_count
                max_mask_frame = frame
        if out:
            out.write(frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    if out:
        out.release()
    cap.release()
    cv2.destroyAllWindows()
    time_finish = time.time()
    # FIX: elapsed time is (finish - start) seconds; convert to milliseconds
    # for the formatter (the original passed a negative seconds value).
    LOG.info(
        "Processed %d of %d frames in %s and found %d key frames in %s",
        frames_processed,
        frame_count,
        video_file,
        detections,
        _time_conver_ms_to_timestring((time_finish - start_time) * 1000),
    )
    return True
def _get_mask_for_frame(frame):
    """Return the (mask, pixel_count) for BGR ``frame``.

    The frame is converted to HSV, median-blurred to suppress noise, and
    thresholded against the global BLUE_LOWER/BLUE_UPPER bounds.
    """
    blurred_hsv = cv2.medianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2HSV), 5)
    mask = cv2.inRange(blurred_hsv, BLUE_LOWER, BLUE_UPPER)
    return mask, cv2.countNonZero(mask)
def _draw_bounding_boxes(frame, mask, mask_count=0):
    """Draw a rectangle around the largest masked region and return the frame.

    Rectangle colour encodes severity: blue for mask_count < 3, green for
    3..30, red above 30 (BGR tuples).  The frame is modified in place.
    """
    # find the countour from the bitmask
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    largest = max(contours, key=cv2.contourArea)
    # draw the rectangle around the contour
    x, y, w, h = cv2.boundingRect(largest)
    if mask_count < 3:
        rec_color = (255, 0, 0)
    elif mask_count <= 30:
        rec_color = (0, 255, 0)
    else:
        rec_color = (0, 0, 255)
    cv2.rectangle(frame, (x - 30, y - 30), (x + w + 60, y + h + 60), rec_color, 5)
    return frame
def _draw_timestamp(frame, timestamp, height_from_bottom=30):
    """Overlay ``timestamp`` as white text near the bottom of ``frame``.

    Returns True on success, False (after logging) if OpenCV fails to render.
    """
    frame_h, frame_w, _ = frame.shape
    baseline_y = frame_h - height_from_bottom
    try:
        cv2.putText(
            frame,
            str(timestamp),
            (50, baseline_y),
            cv2.FONT_HERSHEY_PLAIN,
            4,
            (255, 255, 255),
            3,
            cv2.LINE_AA,
        )
    except Exception as ex:
        LOG.error("Could not write timestamp to frame: %s", str(ex))
        return False
    return True
def _get_result_filename(filename, extension=None, index=None):
_components = os.path.splitext(os.path.basename(filename))
file_name = _components[0]
file_extension = extension or _components[1][1:]
index_format = ""
if isinstance(index, int):
index_format = "-{0:05d}".format(index)
return "{}-result{}.{}".format(file_name, index_format, file_extension)
def _time_conver_ms_to_timestring(millis):
" convert millisecond time delta as a float into a string describing time in HH:mm:ss "
millis = float(millis)
seconds = (millis / 1000) % 60
minutes = (millis / (1000 * 60)) % 60
hours = (millis / (1000 * 60 * 60)) % 24
return "{0:02d}:{1:02d}.{2:02d}".format(int(hours), int(minutes), int(seconds))
def _setup_logger(verbosity, log_file):
log = logging.getLogger("main")
for h in log.handlers:
log.removeHandler(h)
log_formatter = logging.Formatter("[%(levelname)s] %(message)s")
sh = logging.StreamHandler()
sh.setFormatter(log_formatter)
log.addHandler(sh)
if log_file:
fh = logging.FileHandler(filename=log_file)
fh.setFormatter(log_formatter)
log.addHandler(fh)
log.setLevel(logging.INFO)
if verbosity >= 2:
log.setLevel(logging.DEBUG)
elif verbosity >= 1:
log.setLevel(logging.VERBOSE)
elif verbosity >= 0:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.ERROR)
return log
def _download_file(url):
    """Download ``url`` to a temporary file and return the file's path.

    FIX: the original returned the URL's basename, a path nothing was ever
    written to, while the data went to an unreturned NamedTemporaryFile.
    Now the actual temp-file path is returned, and non-2xx responses raise.
    """
    with requests.get(url, stream=True) as rh, tempfile.NamedTemporaryFile(
        mode="wb", delete=False
    ) as fh:
        rh.raise_for_status()
        shutil.copyfileobj(rh.raw, fh)
        return fh.name
def analyze_dir(_dir, **kwargs):
    """ walk a directory and analyze videos """
    if not os.path.isdir(_dir):
        raise Exception("Invalid directory: {}".format(_dir))
    total = 0
    for entry in os.listdir(_dir):
        ext = os.path.splitext(entry)[1]
        full_path = os.path.join(_dir, entry)
        if ext in VALID_VIDEO_EXT:
            total += analyze_video(full_path, **kwargs)
        if ext in VALID_IMAGE_EXT:
            # image analysis has no video-output option
            kwargs.pop("output_video", None)
            total += analyze_image(full_path, **kwargs)
    return total
def analyze_file(_file, **kwargs):
    """Dispatch ``_file`` to the video or image analyzer based on extension.

    Raises when the extension is neither a known video nor image type.
    FIX: removed the unreachable ``return False`` after the raise.
    """
    _file_ex = os.path.splitext(_file)[1]
    if _file_ex in VALID_VIDEO_EXT:
        return analyze_video(_file, **kwargs)
    if _file_ex in VALID_IMAGE_EXT:
        return analyze_image(_file, **kwargs)
    raise Exception("Not a valid path {}".format(_file))
def run_tests(_path):
    """Run detection self-tests against ``_path``.

    Images under ``<path>/true`` must trigger a detection; images under
    ``<path>/false`` must not.  Failures are logged, not raised.
    """
    if not os.path.isdir(_path):
        raise Exception("invalid tests folder {}".format(_path))
    true_dir = os.path.join(_path, "true")
    false_dir = os.path.join(_path, "false")
    if not os.path.isdir(true_dir):
        raise Exception("invalid tests true path {}".format(true_dir))
    if not os.path.isdir(false_dir):
        raise Exception("invalid tests false path {}".format(false_dir))
    detection_threshold = 8
    for name in os.listdir(true_dir):
        if not analyze_image(os.path.join(true_dir, name), threshold=detection_threshold):
            LOG.error("Failed test for {}".format(name))
    for name in os.listdir(false_dir):
        if analyze_image(os.path.join(false_dir, name), threshold=detection_threshold):
            LOG.error("Failed test for {}".format(name))
def main():
    """Parse command-line arguments and dispatch to the test runner, the
    directory walker, or the single-file analyzer.

    Returns the analyzer's result; cli() uses it as the process exit status.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("path", help="Path or file to evaluate")
    # NOTE(review): "-o" takes a value (no action= given), it is not a boolean
    # switch -- confirm whether a store_true flag was intended.
    parser.add_argument(
        "-o", dest="save_video", help="save output video with bounding boxes"
    )
    parser.add_argument(
        "-s",
        "--save",
        dest="save_detections",
        action="store_true",
        default=False,
        help="save detections as images",
    )
    parser.add_argument(
        "-t",
        "--threshold",
        dest="threshold",
        default=3,
        type=int,
        help="set threshold (default: 3)",
    )
    parser.add_argument("-f", dest="log_file", default=None, help="save logs to file")
    parser.add_argument(
        "-p",
        dest="save_detections_path",
        default="output",
        help="save detection images to path (default: output)",
    )
    # -v/-q share dest="verbosity": repeated -v counts up, -q forces -1.
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbosity",
        default=0,
        help="verbose output (repeat for increased verbosity)",
    )
    parser.add_argument(
        "-d",
        "--debug",
        dest="debug",
        action="store_true",
        default=False,
        help="run in debug mode",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_const",
        const=-1,
        default=0,
        dest="verbosity",
        help="quiet output (show errors only)",
    )
    parser.add_argument(
        "--test",
        dest="test",
        action="store_true",
        default=False,
        help="run tests against test folder",
    )
    args = parser.parse_args()
    # Debug mode implies maximum verbosity.
    if args.debug:
        args.verbosity = 5
    _setup_logger(args.verbosity, args.log_file)
    print("bluestoned v{}".format(__version__))
    LOG.debug("Running with %s \n %r", sys.argv, args)
    LOG.debug("OpenCV version %s", cv2.__version__)
    LOG.debug(cv2.getBuildInformation())
    _path = os.path.realpath(args.path)
    # --test bypasses the normal analyzers (and ignores most other flags).
    if args.test:
        return run_tests(_path)
    if os.path.isdir(_path):
        return analyze_dir(
            _path,
            threshold=args.threshold,
            output_video=args.save_video,
            save_detections=args.save_detections,
            save_detections_path=args.save_detections_path,
        )
    else:
        # Single-file mode does not forward the output-video option.
        return analyze_file(
            _path,
            threshold=args.threshold,
            save_detections=bool(args.save_detections),
            save_detections_path=args.save_detections_path,
        )
def cli():
    """Console entry point.

    Exits with main()'s return value; prints a notice on Ctrl-C; on any other
    error logs the message (stack trace only at debug level) and exits 1.
    """
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        print("Interrupted")
    except Exception as err:
        LOG.error(str(err))
        LOG.debug("", exc_info=True)
        sys.exit(1)
|
{"hexsha": "97b5d26eabe5ec01ebed15b3af03c9bf8ea9ba7d", "size": 15461, "ext": "py", "lang": "Python", "max_stars_repo_path": "bluestoned/main.py", "max_stars_repo_name": "nc9/bluestoned", "max_stars_repo_head_hexsha": "f39fedcc9bb61ca56242687aed36917cd9402b9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bluestoned/main.py", "max_issues_repo_name": "nc9/bluestoned", "max_issues_repo_head_hexsha": "f39fedcc9bb61ca56242687aed36917cd9402b9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bluestoned/main.py", "max_forks_repo_name": "nc9/bluestoned", "max_forks_repo_head_hexsha": "f39fedcc9bb61ca56242687aed36917cd9402b9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5652920962, "max_line_length": 106, "alphanum_fraction": 0.6140611862, "include": true, "reason": "import numpy", "num_tokens": 3701}
|
#include <action_executor/action_executor.h>
#include <chrono>
#include <thread>
#include <boost/bind.hpp>
#include <fstream>
#define SHOOT_TYPE_DEFAULT 0
#define EXECUTOR_IDLE 200
#define K_GIMBAL 1.0
#define K_DRONE 1.0
#define K_DRONE_YAW 0.3
#define GIMBAL_FREQ 0.1
#define TRAILER_FREQ 0.1
#define MAIN_FREQ 0.033
#define RT_FREQ 0.033
#define TO_DEG 180/M_PI
#define TO_RAD M_PI/180
#define MAX_DRONE_SPEED 1
#define K1_GIMBAL 1.0
#define K2_GIMBAL 0.5
#define K_OFFSET 2.0
// Constructor
// Constructor: reads parameters, wires up all ROS I/O, initialises the
// gimbal/camera state and blocks spinning the node until shutdown.
Executor::Executor(int _argc, char** _argv)
{
    ros::init(_argc, _argv, "Executor");
    ros::NodeHandle nh;
    ros::NodeHandle pnh("~");

    // Controller gains and camera geometry (defaults from the #defines above).
    pnh.param<int>("drone_id", drone_id_, 1);
    pnh.param<float>("k1_gimbal", k_1, K1_GIMBAL);
    pnh.param<float>("k2_gimbal", k_2, K2_GIMBAL);
    pnh.param<float>("k_offset", k_offset, K_OFFSET);
    pnh.param<int>("image_width", w_im, 640);
    pnh.param<int>("image_height",h_im, 480);
    pnh.param<float>("MAX_YAW_RATE", MAX_YAW_RATE, 0.2);
    pnh.param<double>("trailer", trailer_size, 0.5);

    // Camera position w.r.t. the drone body frame.
    // FIX: guard the vector before indexing -- the original read
    // p_C_B_aux[0..2] unconditionally, which is undefined behaviour when the
    // "p_C_B" parameter is missing (empty vector).
    std::vector<double> p_C_B_aux;
    pnh.getParam("p_C_B",p_C_B_aux);
    if (p_C_B_aux.size() >= 3)
    {
        p_C_B.x() = p_C_B_aux[0];
        p_C_B.y() = p_C_B_aux[1];
        p_C_B.z() = p_C_B_aux[2];
    }
    else
        ROS_WARN("Parameter p_C_B missing or incomplete; keeping default camera offset");

    pnh.param<bool>("simulation",simulation,true);
    pnh.param<bool>("onboard",onboard, true);
    pnh.param<bool>("offset", offset, false);
    pnh.param<bool>("f_target", f_target, false);

    // Framing limits: image thirds expressed as offsets from the image centre.
    target_image_offset_x_MAX = w_im/2;
    target_image_offset_x_MIN = -target_image_offset_x_MAX;
    target_image_offset_x_13 = w_im/3-w_im/2;
    target_image_offset_x_23 = w_im*2/3-w_im/2;
    target_image_offset_y_MAX = h_im/2;
    target_image_offset_y_MIN = -target_image_offset_y_MAX;
    target_image_offset_y_13 = h_im/3-h_im/2;
    target_image_offset_y_23 = h_im*2/3-h_im/2;

    ROS_INFO("Setting up Executor %d", drone_id_);

    // Subscribers. NOTE: these handles are constructor locals; they stay
    // alive because the constructor never returns (spinner.spin() below).
    ros::Subscriber target_array_sub_;
    if (onboard)
        target_array_sub_ = nh.subscribe<multidrone_msgs::TargetStateArray>("/target_3d_state", 1, &Executor::targetarrayCallback, this);
    else
        target_array_sub_ = nh.subscribe<multidrone_msgs::TargetStateArray>("/targets_pose", 1, &Executor::targetarrayCallback, this);
    ros::Subscriber drone_pose_sub = nh.subscribe<geometry_msgs::PoseStamped>("ual/pose", 1, &Executor::dronePose, this);
    ros::Subscriber drone_vel_sub = nh.subscribe<geometry_msgs::TwistStamped>("ual/velocity",1, &Executor::droneVelocity, this);
    ros::Subscriber drone_state_sub = nh.subscribe<uav_abstraction_layer::State>("ual/state",1, &Executor::droneState, this);
    ros::Subscriber gimbal_status_sub = nh.subscribe<multidrone_msgs::GimbalStatus>("gimbal/status",1, &Executor::gimbalStatus, this);
    ros::Subscriber focus_sub_;
    if(f_target)
        focus_sub_ = nh.subscribe<std_msgs::Int32>("visual_analysis/focus_value_target", 1, &Executor::focusCallback, this);
    else
        focus_sub_ = nh.subscribe<std_msgs::Int32>("visual_analysis/focus_value_image", 1, &Executor::focusCallback, this);

    // ########### Communication with SWAP ########## //
    ros::Subscriber confl_warning_sub_ = nh.subscribe("collision_warning", 1, &Executor::collisionWarningCallback, this);
    wished_mov_dir_pub_ = nh.advertise<geometry_msgs::Vector3>("wished_movement_direction",1 , true); // the final true is required
    ros::Subscriber avoid_mov_dir_sub_ = nh.subscribe("avoid_movement_direction", 1, &Executor::avoidMovementCallback, this);
    // ########### ####################### ########### //

    // Publishers
    gimbal_cmd_basecam_pub = nh.advertise<geometry_msgs::Vector3>("gimbal/cmd",1);
    go_to_waypoint_pub_ = nh.advertise<geometry_msgs::PoseStamped>("ual/set_pose",1);
    set_velocity_pub_ = nh.advertise<geometry_msgs::TwistStamped>("ual/set_velocity",1);
    trajectory_ = nh.advertise<nav_msgs::Odometry>("desired_trajectory",1);
    lyapunov_ = nh.advertise<geometry_msgs::Point32>("gimbal_lyapunov",1);
    pixel_publish = nh.advertise<geometry_msgs::Vector3>("pixel_position",1);

    // Service Client
    cmd_long_client_ = nh.serviceClient<mavros_msgs::CommandLong>("mavros/cmd/command");
    set_param_client_ = nh.serviceClient<mavros_msgs::ParamSet>("mavros/param/set");
    get_param_client_ = nh.serviceClient<mavros_msgs::ParamGet>("mavros/param/get");
    take_off_client_ = nh.serviceClient<uav_abstraction_layer::TakeOff>("ual/take_off");
    go_to_waypoint_client_ = nh.serviceClient<uav_abstraction_layer::GoToWaypoint>("ual/go_to_waypoint");
    land_client_ = nh.serviceClient<uav_abstraction_layer::Land>("ual/land");
    follow_target_client_ = nh.serviceClient<multidrone_msgs::FollowTarget>("follow_target");
    set_framing_type_client_ = nh.serviceClient<multidrone_msgs::SetFramingType>("visual_analysis/set_framing_type");
    camera_control_client_ = nh.serviceClient<multidrone_msgs::CameraControl>("camera_control");

    // Service Server
    ros::ServiceServer manual_controls_service_ = nh.advertiseService("manual_controls", &Executor::ManualControlServiceCallback, this);

    if(simulation) {
        std::vector<double> camera_matrix;
        double cols, rows;
        pnh.getParam("camera_matrix/rows",rows);
        pnh.getParam("camera_matrix/cols",cols);
        camera_matrix.reserve(cols*rows);
        pnh.getParam("camera_matrix/data",camera_matrix);
        K << K_min , 0.000000, 319.5, // BMMCC camera_matrix minimum zoom
             0.000000, K_min, 239.5,
             0.000000, 0.000000, 1.000;
        ROS_INFO("Executor [%d] initialized in simulation mode!", drone_id_);
        // Initialise gimbal cmd
#ifdef MAVROS_VERSION_BELOW_0_25_0
        gimbal_cmd_.request.command = mavros_msgs::CommandCode::CMD_DO_MOUNT_CONTROL;
#else
        gimbal_cmd_.request.command = mavros_msgs::CommandCode::DO_MOUNT_CONTROL;
#endif
        gimbal_cmd_.request.confirmation = false;
        gimbal_cmd_.request.param7 = 2; // MAV_MOUNT_MODE::MAV_MOUNT_MODE_MAVLINK_TARGETING
        p_C_B << -0.051, 0, 0.162; // gimbal position with respect to drone CG
        // Set MNT_MODE parameters on FCU for gimbal control via Mavlink
        setMountModeParameters();
    }
    else{
        K << K_min , 0.000000, 319.5, // BMMCC camera_matrix minimum zoom
             0.000000, K_min, 239.5,
             0.000000, 0.000000, 1.000;
        ROS_INFO("Executor [%d] initialized in real mode", drone_id_);
    }

    // Reset the camera to its default state before starting.
    multidrone_msgs::ManualControls manual_control_msg_;
    manual_control_msg_.request.control = multidrone_msgs::ManualControls::Request::CAMERA_RESET;
    ManualControlServiceCallback(manual_control_msg_.request, manual_control_msg_.response);

    w_est_ << 0.0, 0.0, 0.0;
    w_est << 0.0, 0.0, 0.0;
    R_S_I << 1, 0, 0,
             0, -1, 0,
             0, 0, -1;

    // Wait until the initial drone pose AND velocity have been received.
    // FIX: parenthesised -- '&&' binds tighter than '||', so the original
    // `!a || !b && ok()` kept spinning after ROS shutdown whenever the pose
    // had not yet arrived.
    while ((!has_drone_pose_ || !has_drone_vel_) && ros::ok()) {
        std::this_thread::sleep_for(std::chrono::milliseconds(200));
        ros::spinOnce();
    }

    // action service
    server_ = new Server(nh, "action_server", false);
    server_->registerGoalCallback(boost::bind(&Executor::actionCallback, this));
    server_->registerPreemptCallback(boost::bind(&Executor::preemptCallback, this));
    server_->start();

    t_0 = ros::Time::now().toSec();

    // start Timer (periods in seconds, so e.g. MAIN_FREQ = 0.033 -> ~33Hz)
    timer_ = nh.createTimer(ros::Duration(MAIN_FREQ), &Executor::timerCallback, this); //33Hz
    timer_trailer = nh.createTimer(ros::Duration(TRAILER_FREQ), &Executor::timerCallbackTrailer, this, false, false); //10Hz
    timer_gimbal = nh.createTimer(ros::Duration(GIMBAL_FREQ), &Executor::timerCallbackGimbal, this, false, false); //5Hz
    timer_rt = nh.createTimer(ros::Duration(RT_FREQ), &Executor::referenceTrajectory, this, false, false); //33Hz

    ROS_INFO("Executor [%d] running", drone_id_);
    ros::MultiThreadedSpinner spinner(4); // Use 4 threads
    spinner.spin();
}
// Destructor
// Destructor: free the action server allocated in the constructor.
Executor::~Executor()
{
    delete server_;
};
// Timer callback
// Main control loop. Publishes the Lyapunov debug value, then either follows
// the SWAP collision-avoidance direction (when a conflict warning is active)
// or dispatches on the current drone action, starting/stopping the secondary
// timers accordingly.
void Executor::timerCallback(const ros::TimerEvent&)
{
    lyapunov_publisher();
    // Collision avoidance overrides any ongoing action: command the avoidance
    // direction in the horizontal plane at constant altitude.
    if(confl_warning_){
        setpoint_vel_.twist.linear.x = avoid_mov_direction_.x;
        setpoint_vel_.twist.linear.y = avoid_mov_direction_.y;
        setpoint_vel_.twist.linear.z = 0;
        set_velocity_pub_.publish(setpoint_vel_);
    }
    else
    {
        switch (action_type_){
        case multidrone_msgs::DroneAction::TYPE_TAKEOFF:{
            timer_trailer.stop();
            takeOff();
            // After take-off, reset camera focus via the manual-controls path.
            multidrone_msgs::ManualControls manual_control_msg_;
            manual_control_msg_.request.control = multidrone_msgs::ManualControls::Request::FOCUS_RESET;
            ManualControlServiceCallback(manual_control_msg_.request, manual_control_msg_.response);
            break;
        }
        case multidrone_msgs::DroneAction::TYPE_LAND:
            // Stop all tracking timers, park the gimbal, then land.
            running_trailer = false;
            timer_trailer.stop();
            timer_gimbal.stop();
            timer_trajectory.stop();
            gimbalSafeMode();
            land();
            break;
        case multidrone_msgs::DroneAction::TYPE_GOTOWAYPOINT:
            timer_trailer.stop();
            timer_gimbal.stop();
            timer_trajectory.stop();
            gimbalSafeMode();
            running_trailer = false;
            feedback_.status= true;
            server_->publishFeedback(feedback_);
            goToWaypoint();
            break;
        case multidrone_msgs::DroneAction::TYPE_SHOOTING:
            // Start the shooting pipeline exactly once per action: initialise
            // the trailer with the heading of the first waypoint segment.
            if (!running_trailer) {
                timer_gimbal.start();
                if (parameters_set){
                    if (list_wp_.size() == 1 || list_wp_.size() == 0)
                        orientation_0 = 0;
                    else
                        orientation_0 = atan2 (list_wp_[1].point.y - list_wp_[0].point.y, list_wp_[1].point.x - list_wp_[0].point.x);
                    gimbalSafeMode();
                    running_trailer = true;
                    trailer.init(orientation_0,rt_target_pos_);
                    timer_trailer.start();
                    timer_trajectory.start();
                    parameters_set = false;
                    feedback_.status= true;
                    server_->publishFeedback(feedback_);
                    // std::cout << "GOT A SA\n\n";
                }
            }
            break;
        default:
            // Unknown/idle action: shut the tracking pipeline down.
            if (running_trailer) {
                timer_trailer.stop();
                timer_gimbal.stop();
                timer_trajectory.stop();
                running_trailer = false;
                gimbalSafeMode();
            }
        }
    }
}
// Gimbal control loop: select the gimbal control mode, run the camera zoom
// schedule, and expire stale sensor/status flags.
void Executor::timerCallbackGimbal(const ros::TimerEvent&)
{
    // With a live gimbal status (or in simulation), run either the scripted
    // pan/tilt interpolation (no target) or the target-tracking controller;
    // otherwise park the gimbal.
    if (has_gimbal_status_ || simulation) {
        if (shooting_mode_ == multidrone_msgs::TargetIdentifierType::NONE)
            gimbalAutomatic(pan_s,tilt_s,pan_e,tilt_e, duration_.data.sec, start_time);
        else
            gimbalController();
    }else
        gimbalSafeMode();

    if(shooting_mode_ == multidrone_msgs::TargetIdentifierType::VISUAL || shooting_mode_ == multidrone_msgs::TargetIdentifierType::VISUAL_GPS)
        if(has_visual_2D_target_status_)
            CameraControl();

    // Step the focal length K(0,0)/K(1,1) through four zoom levels as `count`
    // progresses toward count_lim.
    // NOTE(review): `count`/`count_lim` are updated elsewhere -- confirm.
    if (count < count_lim/4)
        K(0) = K_min;
    else if (count < count_lim/2)
        K(0) = (3*K_min+K_max)/4;
    else if (count < 3*count_lim/4)
        K(0) = (K_min+3*K_max)/4;
    else
        K(0) = K_max;
    K(4) = K(0);

    // Invalidate status flags not refreshed within the last second.
    double t_now;
    t_now = ros::Time::now().toSec();
    if (t_now - time_of_last_gimbal_status > 1)
        has_gimbal_status_ = false;
    if (t_now - time_of_last_target_status > 1)
        has_shooting_target_status_ = false;
    if (t_now - time_of_last_target_visual_2D_status > 1)
        has_visual_2D_target_status_ = false;
    if (t_now - time_of_last_drone_pose > 1)
        has_drone_pose_ = false;
}
// Trailer loop: when a fresh reference-target sample is available, advance
// the virtual trailer, command the drone to follow it and publish the
// desired trajectory. The freshness flag is consumed each cycle.
void Executor::timerCallbackTrailer(const ros::TimerEvent&)
{
    if (has_rt_target_status_){
        trailer.run_trailer(rt_target_pos_);
        followVehicle();
        trajectory_publisher();
    }
    has_rt_target_status_ = false;
}
// Publish the trailer's desired position/velocity as an Odometry message in
// the "map" frame, with identity orientation and the commanded yaw rate.
void Executor::trajectory_publisher ()
{
    nav_msgs::Odometry _msg;
    desired_point << trailer.desired_point_, trailer.altitude;
    desired_vel << trailer.v_desired;
    _msg.header.stamp = ros::Time::now();
    _msg.header.frame_id = "map";
    _msg.child_frame_id = "drone_" + std::to_string(drone_id_) + "/base_link";
    _msg.pose.pose.position.x = desired_point[0];
    _msg.pose.pose.position.y = desired_point[1];
    _msg.pose.pose.position.z = trailer.altitude;
    // Identity quaternion: orientation is not part of the published reference.
    _msg.pose.pose.orientation.x = 0;
    _msg.pose.pose.orientation.y = 0;
    _msg.pose.pose.orientation.z = 0;
    _msg.pose.pose.orientation.w = 1;
    _msg.twist.twist.linear.x = desired_vel[0];
    _msg.twist.twist.linear.y = desired_vel[1];
    _msg.twist.twist.linear.z = 0;
    _msg.twist.twist.angular.x = 0;
    _msg.twist.twist.angular.y = 0;
    // Reuse the yaw rate currently commanded to the UAL.
    _msg.twist.twist.angular.z = setpoint_vel_.twist.angular.z;
    trajectory_.publish(_msg);
}
// Publish debug data: x = Lyapunov value of the visual controller,
// y = current drone yaw (z unused).
void Executor::lyapunov_publisher()
{
    geometry_msgs::Point32 _msg;
    _msg.x = lyapunov_visual;
    _msg.y = drone_yaw_;
    lyapunov_.publish(_msg);
}
// Target-state array callback: extracts (a) the reference-trajectory target
// (matched by rt_id_) unless a virtual trajectory is being generated, and
// (b) the gimbal shooting target (matched by shooting_id_) in GPS-based modes.
void Executor::targetarrayCallback(const multidrone_msgs::TargetStateArray::ConstPtr& _msg) // real target callback
{
    //rt target
    if (rt_mode_ != multidrone_msgs::ShootingAction::RT_MODE_VIRTUAL_TRAJ){
        for (auto target:_msg->targets) {
            if (target.target_id == rt_id_) {
                tf::twistMsgToEigen(target.velocity.twist, rt_target_twist_);
                // Position is only taken from the message when tracking the
                // actual target (other modes derive it elsewhere).
                if (rt_mode_ == multidrone_msgs::ShootingAction::RT_MODE_ACTUAL_TARGET)
                    tf2::fromMsg(target.pose.pose.position, rt_target_pos_);
                targetStatus(true);
            }
        }
    }
    //gimbal target
    if (shooting_mode_ == multidrone_msgs::TargetIdentifierType::GPS || shooting_mode_ == multidrone_msgs::TargetIdentifierType::VISUAL_GPS){
        for (auto target:_msg->targets) {
            if (target.target_id == shooting_id_) {
                tf2::fromMsg(target.pose.pose.position,shooting_target_pos_);
                tf2::fromMsg(target.pose.pose.orientation,shooting_target_att_);
                tf::twistMsgToEigen(target.velocity.twist, shooting_target_twist_);
                targetStatus(false);
            }
        }
    }
}
// Advance virtual targets along the straight segment `total_displ` at
// `formation_speed_`, stopping once the displacement from the first waypoint
// reaches the segment length on all three axes.
// NOTE(review): every axis divides by total_displ.norm() with no zero guard --
// a zero total displacement would divide by zero; confirm callers guarantee
// a non-degenerate segment before starting timer_rt.
void Executor::referenceTrajectory(const ros::TimerEvent&){
    if (rt_mode_ == multidrone_msgs::ShootingAction::RT_MODE_VIRTUAL_TRAJ){
        rt_target_pos_.x() = rt_target_pos_.x() + total_displ.x()*formation_speed_*RT_FREQ/(total_displ.norm());
        rt_target_pos_.y() = rt_target_pos_.y() + total_displ.y()*formation_speed_*RT_FREQ/(total_displ.norm());
        rt_target_pos_.z() = rt_target_pos_.z() + total_displ.z()*formation_speed_*RT_FREQ/(total_displ.norm());
        if (abs(rt_target_pos_.x()-list_wp_[0].point.x) >= abs(total_displ.x()) && abs(rt_target_pos_.y()-list_wp_[0].point.y) >= abs(total_displ.y()) && abs(rt_target_pos_.z()-list_wp_[0].point.z) >= abs(total_displ.z()))
            formation_speed_=0;
        targetStatus(true);
    }
    if (shooting_mode_ == multidrone_msgs::TargetIdentifierType::VIRTUAL){
        shooting_target_pos_.x() = shooting_target_pos_.x() + total_displ.x()*formation_speed_*RT_FREQ/(total_displ.norm());
        shooting_target_pos_.y() = shooting_target_pos_.y() + total_displ.y()*formation_speed_*RT_FREQ/(total_displ.norm());
        shooting_target_pos_.z() = shooting_target_pos_.z() + total_displ.z()*formation_speed_*RT_FREQ/(total_displ.norm());
        // NOTE(review): yaw is computed from (total_displ - first waypoint),
        // mixing a displacement with an absolute position -- looks like it
        // was meant to use the segment direction atan2(dy, dx); confirm.
        desired_yaw = atan2 (total_displ.y() - list_wp_[0].point.y, total_displ.x() - list_wp_[0].point.x);
        // Yaw-only quaternion from desired_yaw.
        shooting_target_att_.z() = sin(desired_yaw/2);
        shooting_target_att_.y() = 0;
        shooting_target_att_.x() = 0;
        shooting_target_att_.w() = cos(desired_yaw/2);
        shooting_target_twist_.x() = total_displ.x()*formation_speed_/(total_displ.norm());
        shooting_target_twist_.y() = total_displ.y()*formation_speed_/(total_displ.norm());
        shooting_target_twist_.z() = total_displ.z()*formation_speed_/(total_displ.norm());
        if (abs(shooting_target_pos_.x()-list_wp_[0].point.x) >= abs(total_displ.x()) && abs(shooting_target_pos_.y()-list_wp_[0].point.y) >= abs(total_displ.y()) && abs(shooting_target_pos_.z()-list_wp_[0].point.z) >= abs(total_displ.z()))
            formation_speed_=0;
        targetStatus(false);
    }
}
// Marks the reference-target (rt == true) or shooting-target status as known.
// For GPS-based shooting modes it additionally projects the shooting target
// into the camera image (using intrinsics K) and publishes the pixel vector.
void Executor::targetStatus(bool rt)
{
    if(rt)
        has_rt_target_status_ = true;
    else{
        // Shooting-target position expressed in the inertial frame.
        p_T_I = R_S_I*shooting_target_pos_;
        if(shooting_mode_ == multidrone_msgs::TargetIdentifierType::GPS || shooting_mode_ == multidrone_msgs::TargetIdentifierType::VISUAL_GPS){
            geometry_msgs::Vector3 pixel_msg;
            // Camera-to-target vector: target minus body position minus camera offset,
            // first in inertial coordinates, then rotated into the camera frame.
            q_I = p_T_I - p_B_I - R_B_I*p_C_B;
            q_C = R_C_I.transpose()*q_I;
            // Normalized image-plane coordinates; the axis swap/sign flip matches the
            // camera-frame convention used by this projection.
            q_C2.x() = q_C.y()/q_C.z();
            q_C2.y() = -q_C.x()/q_C.z();
            q_C2.z() = q_C.z()/q_C.z();
            pixel = K*q_C2; // apply camera intrinsics
            pixel_msg.x = pixel[0];
            pixel_msg.y = pixel[1];
            pixel_msg.z = pixel[2];
            pixel_publish.publish(pixel_msg);
        }
        has_shooting_target_status_ = true;
    }
}
// Autofocus-metric callback: stores the latest focus figure of merit, later
// consumed by focus_reset_tree() when searching for the best focus setting.
void Executor::focusCallback(const std_msgs::Int32::ConstPtr& _msg)
{
    focus_value = _msg->data;
}
// One Euler-integration step of the angular-velocity estimator driven by the
// attitude error w_e (computed in gimbalController()); the updated estimate
// is persisted in w_est_ for the next iteration.
void Executor::run_w_estimate()
{
    Eigen::Vector3d w_est_dot;
    // NOTE(review): dT_w is assigned GIMBAL_FREQ directly — if that constant is a
    // rate in Hz rather than a period in seconds, this step size looks inverted;
    // confirm against the constant's definition.
    dT_w = GIMBAL_FREQ;
    w_est_dot = k_2*w_e;
    w_est = w_est_+w_est_dot*dT_w;
    w_est_ = w_est; // keep the integrated state for the next call
}
// Drone pose callback
// Updates the cached pose, extracts yaw, and refreshes the body-to-inertial
// rotation/position used by the gimbal controller; records reception time
// for staleness checks.
void Executor::dronePose(const geometry_msgs::PoseStamped::ConstPtr& _msg)
{
    tf::pointMsgToEigen(_msg->pose.position, drone_pos_);
    tf::quaternionMsgToEigen(_msg->pose.orientation, drone_att_);
    // Yaw (z-rotation) from the quaternion via the standard atan2 formula.
    drone_yaw_ = atan2(2*(drone_att_.w()*drone_att_.z()+drone_att_.x()*drone_att_.y()),1-2*(drone_att_.y()*drone_att_.y()+drone_att_.z()*drone_att_.z()));
    R_B_S = drone_att_.normalized().toRotationMatrix();
    R_B_I = R_S_I*R_B_S;
    p_B_I = R_S_I*drone_pos_;
    time_of_last_drone_pose = ros::Time::now().toSec();
    has_drone_pose_ = true;
}
// Drone velocity callback
// Splits the incoming twist into linear/angular parts and rotates the linear
// velocity into the inertial frame.
void Executor::droneVelocity(const geometry_msgs::TwistStamped::ConstPtr& _msg)
{
    tf::twistMsgToEigen(_msg->twist, drone_twist_);
    drone_vel_ << drone_twist_(0,0), drone_twist_(1,0), drone_twist_(2,0);      // linear part
    drone_ang_vel_ << drone_twist_(3,0), drone_twist_(4,0), drone_twist_(5,0);  // angular part
    p_B_I_dot = R_S_I*drone_vel_;
    w_B = drone_ang_vel_;
    has_drone_vel_ = true;
}
// Drone state callback
// Mirrors the UAL-reported state into the local copy.
void Executor::droneState(const uav_abstraction_layer::State::ConstPtr& _msg)
{
    drone_state_.state = _msg->state;
}
// Gimbal status callback
// Converts the reported gimbal attitude into the camera-to-inertial rotation
// R_C_I, applying a fixed axis remapping (-90° about Z, -180° about X), and
// records the reception time for staleness checks.
void Executor::gimbalStatus(const multidrone_msgs::GimbalStatus::ConstPtr& _msg)
{
    tf::quaternionMsgToEigen(_msg->orientation, gimbal_att_);
    R_C_I = Eigen::AngleAxisd(-M_PI/2, Eigen::Vector3d::UnitZ())*Eigen::AngleAxisd(-M_PI, Eigen::Vector3d::UnitX())*gimbal_att_.normalized().toRotationMatrix();
    time_of_last_gimbal_status = ros::Time::now().toSec();
    has_gimbal_status_ = true;
}
// Manual-control service: dispatches a single operator command — gimbal
// nudges (image-offset steps), bounded camera zoom steps, focus steps,
// record start/stop, camera/focus reset, gimbal homing — then clamps the
// image offsets to their configured limits. Always returns true (the service
// call itself succeeded; individual camera calls are best-effort).
bool Executor::ManualControlServiceCallback(multidrone_msgs::ManualControls::Request& req, multidrone_msgs::ManualControls::Response& res)
{
    multidrone_msgs::CameraControl camera_control_msg_;
    // Gimbal nudges move the target point in the image by a fixed step.
    if (req.control == multidrone_msgs::ManualControls::Request::GIMBAL_MOVE_UP)
        target_image_offset_y -= target_image_offset_step;
    else if (req.control == multidrone_msgs::ManualControls::Request::GIMBAL_MOVE_DOWN)
        target_image_offset_y += target_image_offset_step;
    else if (req.control == multidrone_msgs::ManualControls::Request::GIMBAL_MOVE_LEFT)
        target_image_offset_x -= target_image_offset_step;
    else if (req.control == multidrone_msgs::ManualControls::Request::GIMBAL_MOVE_RIGHT)
        target_image_offset_x += target_image_offset_step;
    // Zoom steps are counted in `count`, bounded to [0, 20].
    else if (req.control == multidrone_msgs::ManualControls::Request::CAMERA_ZOOM_IN){
        if (count < 20){
            count++;
            camera_control_msg_.request.control_function = multidrone_msgs::CameraControl::Request::CONTROL_FUNCTION_ZOOM;
            camera_control_msg_.request.value = multidrone_msgs::CameraControl::Request::VALUE_INCREASE;
            camera_control_client_.call(camera_control_msg_);
        }
    }
    else if (req.control == multidrone_msgs::ManualControls::Request::CAMERA_ZOOM_OUT){
        if (count > 0){
            count--;
            camera_control_msg_.request.control_function = multidrone_msgs::CameraControl::Request::CONTROL_FUNCTION_ZOOM;
            camera_control_msg_.request.value = multidrone_msgs::CameraControl::Request::VALUE_DECREASE;
            camera_control_client_.call(camera_control_msg_);
        }
    }
    // Focus steps move the absolute focus command `cmd` within [44, 212].
    else if (req.control == multidrone_msgs::ManualControls::Request::CONTROL_FOCUS_INCREASE) {
        camera_control_msg_.request.control_function = multidrone_msgs::CameraControl::Request::CONTROL_FUNCTION_FOCUS;
        if(cmd < 212)
            cmd += inc;
        camera_control_msg_.request.value = cmd;
        camera_control_client_.call(camera_control_msg_);
    }
    else if (req.control == multidrone_msgs::ManualControls::Request::CONTROL_FOCUS_DECREASE) {
        camera_control_msg_.request.control_function = multidrone_msgs::CameraControl::Request::CONTROL_FUNCTION_FOCUS;
        if(cmd > 44)
            cmd -= inc;
        camera_control_msg_.request.value = cmd;
        camera_control_client_.call(camera_control_msg_);
    }
    // Recording start/stop is idempotent: repeated requests only warn.
    else if (req.control == multidrone_msgs::ManualControls::Request::CONTROL_RECORD_START) {
        if(!recording){
            camera_control_msg_.request.control_function = multidrone_msgs::CameraControl::Request::CONTROL_FUNCTION_RECORD;
            camera_control_msg_.request.value = multidrone_msgs::CameraControl::Request::VALUE_INCREASE;
            camera_control_client_.call(camera_control_msg_);
            recording = true;
        } else ROS_WARN("Already recording");
    }
    else if (req.control == multidrone_msgs::ManualControls::Request::CONTROL_RECORD_STOP) {
        if(recording){
            camera_control_msg_.request.control_function = multidrone_msgs::CameraControl::Request::CONTROL_FUNCTION_RECORD;
            camera_control_msg_.request.value = multidrone_msgs::CameraControl::Request::VALUE_DECREASE;
            camera_control_client_.call(camera_control_msg_);
            recording = false;
        } else ROS_WARN("Not recording");
    }
    else if (req.control == multidrone_msgs::ManualControls::Request::CAMERA_RESET) {
        // NOTE(review): control_function is taken from CameraControl::Request::CAMERA_RESET
        // while the branch tests ManualControls::Request::CAMERA_RESET — confirm the
        // CameraControl constant is the intended control-function code.
        camera_control_msg_.request.control_function = multidrone_msgs::CameraControl::Request::CAMERA_RESET;
        camera_control_client_.call(camera_control_msg_);
        count = 0; // zoom counter back to baseline after a camera reset
    } else if (req.control == multidrone_msgs::ManualControls::Request::FOCUS_RESET) {
        focus_reset_tree(54,92); // autofocus search within [54, 92]
    } else if (req.control == multidrone_msgs::ManualControls::Request::GIMBAL_RETURN_HOME) {
        gimbal_home = true;
    }
    // Clamp the accumulated image offsets to their configured bounds.
    if (target_image_offset_x > target_image_offset_x_MAX)
        target_image_offset_x = target_image_offset_x_MAX;
    else if (target_image_offset_x < target_image_offset_x_MIN)
        target_image_offset_x = target_image_offset_x_MIN;
    if (target_image_offset_y > target_image_offset_y_MAX)
        target_image_offset_y = target_image_offset_y_MAX;
    else if (target_image_offset_y < target_image_offset_y_MIN)
        target_image_offset_y = target_image_offset_y_MIN;
    return true;
}
// Autofocus search over focus commands in [beg, end]: a ternary-style
// bisection that, at each round, probes a left and a right midpoint (values
// aligned to multiples of 4), waits for the camera to settle, samples the
// focus metric (focus_value, fed by focusCallback), and narrows the interval
// towards the better side. Blocks the calling thread (sleeps per probe) and
// finally commands the best focus value found.
// buffer[0] holds the best metric so far; buffer[1]/buffer[2] the latest
// left/right probe metrics (-1 marks "no probe this round").
void Executor::focus_reset_tree(int beg, int end){
    // std::cout << " Starting Auto Focus" << "\n";
    int BEST, low_left, high_left = -1, low_right = -1, high_right, curr, curr_left, curr_right;
    bool stop_left, stop_right;
    multidrone_msgs::CameraControl camera_control_msg_;
    camera_control_msg_.request.control_function = multidrone_msgs::CameraControl::Request::CONTROL_FUNCTION_FOCUS;
    camera_control_msg_.request.manual_ctrl = false;
    f_reset = false; // signal that a focus reset is in progress
    buffer[0] = -1;
    // Initial probe at the (4-aligned) middle of the interval.
    curr = (end+beg)/2 + (end+beg)%4;
    low_left = beg;
    high_right = end;
    camera_control_msg_.request.value = curr;
    camera_control_client_.call(camera_control_msg_);
    std::this_thread::sleep_for(std::chrono::milliseconds(400)); // let focus settle before sampling
    BEST = curr;
    buffer[0] = focus_value;
    high_left = curr;
    low_right = curr;
    // Midpoints of the two half-intervals, rounded onto multiples of 4
    // (left rounds down, right rounds up).
    curr_left = (high_left+low_left)/2 - ((high_left+low_left)/2)%4;
    curr_right = (high_right+low_right)/2 + ((high_right+low_right)/2)%4;
    if(curr_left == curr){
        stop_left = true;
        buffer[1] = -1;
    }else stop_left = false;
    if(curr_right == curr){
        stop_right = true;
        buffer[2] = -1;
    }else stop_right = false;
    while(!stop_left || !stop_right){
        if(!stop_left){
            // Probe the left midpoint and keep it if the metric improves.
            camera_control_msg_.request.value = curr_left;
            camera_control_client_.call(camera_control_msg_);
            std::this_thread::sleep_for(std::chrono::milliseconds(400));
            buffer[1] = focus_value;
            if(buffer[1] > buffer[0]){
                buffer[0] = buffer[1];
                BEST = curr_left;
            }
        }
        if(!stop_right){
            // Probe the right midpoint likewise.
            camera_control_msg_.request.value = curr_right;
            camera_control_client_.call(camera_control_msg_);
            std::this_thread::sleep_for(std::chrono::milliseconds(500));
            buffer[2] = focus_value;
            if(buffer[2] > buffer[0]){
                buffer[0] = buffer[2];
                BEST = curr_right;
            }
        }
        // Narrow the interval towards the better probe.
        if(buffer[1] >= buffer[2]){
            curr = curr_left;
            high_right = curr;
        } else {
            curr = curr_right;
            low_left = curr;
        }
        high_left = curr;
        low_right = curr;
        curr_left = (high_left+low_left)/2 - ((high_left+low_left)/2)%4;
        curr_right = (high_right+low_right)/2 + ((high_right+low_right)/2)%4;
        // A side whose midpoint collapses onto curr can no longer shrink: stop it.
        if(curr_left == curr){
            stop_left = true;
            buffer[1] = -1;
        } else stop_left = false;
        if(curr_right == curr){
            stop_right = true;
            buffer[2] = -1;
        } else stop_right = false;
    }
    // Command the best focus value found and give the lens time to settle.
    camera_control_msg_.request.value = BEST;
    camera_control_client_.call(camera_control_msg_);
    std::this_thread::sleep_for(std::chrono::milliseconds(1000));
    f_reset = true;
    // std::cout << " Done. Focus value is " << BEST <<"\n";
}
// Ensures the autopilot mount parameters are MNT_MODE_IN == 3 and
// MNT_MODE_OUT == 0, issuing get/set requests at most every 5 seconds and
// looping (with spinning at 10 Hz) until both values are confirmed or ROS
// shuts down. Blocking.
void Executor::setMountModeParameters()
{
    mavros_msgs::ParamGet get_param_in, get_param_out;
    get_param_in.request.param_id = "MNT_MODE_IN";
    get_param_out.request.param_id = "MNT_MODE_OUT";
    mavros_msgs::ParamSet set_param;
    ros::Time last_request = ros::Time::now();
    while(ros::ok())
    {
        // Rate-limit the parameter traffic to one attempt per 5 s.
        if((ros::Time::now() - last_request > ros::Duration(5.0)))
        {
            if(get_param_client_.call(get_param_in) && get_param_in.response.success && get_param_in.response.value.integer != 3)
            {
                set_param.request.param_id = "MNT_MODE_IN";
                set_param.request.value.integer = 3;
                set_param_client_.call(set_param);
            }
            if(get_param_client_.call(get_param_out) && get_param_out.response.success && get_param_out.response.value.integer != 0)
            {
                set_param.request.param_id = "MNT_MODE_OUT";
                set_param.request.value.integer = 0;
                set_param_client_.call(set_param);
            }
            // Both parameters already at their targets: done.
            if(get_param_in.response.value.integer == 3 && get_param_out.response.value.integer == 0)
                break;
            last_request = ros::Time::now();
        }
        ros::spinOnce();
        ros::Duration(0.1).sleep();
    }
}
// Converts quaternion _q into Y-X-Z Euler angles, after applying the same
// fixed axis remapping used for the gimbal frame (-90° about Z, -180° about
// X — see gimbalStatus()). Output: euler.x/.y/.z are the rotations about
// X/Y/Z respectively; the X angle is clamped into asin's valid domain.
void Executor::toEulerAngles_YXZ(const Eigen::Quaterniond& _q, Eigen::Vector3d& euler)
{
    Eigen::Quaterniond q(Eigen::AngleAxisd(-M_PI/2, Eigen::Vector3d::UnitZ())*Eigen::AngleAxisd(-M_PI, Eigen::Vector3d::UnitX())*_q.normalized().toRotationMatrix());
    double t0 = -2.0 * (q.x() * q.y() - q.w() * q.z());
    double t1 = q.w()*q.w() - q.x()*q.x() + q.y()*q.y() - q.z()*q.z();
    double t2 = 2.0 * (q.y()*q.z() + q.w()*q.x());
    double t3 = -2.0 * (q.x()*q.z() - q.w()*q.y());
    double t4 = q.w()*q.w() - q.x()*q.x() - q.y()*q.y() + q.z()*q.z();
    // 0: rotation about Y
    euler.y() = atan2(t3, t4);
    // 1: rotation about X; clamp to [-1, 1] to guard asin against rounding
    t2 = t2 > 1.0 ? 1.0 : t2;
    t2 = t2 < -1.0 ? -1.0 : t2;
    euler.x() = asin(t2);
    // 2: rotation about Z
    euler.z() = atan2(t0, t1);
}
// Decomposes rotation matrix R into X-Y-Z Euler angles.
// The sine of the Y angle is -R(2,0); when it saturates at +/-1 the rotation
// is in gimbal lock and only the combination of X and Z is determined, so X
// is conventionally set to zero. Negated comparisons mirror the original
// bracketing exactly (including non-finite inputs).
void Executor::rotationMatrixToEulerAngles_XYZ(Eigen::Matrix3d& R, Eigen::Vector3d& euler){
    const double r20 = R(2,0);
    if (!(r20 < 1)) {
        // r20 >= 1: gimbal lock at Y = -pi/2.
        // Only euler.x() + euler.z() is determined; pick euler.x() = 0.
        euler.y() = -M_PI/2;
        euler.z() = atan2(-R(1,2),R(1,1));
        euler.x() = 0;
        return;
    }
    if (!(r20 > -1)) {
        // r20 <= -1: gimbal lock at Y = +pi/2.
        // Only euler.x() - euler.z() is determined; pick euler.x() = 0.
        euler.y() = M_PI/2;
        euler.z() = -atan2(-R(1,2),R(1,1));
        euler.x() = 0;
        return;
    }
    // Regular case: unique decomposition.
    euler.y() = asin(-r20);
    euler.z() = atan2(R(1,0),R(0,0));
    euler.x() = atan2(R(2,1),R(2,2));
}
// Action-server preempt callback: raises the flag checked by the executor loop.
void Executor::preemptCallback()
{
    goal_preempt_ = true;
}
// Builds the skew-symmetric (hat) matrix S of vector w, i.e. S*v == w x v.
void Executor::toSMatrix(Eigen::Vector3d& w, Eigen::Matrix3d& S)
{
    // Fill element by element: zero diagonal, antisymmetric off-diagonals.
    S(0,0) = 0.0;    S(0,1) = -w.z(); S(0,2) =  w.y();
    S(1,0) =  w.z(); S(1,1) = 0.0;    S(1,2) = -w.x();
    S(2,0) = -w.y(); S(2,1) =  w.x(); S(2,2) = 0.0;
}
// Inverse of the hat operation (vee map): extracts the vector components
// from the skew part of R__.
void Executor::toSInvMatrix(Eigen::Matrix3d& R__, Eigen::Vector3d& S_inv__)
{
    S_inv__.x() = R__(2,1);
    S_inv__.y() = R__(0,2);
    S_inv__.z() = R__(1,0);
}
// Orthogonal projector onto the plane perpendicular to rx: PI = I - rx*rx^T.
void Executor::toPIMatrix(Eigen::Vector3d& rx, Eigen::Matrix3d& PI)
{
    const Eigen::Matrix3d outer = rx*rx.transpose();
    PI = Eigen::Matrix3d::Identity();
    PI -= outer;
}
// Executes a blocking take-off to the altitude of the first waypoint and
// reports the outcome through the action server; the executor returns to
// idle afterwards regardless of success.
void Executor::takeOff()
{
    feedback_.status= true;
    server_->publishFeedback(feedback_);
    uav_abstraction_layer::TakeOff take_off_msg;
    take_off_msg.request.height = list_wp_[0].point.z;
    take_off_msg.request.blocking = true;
    // Goal achieved iff the (blocking) take-off service call succeeds.
    result_.goal_achieved = take_off_client_.call(take_off_msg);
    server_->setSucceeded(result_);
    action_type_ = EXECUTOR_IDLE;
}
// Executes a blocking landing and reports the outcome through the action
// server; the executor returns to idle afterwards regardless of success.
void Executor::land()
{
    feedback_.status= true;
    server_->publishFeedback(feedback_);
    uav_abstraction_layer::Land land_msg;
    land_msg.request.blocking = true;
    // Goal achieved iff the (blocking) land service call succeeds.
    result_.goal_achieved = land_client_.call(land_msg);
    server_->setSucceeded(result_);
    action_type_ = EXECUTOR_IDLE;
}
// One iteration of the go-to-waypoint action: commands the current waypoint
// of list_wp_ (index `cont`, advanced when within 0.5 m), keeping the
// current yaw; once all waypoints are consumed, optionally commands a final
// pose with the requested final yaw and succeeds when the yaw error drops
// below 10 degrees. Aborts the iteration if an alarm arrived mid-action.
void Executor::goToWaypoint()
{
    if (cont < list_wp_.size()) {
        result_.goal_achieved=false;
        setpoint_pose_.header.stamp = ros::Time::now();
        setpoint_pose_.header.frame_id = "map";
        setpoint_pose_.pose.position.x = list_wp_[cont].point.x;
        setpoint_pose_.pose.position.y = list_wp_[cont].point.y;
        setpoint_pose_.pose.position.z = list_wp_[cont].point.z;
        // Keep the drone's current attitude while traversing the path.
        setpoint_pose_.pose.orientation.z = drone_att_.z();
        setpoint_pose_.pose.orientation.y = drone_att_.y();
        setpoint_pose_.pose.orientation.x = drone_att_.x();
        setpoint_pose_.pose.orientation.w = drone_att_.w();
        go_to_waypoint_pub_.publish(setpoint_pose_);
        if (drone_alarmed_when_doing_gotowaypoint_==true) { // Alarm received while doing the gotowaypoint, don't keep iterating.
            action_type_ = EXECUTOR_IDLE;
            action_received_ = false;
            drone_alarmed_when_doing_gotowaypoint_ = false;
            cont = 0;
            return;
        }
        wp << setpoint_pose_.pose.position.x , setpoint_pose_.pose.position.y , setpoint_pose_.pose.position.z;
        diff = (wp - drone_pos_).norm();
        if(diff<=0.5) { // if wp is reached, command next waypoint
            cont = cont +1;
        }
    } else if (cont == list_wp_.size()) {
        // All-zero quaternion means "no final yaw requested": finish immediately.
        if (final_yaw_if_gotowaypoint_.z==0 && final_yaw_if_gotowaypoint_.y==0 && final_yaw_if_gotowaypoint_.x==0 && final_yaw_if_gotowaypoint_.w==0) {
            result_.goal_achieved=true;
            server_->setSucceeded(result_);
            action_type_ = EXECUTOR_IDLE;
            action_received_ = false;
            drone_alarmed_when_doing_gotowaypoint_ = false;
            cont = 0;
            return;
        }
        // Last go to wp at the final position of the path but with the final yaw:
        setpoint_pose_.header.stamp = ros::Time::now();
        setpoint_pose_.header.frame_id = "map";
        setpoint_pose_.pose.position.x = list_wp_[cont-1].point.x;
        setpoint_pose_.pose.position.y = list_wp_[cont-1].point.y;
        setpoint_pose_.pose.position.z = list_wp_[cont-1].point.z;
        setpoint_pose_.pose.orientation.z = final_yaw_if_gotowaypoint_.z;
        setpoint_pose_.pose.orientation.y = final_yaw_if_gotowaypoint_.y;
        setpoint_pose_.pose.orientation.x = final_yaw_if_gotowaypoint_.x;
        setpoint_pose_.pose.orientation.w = final_yaw_if_gotowaypoint_.w;
        go_to_waypoint_pub_.publish(setpoint_pose_);
        if (drone_alarmed_when_doing_gotowaypoint_==true) { // Alarm received while doing the gotowaypoint, don't keep iterating.
            action_type_ = EXECUTOR_IDLE;
            action_received_ = false;
            drone_alarmed_when_doing_gotowaypoint_ = false;
            cont = 0;
            return;
        }
        // Desired yaw (z-axis rotation) in radians:
        double desired_yaw = atan2(2*(final_yaw_if_gotowaypoint_.w*final_yaw_if_gotowaypoint_.z+final_yaw_if_gotowaypoint_.x*final_yaw_if_gotowaypoint_.y),\
        1-2*(final_yaw_if_gotowaypoint_.y*final_yaw_if_gotowaypoint_.y+final_yaw_if_gotowaypoint_.z*final_yaw_if_gotowaypoint_.z));
        diff = abs(desired_yaw - drone_yaw_);
        if(diff<=0.261799388) { // if wp is reached (difference <= 10 sexagesimal degrees), goal achieved
            result_.goal_achieved=true;
            server_->setSucceeded(result_);
            action_type_ = EXECUTOR_IDLE;
            action_received_ = false;
            drone_alarmed_when_doing_gotowaypoint_ = false;
            cont = 0;
        }
    }
}
// One control iteration while shooting: computes a saturated (tanh-shaped)
// velocity command that drives the drone towards the trailer's desired point
// plus the trailer feed-forward velocity, plus a yaw-rate command that points
// the camera according to the active shooting type. Discards the action if
// the command turns out non-finite.
void Executor::followVehicle()
{
    desired_point << trailer.desired_point_, trailer.altitude;
    // Per-axis proportional error, saturated at MAX_DRONE_SPEED via tanh.
    double pos_error_tanh_x = MAX_DRONE_SPEED*tanh(K_DRONE*(drone_pos_[0]-desired_point[0])/MAX_DRONE_SPEED);
    double pos_error_tanh_y = MAX_DRONE_SPEED*tanh(K_DRONE*(drone_pos_[1]-desired_point[1])/MAX_DRONE_SPEED);
    double pos_error_tanh_z = MAX_DRONE_SPEED*tanh(K_DRONE*(drone_pos_[2]-desired_point[2])/MAX_DRONE_SPEED);
    Eigen::Vector3d pos_error_tanh(pos_error_tanh_x,pos_error_tanh_y,pos_error_tanh_z);
    Eigen::Vector3d v_;
    v_ << trailer.v_desired, 0; // feed-forward: planar trailer velocity, zero vertical
    cmd_linear_velocity = -pos_error_tanh + v_;
    tf::vectorEigenToMsg(cmd_linear_velocity, cmd_linear_velocity_);
    setpoint_vel_.header.stamp = ros::Time::now();
    setpoint_vel_.header.frame_id = "map";
    // Guard against NaN/Inf commands before publishing.
    if(!std::isnan(cmd_linear_velocity.x()) && !std::isnan(cmd_linear_velocity.y()) && !std::isnan(cmd_linear_velocity.z())
    && !std::isinf(cmd_linear_velocity.x()) && !std::isinf(cmd_linear_velocity.y()) && !std::isinf(cmd_linear_velocity.z()))
        setpoint_vel_.twist.linear = cmd_linear_velocity_;
    else {
        action_type_ = EXECUTOR_IDLE;
        feedback_.status = false;
        feedback_.action_id = action_id_;
        server_->publishFeedback(feedback_);
        ROS_ERROR("Got NAN, shooting action will be discarded. \ndesired_point = %f, %f, %f \nv_ = %f, %f, %f \nv_target = %f, %f \nv_trailer_ = %f, %f \nw_trailer_ = %f \nTs = %f \n", desired_point.x(), desired_point.y(), desired_point.z(), v_.x(),v_.y(),v_.z(), trailer.v_target.x(), trailer.v_target.y(), trailer.v_trailer_.x(), trailer.v_trailer_.y(), trailer.w_trailer, trailer.Ts[trailer.filter-2]);
        return;
    }
    setpoint_vel_.twist.angular.x = 0;
    setpoint_vel_.twist.angular.y = 0;
    // Heading error depends on the shooting type:
    //  - fly-through / lateral / no target: align with the trailer direction;
    //  - orbit: heading tangent to the orbit (sign follows angular_speed);
    //  - otherwise: point at the shooting target.
    if (shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_FLY_THROUGH || shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_LATERAL || shooting_mode_ == multidrone_msgs::TargetIdentifierType::NONE)
        drone_heading_error = remainder((drone_yaw_ - trailer.a_), 2*M_PI);
    else if(shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_ORBIT){
        Eigen::Vector3d heading = (drone_pos_ - rt_target_pos_);
        if (trailer.angular_speed > 0)
            heading = heading.cross(-Eigen::Vector3d::UnitZ());
        else
            heading = heading.cross(Eigen::Vector3d::UnitZ());
        drone_heading_error = remainder((drone_yaw_ - atan2(heading.y(),heading.x())), 2*M_PI);
    }
    else
        drone_heading_error = remainder((drone_yaw_ - atan2 (shooting_target_pos_.y() - drone_pos_.y(), shooting_target_pos_.x() - drone_pos_.x())), 2*M_PI);
    if (drone_heading_error > M_PI)
        drone_heading_error = -2*M_PI + drone_heading_error;
    // Saturated yaw-rate command combining heading correction and trailer rate.
    setpoint_vel_.twist.angular.z = MAX_YAW_RATE*tanh((-K_DRONE_YAW*drone_heading_error + trailer.w_trailer)/MAX_YAW_RATE);
    goal_direction_ = setpoint_vel_.twist.linear;
    wished_mov_dir_pub_.publish(goal_direction_);
    set_velocity_pub_.publish(setpoint_vel_);
}
/**
*
*/
// Collision-warning callback: latches the conflict-warning flag.
void Executor::collisionWarningCallback(const std_msgs::Bool::ConstPtr& collision_warning)
{
    confl_warning_ = collision_warning->data;
}
/**
*
*/
// Avoidance-direction callback: stores the commanded avoidance vector.
void Executor::avoidMovementCallback(const geometry_msgs::Vector3::ConstPtr& avoidance_direction)
{
    // Whole-message copy (x, y, z) into the member field.
    avoid_mov_direction_ = *avoidance_direction;
}
// Accepts a new drone-action goal and configures the executor for it.
// For TYPE_SHOOTING: parses shooting/target parameters, copies the reference
// trajectory, selects the target-identification mode, initializes virtual
// targets if needed, and configures the trailer model per shooting type.
// For take-off / land / go-to-waypoint: copies the path into list_wp_.
// If a new gotowaypoint arrives while one is already running (alarm case),
// blocks until the running one finishes before accepting.
void Executor::actionCallback()
{
    // If the new drone action is a gotowaypoint and right now is running another gotowaypoint, it means that an alarm has been triggered while doing it.
    // In this case wait until the current gotowaypoint finish to start the next alarm's gotowaypoint.
    if (action_type_==multidrone_msgs::DroneAction::TYPE_GOTOWAYPOINT && goal_.action_type==multidrone_msgs::DroneAction::TYPE_GOTOWAYPOINT) {
        drone_alarmed_when_doing_gotowaypoint_ = true;
        while (drone_alarmed_when_doing_gotowaypoint_ && ros::ok) {
            ros::Rate loop_rate(1); // [Hz]
            loop_rate.sleep();
        }
    }
    // Stop any previous reference/trailer timers before reconfiguring.
    timer_rt.stop();
    timer_trailer.stop();
    running_trailer = false;
    action_received_ = true;
    start_time = ros::Time::now().toSec();
    goal_ = server_->acceptNewGoal()->action_goal;
    // saving list of waypoints
    cont = 0;
    list_wp_.clear();
    action_type_= goal_.action_type;
    action_id_ = goal_.action_id;
    // Save final_yaw_if_gotowaypoint from this drone action into its class attribute, so that later a new last waypoint with different yaw can be sent in the navigation actions.
    final_yaw_if_gotowaypoint_.x = goal_.final_yaw_if_gotowaypoint.x;
    final_yaw_if_gotowaypoint_.y = goal_.final_yaw_if_gotowaypoint.y;
    final_yaw_if_gotowaypoint_.z = goal_.final_yaw_if_gotowaypoint.z;
    final_yaw_if_gotowaypoint_.w = goal_.final_yaw_if_gotowaypoint.w;
    if (action_type_ == multidrone_msgs::DroneAction::TYPE_SHOOTING) {
        // Rebuild the parameter maps from the first shooting role.
        shooting_parameters_.clear();
        target_parameters_.clear();
        for(int i = 0; i<goal_.shooting_action.shooting_roles[0].shooting_parameters.size(); i++)
            shooting_parameters_[goal_.shooting_action.shooting_roles[0].shooting_parameters[i].param] = goal_.shooting_action.shooting_roles[0].shooting_parameters[i].value;
        for(int i = 0; i<goal_.shooting_action.shooting_roles[0].target_parameters.size(); i++)
            target_parameters_[goal_.shooting_action.shooting_roles[0].target_parameters[i].param] = goal_.shooting_action.shooting_roles[0].target_parameters[i].value;
        for(int i = 0; i<goal_.shooting_action.rt_trajectory.size(); i++)
            list_wp_.push_back(goal_.shooting_action.rt_trajectory[i]);
        shooting_action_ = goal_.shooting_action.shooting_roles[0].shooting_type.type;
        // Formation origin: first waypoint shifted by the rt displacement.
        trailer.originOfFormation_(0) = goal_.shooting_action.rt_displacement.x + list_wp_[0].point.x;
        trailer.originOfFormation_(1) = goal_.shooting_action.rt_displacement.y + list_wp_[0].point.y;
        duration_ = goal_.shooting_action.duration;
        length_ = goal_.shooting_action.length;
        rt_mode_ = goal_.shooting_action.rt_mode;
        // std::cout << "rt_mode_ is " << (int)rt_mode_ << "\n";
        if (rt_mode_ != multidrone_msgs::ShootingAction::RT_MODE_VIRTUAL_TRAJ){
            rt_id_ = goal_.shooting_action.rt_id;
            // std::cout << "rt_id_ is " << (int)rt_id_ << "\n";
        }
        // Target-identification mode: GPS id, visual tracker, both, fixed
        // pan/tilt sweep (NONE) or a static geographic point.
        shooting_mode_ = goal_.shooting_action.shooting_roles[0].target_identifier_type.type;
        if (shooting_mode_ == multidrone_msgs::TargetIdentifierType::GPS){
            shooting_id_ = (uint8_t) target_parameters_["ID"];
            // std::cout << "shooting_id_ is " << (int)shooting_id_ << "\n";
        }
        else if (shooting_mode_ == multidrone_msgs::TargetIdentifierType::VISUAL){
            multidrone_msgs::FollowTarget follow_target_msg;
            follow_target_msg.request.target_id = 0;
            follow_target_msg.request.target_type.type = 0;
            follow_target_client_.call(follow_target_msg);
        }
        else if (shooting_mode_ == multidrone_msgs::TargetIdentifierType::VISUAL_GPS){
            multidrone_msgs::FollowTarget follow_target_msg;
            shooting_id_ = (uint8_t) target_parameters_["ID"];
            follow_target_msg.request.target_id = 0;
            follow_target_msg.request.target_type.type = 1;
            follow_target_client_.call(follow_target_msg);
            // std::cout << "VISUAL_GPS shooting_id_ is " << (int)shooting_id_ << "\n";
        }
        else if (shooting_mode_ == multidrone_msgs::TargetIdentifierType::NONE){
            pan_s = target_parameters_["pan_s"];
            tilt_s = target_parameters_["tilt_s"];
            pan_e = target_parameters_["pan_e"];
            tilt_e = target_parameters_["tilt_e"];
        }
        else if (shooting_mode_ == multidrone_msgs::TargetIdentifierType::STATIC){
            latitude = target_parameters_["latitude"];
            longitude = target_parameters_["longitude"];
            altitude = target_parameters_["altitude"];
        }
        // Virtual trajectory / virtual target: initialize positions at the first
        // waypoint and start the reference-trajectory timer.
        if (rt_mode_ == multidrone_msgs::ShootingAction::RT_MODE_VIRTUAL_TRAJ || shooting_mode_ == multidrone_msgs::TargetIdentifierType::VIRTUAL){
            total_displ << (list_wp_[list_wp_.size()-1].point.x - list_wp_[0].point.x) , (list_wp_[list_wp_.size()-1].point.y -list_wp_[0].point.y) , (list_wp_[list_wp_.size()-1].point.z -list_wp_[0].point.z);
            formation_speed_ = goal_.shooting_action.formation_speed;
            // Degenerate (zero-length) segment: use a dummy direction, zero speed.
            if (total_displ.norm() == 0){
                total_displ << 1,1,1;
                formation_speed_ = 0;
            }
            if (rt_mode_ == multidrone_msgs::ShootingAction::RT_MODE_VIRTUAL_TRAJ){
                rt_target_pos_.x() = list_wp_[0].point.x;
                rt_target_pos_.y() = list_wp_[0].point.y;
                rt_target_pos_.z() = list_wp_[0].point.z;
            }
            if (shooting_mode_ == multidrone_msgs::TargetIdentifierType::VIRTUAL){
                shooting_target_pos_.x() = list_wp_[0].point.x;
                shooting_target_pos_.y() = list_wp_[0].point.y;
                shooting_target_pos_.z() = list_wp_[0].point.z;
            }
            timer_rt.start();
        }
        //trailer parameters
        trailer.initial_azimuth = 0;
        trailer.radius = 0;
        trailer.angular_speed = 0;
        trailer.rx = 0;
        trailer.rx_e = 0;
        trailer.rx_s = 0;
        trailer.ry = 0;
        trailer.rx_dot = 0;
        trailer.ry_dot = 0;
        trailer.altitude = 0;
        trailer.z_e = 0;
        trailer.altitude_dot = 0;
        trailer.d = trailer_size;
        if (rt_mode_ != multidrone_msgs::ShootingAction::RT_MODE_ACTUAL_TARGET)
            trailer.d = 0;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        // Per shooting type, set the trailer offsets/rates from the parameters.
        if(shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_STATIC) //0
        {
            trailer.altitude = shooting_parameters_["z_0"];
            trailer.trailer_type = TRAILER_TYPE_INERTIAL;
        }
        else if(shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_FLY_THROUGH && rt_mode_ == multidrone_msgs::ShootingAction::RT_MODE_VIRTUAL_TRAJ) //1
        {
            trailer.altitude = shooting_parameters_["z_0"];
            trailer.trailer_type = TRAILER_TYPE_VARIABLE;
        }
        else if(shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_ESTABLISH) //2
        {
            trailer.rx_s = shooting_parameters_["x_s"];
            trailer.rx_e = shooting_parameters_["x_e"];
            trailer.rx_dot = (shooting_parameters_["x_e"] - shooting_parameters_["x_s"])/duration_.data.sec;
            trailer.altitude = shooting_parameters_["z_s"];
            trailer.z_e = shooting_parameters_["z_e"];
            trailer.altitude_dot = (shooting_parameters_["z_e"] - shooting_parameters_["z_s"])/duration_.data.sec;
            trailer.trailer_type = TRAILER_TYPE_VARIABLE;
        }
        else if(shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_CHASE || shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_LEAD) //3
        {
            trailer.rx_s = shooting_parameters_["x_s"];
            trailer.rx_e = shooting_parameters_["x_e"];
            trailer.rx_dot = (shooting_parameters_["x_e"] - shooting_parameters_["x_s"])/duration_.data.sec;
            trailer.altitude = shooting_parameters_["z_0"];
            trailer.trailer_type = TRAILER_TYPE_VARIABLE;
        }
        else if(shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_FLYBY) //5
        {
            trailer.rx_s = shooting_parameters_["x_s"];
            trailer.rx_e = shooting_parameters_["x_e"];
            trailer.ry = shooting_parameters_["y_0"];
            trailer.rx_dot = (shooting_parameters_["x_e"] - shooting_parameters_["x_s"])/duration_.data.sec;
            trailer.altitude = shooting_parameters_["z_0"];
            trailer.trailer_type = TRAILER_TYPE_VARIABLE;
        }
        else if(shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_LATERAL) //6
        {
            trailer.ry = shooting_parameters_["y_0"];
            trailer.altitude = shooting_parameters_["z_0"];
            trailer.trailer_type = TRAILER_TYPE_VARIABLE;
        }
        else if(shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_ELEVATOR) //7
        {
            trailer.altitude = shooting_parameters_["z_s"];
            trailer.z_e = shooting_parameters_["z_e"];
            trailer.altitude_dot = (shooting_parameters_["z_e"] - shooting_parameters_["z_s"])/duration_.data.sec;
            trailer.trailer_type = TRAILER_TYPE_INERTIAL;
        }
        else if(shooting_action_ == multidrone_msgs::ShootingType::SHOOT_TYPE_ORBIT) //8
        {
            trailer.initial_azimuth = shooting_parameters_["azimuth_s"];
            trailer.radius = shooting_parameters_["r_0"];
            trailer.angular_speed = shooting_parameters_["azimuth_speed"];
            trailer.altitude = shooting_parameters_["z_0"];
            trailer.trailer_type = TRAILER_TYPE_ORBIT;
        }
        else {
            // Unknown shooting type: report failure and go back to idle.
            action_type_ = EXECUTOR_IDLE;
            feedback_.status = false;
            feedback_.action_id = action_id_;
            server_->publishFeedback(feedback_);
            return;
        }
        // std::cout << "shooting_action_ is " << (int)shooting_action_ << "\n";
        // std::cout << "trailer.ry is " << trailer.ry << "\n";
        // std::cout << "trailer.altitude is " << trailer.altitude << "\n";
        trailer.r << trailer.rx, trailer.ry;
        trailer.r_dot << trailer.rx_dot, trailer.ry_dot;
        parameters_set = true;
        // Forward the requested framing type to the camera stack.
        multidrone_msgs::SetFramingType set_framing_type_msg;
        set_framing_type_msg.request.target_framing_type.type = goal_.shooting_action.shooting_roles[0].framing_type.type;
        set_framing_type_client_.call(set_framing_type_msg);
    }
    else //Takeoff, land or gotowaypoint
        for(int i = 0; i<goal_.path.size(); i++)
            list_wp_.push_back(goal_.path[i]);
}
// Open-loop gimbal sweep: linearly interpolates pan/tilt from (pan_s, tilt_s)
// to (pan_e, tilt_e) over `duration` seconds (clock anchored at `start_time`),
// publishing proportional rate commands to the gimbal; in simulation the
// command is additionally mirrored through a MAVLink long command.
// Fix: removed a stray double semicolon (empty statement) on the simulated
// roll command line; behavior is otherwise unchanged.
void Executor::gimbalAutomatic(double pan_s, double tilt_s, double pan_e, double tilt_e, double duration, double start_time)
{
    double t = ros::Time::now().toSec() - start_time;
    // Linear interpolation of the commanded pan/tilt.
    double pan = pan_s + t*(pan_e - pan_s)/duration;
    double tilt = tilt_s + t*(tilt_e - tilt_s)/duration;
    toEulerAngles_YXZ(gimbal_att_,gimbal_euler);
    geometry_msgs::Vector3 msg_gimbal_cmd;
    // Proportional rate commands towards the interpolated setpoint.
    msg_gimbal_cmd.x = - gimbal_euler.x()*TO_DEG;
    msg_gimbal_cmd.y = 1.5*(tilt - gimbal_euler.y()*TO_DEG);
    msg_gimbal_cmd.z = 1.5*(pan - (gimbal_euler.z()+drone_yaw_)*TO_DEG);
    if(t>=duration){
        // Sweep finished: stop pan/tilt motion (roll levelling still applies).
        msg_gimbal_cmd.y = 0;
        msg_gimbal_cmd.z = 0;
    }
    gimbal_cmd_basecam_pub.publish(msg_gimbal_cmd);
    if(simulation){
        rotationMatrixToEulerAngles_XYZ(R_x, gimbal_euler);
        if(std::abs(gimbal_euler[0]) >= 3){ //to avoid singularity on simulated gimbal
            gimbal_euler[0] = gimbal_euler[0] - M_PI;
            gimbal_euler[1] = M_PI - gimbal_euler[1];
            gimbal_euler[2] = gimbal_euler[2] - M_PI;
        }
        if (!has_gimbal_status_)
            R_C_I = R_x;
        gimbal_cmd_.request.param1 = tilt -90; // pitch
        gimbal_cmd_.request.param2 = gimbal_euler.x()*TO_DEG; // roll
        gimbal_cmd_.request.param3 = pan +drone_yaw_*TO_DEG; // yaw
        if(t<duration)
            cmd_long_client_.call(gimbal_cmd_);
    }
}
// Fallback gimbal behaviour when no usable pose/target is available:
// resets the angular-velocity estimator state and commands zero gimbal rates.
void Executor::gimbalSafeMode()
{
    w_est << 0.0, 0.0, 0.0;
    w_est_ << 0.0, 0.0, 0.0;
    t_w_ = ros::Time::now().toSec();
    geometry_msgs::Vector3 msg_gimbal_cmd;
    msg_gimbal_cmd.x = 0;
    msg_gimbal_cmd.y = 0;
    msg_gimbal_cmd.z = 0;
    gimbal_cmd_basecam_pub.publish(msg_gimbal_cmd);
}
// Closed-loop gimbal pointing controller: builds the desired camera rotation
// R_x from the camera-to-target vector (GPS target status or a STATIC point),
// forms the attitude error R_e, derives an angular-velocity command
// (optionally augmented with the target-motion estimator), maps it to Euler
// rates and publishes it; in simulation the resulting attitude is also
// mirrored through a MAVLink long command. Falls back to gimbalSafeMode()
// when no pose/target is available.
void Executor::gimbalController()
{
    if (has_drone_pose_ && has_shooting_target_status_ ){ //GPS
        // Operator image offsets, mapped through the inverse intrinsics into a
        // camera-frame displacement of the aim point.
        o(0) = K.inverse()(1, 1) * -target_image_offset_y;
        o(1) = K.inverse()(0, 0) * target_image_offset_x;
        o(2) = 0.0;
        q_I = p_T_I - p_B_I - R_B_I*p_C_B + R_C_I*o*q_C.z();
        q_I_dot << 0.0, 0.0, 0.0;
        q_norm = R_C_I.transpose()*(q_I/q_I.norm());
        r_x_3 = q_I/q_I.norm();                      // desired camera boresight
        a = r_x_3.cross(-Eigen::Vector3d::UnitZ());  // horizontal axis candidate
        has_visual_2D_target_status_ = false;
    }
    else if (has_drone_pose_ && shooting_mode_ == multidrone_msgs::TargetIdentifierType::STATIC){ // static
        // NOTE(review): the static point is used as (latitude, longitude, altitude)
        // directly as inertial coordinates — confirm an upstream conversion exists.
        p_T_I << latitude, longitude, altitude;
        q_I = p_T_I - p_B_I - R_B_I*p_C_B;
        q_I_dot << 0.0, 0.0, 0.0;
        q_norm = R_C_I.transpose()*(q_I/q_I.norm());
        r_x_3 = q_I/q_I.norm();
        a = r_x_3.cross(-Eigen::Vector3d::UnitZ());
        has_visual_2D_target_status_ = false;
    }
    else{
        gimbalSafeMode();
        return;
    }
    // Complete the orthonormal desired frame R_x = [r_x_1 r_x_2 r_x_3].
    r_x_2 = a/a.norm();
    b = r_x_2.cross(r_x_3);
    r_x_1 = b/b.norm();
    R_x.col(0) = r_x_1;
    R_x.col(1) = r_x_2;
    R_x.col(2) = r_x_3;
    // Attitude error; for visual 2D tracking R_x is already an error rotation.
    if (!has_visual_2D_target_status_)
        R_e = R_C_I.transpose()*R_x;
    else
        R_e = R_x;
    R_e_R_e = R_e-R_e.transpose();
    toSInvMatrix(R_e_R_e,w_e); // vee map of the skew part -> error vector
    // Calculate Lyapunov function
    V_1_ = Eigen::MatrixXd::Identity(3,3)-R_e;
    lyapunov_visual = V_1_(0,0)+V_1_(1,1)+V_1_(2,2);
    w = k_1*w_e;
    // For shot types with a moving target, add the estimated target motion.
    if(shooting_action_ != multidrone_msgs::ShootingType::SHOOT_TYPE_LATERAL && shooting_action_ != multidrone_msgs::ShootingType::SHOOT_TYPE_STATIC &&
    shooting_action_ != multidrone_msgs::ShootingType::SHOOT_TYPE_ELEVATOR){
        run_w_estimate();
        w = w_est + k_1*w_e;
    }
    // Map the angular-velocity command to Euler-angle rates.
    toEulerAngles_YXZ(gimbal_att_,gimbal_euler);
    Q_inv << cos(gimbal_euler.y()), 0, sin(gimbal_euler.y()),
    sin(gimbal_euler.x())*sin(gimbal_euler.y()), 1, -sin(gimbal_euler.x())*cos(gimbal_euler.y()),
    -sin(gimbal_euler.y()), 0, cos(gimbal_euler.y());
    euler_dot_cmd = Q_inv*w;
    geometry_msgs::Vector3 msg_gimbal_cmd;
    msg_gimbal_cmd.x = euler_dot_cmd[0]*TO_DEG;
    msg_gimbal_cmd.y = euler_dot_cmd[1]*TO_DEG;
    msg_gimbal_cmd.z = euler_dot_cmd[2]*TO_DEG;
    gimbal_cmd_basecam_pub.publish(msg_gimbal_cmd);
    if(simulation){
        if ((shooting_mode_ == multidrone_msgs::TargetIdentifierType::VISUAL || shooting_mode_ == multidrone_msgs::TargetIdentifierType::VISUAL_GPS) && has_visual_2D_target_status_) {R_x = R_C_I*R_e;}
        rotationMatrixToEulerAngles_XYZ(R_x, gimbal_euler);
        if(std::abs(gimbal_euler[0]) >= 3){ //to avoid singularity on simulated gimbal
            gimbal_euler[0] = gimbal_euler[0] - M_PI;
            gimbal_euler[1] = M_PI - gimbal_euler[1];
            gimbal_euler[2] = gimbal_euler[2] - M_PI;
        }
        if (!has_gimbal_status_)
            R_C_I = R_x;
        gimbal_cmd_.request.param1 = gimbal_euler[1]*TO_DEG-90; // pitch
        gimbal_cmd_.request.param2 = gimbal_euler[0]*TO_DEG; // roll
        gimbal_cmd_.request.param3 = gimbal_euler[2]*TO_DEG+drone_yaw_*TO_DEG; // yaw
        cmd_long_client_.call(gimbal_cmd_);
    }
}
void Executor::CameraControl(){
multidrone_msgs::CameraControl camera_control_msg_;
camera_control_msg_.request.control_function = multidrone_msgs::CameraControl::Request::CONTROL_FUNCTION_ZOOM;
camera_control_msg_.request.manual_ctrl = false;
if (zoom_error > 0.05){
if (count > 0){
camera_control_msg_.request.value = uint8_t(127 - 88 * (zoom_error-0.05));
camera_control_client_.call(camera_control_msg_);
count--;
}
}
else if (zoom_error < -0.05){
if (count < count_lim){
camera_control_msg_.request.value = uint8_t(129 - 88 * (zoom_error+0.05));
camera_control_client_.call(camera_control_msg_);
count++;
}
}
}
|
{"hexsha": "da02dce507a0b864e8da46a97e6a3e40c9601634", "size": 52187, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "action_executor/src/action_executor.cpp", "max_stars_repo_name": "grvcTeam/multidrone_planning", "max_stars_repo_head_hexsha": "421a7d81a3417cdc6bcb690d3d88bb4e9d6b6638", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2020-07-02T07:00:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T07:59:41.000Z", "max_issues_repo_path": "action_executor/src/action_executor.cpp", "max_issues_repo_name": "grvcTeam/multidrone_planning", "max_issues_repo_head_hexsha": "421a7d81a3417cdc6bcb690d3d88bb4e9d6b6638", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "action_executor/src/action_executor.cpp", "max_forks_repo_name": "grvcTeam/multidrone_planning", "max_forks_repo_head_hexsha": "421a7d81a3417cdc6bcb690d3d88bb4e9d6b6638", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2021-01-01T06:35:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-24T09:54:21.000Z", "avg_line_length": 39.9288446825, "max_line_length": 404, "alphanum_fraction": 0.6739226244, "num_tokens": 14698}
|
import unittest
import numpy as np
import tensorflow as tf
from pymatgen.core import Lattice, Structure
from m3gnet.graph import Index, MaterialGraph, RadiusCutoffGraphConverter
class TestConverter(unittest.TestCase):
    """Checks RadiusCutoffGraphConverter output and MaterialGraph round-trips."""

    @classmethod
    def setUpClass(cls) -> None:
        # One shared two-atom Mo structure, converted once for all tests.
        cls.s1 = Structure(
            Lattice.cubic(3.17), ["Mo", "Mo"], [[0, 0, 0], [0.5, 0.5, 0.5]]
        )
        converter = RadiusCutoffGraphConverter(cutoff=5.0, threebody_cutoff=4.0)
        cls.g1 = converter.convert(cls.s1)

    def test_graph(self):
        graph = self.g1
        self.assertIsInstance(graph, MaterialGraph)
        graph_list = graph.as_list()
        # Both atoms are Mo (atomic number 42).
        np.testing.assert_array_almost_equal(
            graph_list[Index.ATOMS].ravel(), [42, 42]
        )
        self.assertTrue(str(graph).startswith("<Material"))
        self.assertIsInstance(graph.atoms, np.ndarray)
        # The TF view exposes the same fields as tensors.
        self.assertIsInstance(graph.as_tf().atoms, tf.Tensor)
        self.assertEqual(graph.n_atom, 2)
        self.assertEqual(graph.n_bond, graph.n_bonds[0])
        self.assertEqual(graph.n_struct, 1)
        self.assertTrue(graph.has_threebody)
        # Round-trip through list form and through copy() preserves equality.
        rebuilt = MaterialGraph.from_list(graph.as_list())
        self.assertEqual(graph, rebuilt)
        duplicated = graph.copy()
        self.assertEqual(graph, duplicated)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
{"hexsha": "c9a2df4ba46c20fcd38f749f07fa5a72e308ecf1", "size": 1309, "ext": "py", "lang": "Python", "max_stars_repo_path": "m3gnet/graph/tests/test_types.py", "max_stars_repo_name": "materialsvirtuallab/m3gnet", "max_stars_repo_head_hexsha": "94fb01d0c90d3b2bffcdc4514f7eb3cb8fab6c90", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-31T14:47:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:47:24.000Z", "max_issues_repo_path": "m3gnet/graph/tests/test_types.py", "max_issues_repo_name": "materialsvirtuallab/m3gnet", "max_issues_repo_head_hexsha": "94fb01d0c90d3b2bffcdc4514f7eb3cb8fab6c90", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "m3gnet/graph/tests/test_types.py", "max_forks_repo_name": "materialsvirtuallab/m3gnet", "max_forks_repo_head_hexsha": "94fb01d0c90d3b2bffcdc4514f7eb3cb8fab6c90", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5641025641, "max_line_length": 93, "alphanum_fraction": 0.6592818946, "include": true, "reason": "import numpy", "num_tokens": 347}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2022 Stéphane Caron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Function to solve inverse kinematics.
"""
from typing import Iterable, Tuple
import numpy as np
from qpsolvers import solve_qp
from .configuration import Configuration
from .limits import compute_velocity_limits
from .tasks import Task
def compute_qp_objective(
    configuration: Configuration, tasks: Iterable[Task], damping: float
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compute the Hessian matrix :math:`H` and linear vector :math:`c` of the
    QP objective function:

    .. math::

        \\frac{1}{2} \\Delta q^T H \\Delta q + c^T q

    The configuration displacement :math:`\\Delta q` is the output of inverse
    kinematics (we divide it by :math:`\\Delta t` to get a commanded velocity).

    Args:
        configuration: Robot configuration to read kinematics from.
        tasks: List of kinematic tasks to fulfill at (weighted) best.
        damping: weight of Tikhonov (everywhere) regularization. Its unit is
            `[cost]^2 / [tangent]` where `[tangent]` is "the" unit of robot
            velocities. Improves numerical stability, but larger values slow
            down all tasks.

    Returns:
        Pair :math:`(H, c)` of Hessian matrix and linear vector of the QP
        objective.
    """
    # `damping * eye` allocates a fresh array, so the in-place `H +=` below
    # cannot clobber configuration.tangent.eye.
    H = damping * configuration.tangent.eye
    # Copy before accumulating: `c += c_task` mutates in place, and
    # configuration.tangent.zeros may be a cached/shared array that must not
    # change between calls (latent aliasing bug in the original).
    c = configuration.tangent.zeros.copy()
    for task in tasks:
        H_task, c_task = task.compute_qp_objective(configuration)
        H += H_task
        c += c_task
    return (H, c)
def solve_ik(
    configuration: Configuration,
    tasks: Iterable[Task],
    dt: float,
    damping: float = 1e-12,
    solver: str = "quadprog",
) -> np.ndarray:
    """
    Compute a velocity tangent to the current robot configuration that
    satisfies at (weighted) best a given set of kinematic tasks.

    Args:
        configuration: Robot configuration to read kinematics from.
        tasks: List of kinematic tasks.
        dt: Integration timestep in [s].
        damping: weight of Tikhonov (everywhere) regularization. Its unit is
            :math:`[\\mathrm{cost}]^2 / [\\mathrm{tangent}]` where
            :math:`[\\mathrm{tangent}]` is "the" unit of robot velocities.
            Improves numerical stability, but larger values slow down all
            tasks.
        solver: Backend quadratic programming solver.

    Returns:
        Velocity :math:`v` in tangent space.

    Raises:
        NotWithinConfigurationLimits: if the current configuration is not
            within limits.

    Note:
        Our Tikhonov damping is isotropic despite tangent velocities not being
        homogeneous. If it helps we can add a tangent-space scaling to damp the
        floating base differently from joint angular velocities.
    """
    configuration.check_limits()
    hessian, linear = compute_qp_objective(configuration, tasks, damping)
    v_max, v_min = compute_velocity_limits(configuration, dt)
    # Box the displacement between v_min * dt and v_max * dt via stacked
    # inequalities A @ dq <= b.
    eye = configuration.tangent.eye
    ineq_matrix = np.vstack([eye, -eye])
    ineq_vector = np.hstack([dt * v_max, -dt * v_min])
    delta_q = solve_qp(hessian, linear, ineq_matrix, ineq_vector, solver=solver)
    assert delta_q is not None
    # The QP solves for a displacement; dividing by dt yields the velocity.
    velocity: np.ndarray = delta_q / dt
    return velocity
|
{"hexsha": "d940173ff9c2754a23af0d2ffa77ea1fdb9adeb5", "size": 3770, "ext": "py", "lang": "Python", "max_stars_repo_path": "pink/solve_ik.py", "max_stars_repo_name": "tasts-robots/pink", "max_stars_repo_head_hexsha": "966385c89bee8a8004007cad666657b015a29f3a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pink/solve_ik.py", "max_issues_repo_name": "tasts-robots/pink", "max_issues_repo_head_hexsha": "966385c89bee8a8004007cad666657b015a29f3a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pink/solve_ik.py", "max_forks_repo_name": "tasts-robots/pink", "max_forks_repo_head_hexsha": "966385c89bee8a8004007cad666657b015a29f3a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6607142857, "max_line_length": 79, "alphanum_fraction": 0.6782493369, "include": true, "reason": "import numpy", "num_tokens": 893}
|
from typing import Tuple

import numpy as np
class BatchHeatmapUtils:
    """Helpers for locating per-joint peak positions in batches of heatmaps."""

    @classmethod
    def flatten_batch_heatmaps(cls, batch_heatmaps: np.ndarray, batch_size: int, num_joints: int) -> np.ndarray:
        """Collapse each (height, width) heatmap into one flat vector."""
        return batch_heatmaps.reshape((batch_size, num_joints, -1))

    @classmethod
    def find_flattened_heatmap_maxvals(cls, flattened_heatmaps: np.ndarray, batch_size: int, num_joints: int):
        """Return (argmax indices, max values), each shaped (batch, joints, 1)."""
        max_indices = np.argmax(flattened_heatmaps, axis=2).reshape((batch_size, num_joints, 1))
        max_values = np.amax(flattened_heatmaps, axis=2).reshape((batch_size, num_joints, 1))
        return max_indices, max_values

    @classmethod
    def get_unflattened_coords(cls, flattened_idx: np.ndarray, heatmap_width: int) -> np.ndarray:
        """Convert flat heatmap indices into float32 (x, y) pixel coordinates."""
        # Duplicate the index into both coordinate slots, then decode each.
        coords = np.tile(flattened_idx, (1, 1, 2)).astype(np.float32)
        coords[:, :, 0] = coords[:, :, 0] % heatmap_width            # x = idx mod width
        coords[:, :, 1] = np.floor(coords[:, :, 1] / heatmap_width)  # y = idx div width
        return coords

    @classmethod
    def zero_negative_maxval_coords(cls, coords: np.ndarray, maxvals: np.ndarray) -> np.ndarray:
        """Zero out the coordinates of joints whose peak value is not positive."""
        positive_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2)).astype(np.float32)
        return coords * positive_mask
def get_maxvals_and_coords(batch_heatmaps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Locate the peak of every joint heatmap in a batch.

    Note: the original annotation ``-> (np.ndarray, np.ndarray)`` was a tuple
    literal, which is not a valid PEP 484 type; it is now ``Tuple[...]``.

    Args:
        batch_heatmaps: Array of shape (batch, joints, height, width).

    Returns:
        Tuple ``(coords, maxvals)``: ``coords`` has shape (batch, joints, 2)
        holding (x, y) peak locations (zeroed where the peak value is not
        positive) and ``maxvals`` has shape (batch, joints, 1).
    """
    batch_size, num_joints, _height, width = batch_heatmaps.shape
    # Flatten each (height, width) map so the peak is a single argmax.
    flattened_heatmaps = BatchHeatmapUtils.flatten_batch_heatmaps(
        batch_heatmaps=batch_heatmaps,
        batch_size=batch_size,
        num_joints=num_joints
    )
    # Find the peak value and its flat index in every heatmap.
    idx, maxvals = BatchHeatmapUtils.find_flattened_heatmap_maxvals(
        flattened_heatmaps=flattened_heatmaps,
        batch_size=batch_size,
        num_joints=num_joints
    )
    # Convert flat indices back into (x, y) pixel coordinates.
    coords = BatchHeatmapUtils.get_unflattened_coords(
        flattened_idx=idx,
        heatmap_width=width
    )
    # Coordinates of non-positive peaks are meaningless; zero them out.
    coords = BatchHeatmapUtils.zero_negative_maxval_coords(coords=coords, maxvals=maxvals)
    return coords, maxvals
|
{"hexsha": "89b96687e0447f41d278df65d25849e5bbc4aaa2", "size": 2823, "ext": "py", "lang": "Python", "max_stars_repo_path": "common_utils/ml_utils/heatmap/heatmap.py", "max_stars_repo_name": "cm107/common_utils", "max_stars_repo_head_hexsha": "4b911efe9f8cdec16ecb2a983e16f772be05076c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "common_utils/ml_utils/heatmap/heatmap.py", "max_issues_repo_name": "cm107/common_utils", "max_issues_repo_head_hexsha": "4b911efe9f8cdec16ecb2a983e16f772be05076c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common_utils/ml_utils/heatmap/heatmap.py", "max_forks_repo_name": "cm107/common_utils", "max_forks_repo_head_hexsha": "4b911efe9f8cdec16ecb2a983e16f772be05076c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.7727272727, "max_line_length": 115, "alphanum_fraction": 0.6967764789, "include": true, "reason": "import numpy", "num_tokens": 712}
|
#ifndef TRACKER_GMD_H
#define TRACKER_GMD_H
#include "tracker.h"
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h> /* GAUSSIAN*/
#include "helper/Constants.h"
#include <limits.h>
#include "helper/high_res_timer.h"
#include "helper/bounding_box_regressor.h"
// Tracker that samples Gaussian candidate boxes around the previous estimate,
// scores them with a regressor network, and fine-tunes the network online
// from positive/negative examples collected along the sequence.
class TrackerGMD : public Tracker {
public:
    TrackerGMD(const bool show_tracking, ExampleGenerator* example_generator, RegressorTrainBase* regressor_train);

    // Estimate the location of the target object in the current image.
    virtual void Track(const cv::Mat& image_curr, RegressorBase* regressor,
                       BoundingBox* bbox_estimate_uncentered);

    // After tracking for this frame, update internal state
    virtual void UpdateState(const cv::Mat& image_curr, BoundingBox &bbox_estimate, RegressorBase* regressor, bool is_last_frame);

    // Initialize the tracker with the ground-truth bounding box of the first frame.
    virtual void Init(const cv::Mat& image_curr, const BoundingBox& bbox_gt,
                      RegressorBase* regressor);
    virtual void Init(const std::string& image_curr_path, const VOTRegion& region,
                      RegressorBase* regressor);

    // Online fine tune, given the networks and example_generators
    virtual void FineTuneOnline(ExampleGenerator* example_generator,
                                RegressorTrainBase* regressor_train, bool success_frame, bool is_last_frame);

    // Actual worker to do the finetune
    void FineTuneWorker(ExampleGenerator* example_generator,
                        RegressorTrainBase* regressor_train,
                        std::vector<int> &this_bag,
                        const int pos_candidate_upper_bound = INT_MAX,
                        const int neg_candidate_upper_bound = INT_MAX);

    // Motion Model around bbox_curr_prior_tight_
    void GetCandidates(BoundingBox &cur_bbox, int W, int H, std::vector<BoundingBox> &candidate_bboxes);

    // Check if generated candidate is valid or not
    bool ValidCandidate(BoundingBox &candidate_bbox, int W, int H);

    // Get one moved box, according to the Gaussian Motion Model
    BoundingBox GenerateOneGaussianCandidate(int W, int H, BoundingBox &bbox, double sd_x = SD_X, double sd_y = SD_Y,
                                             double sd_scale = SD_SCALE, double sd_ap = SD_AP);

    // Create and Enqueue Training Samples given already set up example_generator
    virtual void EnqueueOnlineTraningSamples(ExampleGenerator* example_generator, const cv::Mat &image_curr, const BoundingBox &estimate, bool success_frame);

    // check if the current estimate is success, needed as flag to pass to EnqueueOnlineTraningSamples
    virtual bool IsSuccessEstimate();

    // clear all the related storage for tracking net video
    virtual void Reset(RegressorBase *regressor);

private:
    // GSL random-number generator; presumably used by the Gaussian candidate
    // sampling above — confirm against the .cpp.
    gsl_rng *rng_;

    // Used to generate additional training examples through synthetic transformations.
    ExampleGenerator* example_generator_;

    // Neural network.
    RegressorTrainBase* regressor_train_;

    // this prediction scores for candidates
    std::vector<float> candidate_probabilities_;
    std::vector<BoundingBox> candidates_bboxes_;
    std::vector<int> sorted_idxes_; // the sorted indexes of probabilities from high to low

    // samples collected along each frame
    std::vector<BoundingBox> gts_;
    std::vector<cv::Mat> image_currs_;
    std::vector<cv::Mat> images_finetune_;
    std::vector<cv::Mat> targets_finetune_;
    std::vector<std::vector<BoundingBox> > candidates_finetune_pos_;
    std::vector<std::vector<BoundingBox> > candidates_finetune_neg_;
    // std::vector<std::vector<double> > labels_finetune_pos_;
    // std::vector<std::vector<double> > labels_finetune_neg_;

    // long term and short term
    std::vector<int> short_term_bag_;
    std::vector<int> long_term_bag_;

    // C++ standard Mersenne-twister engine.
    // NOTE(review): std::mt19937 requires <random>, which is not included
    // here — presumably pulled in transitively via tracker.h; confirm.
    std::mt19937 engine_;

    // for motion model candidates
    double sd_trans_;
    double sd_scale_;
    double sd_ap_;

    // timer
    HighResTimer hrt_;

    // Bbox regressor
    BoundingBoxRegressor bbox_finetuner_;
};
#endif
|
{"hexsha": "a503929b6d07b7834110fb81d978710052ea3915", "size": 4090, "ext": "h", "lang": "C", "max_stars_repo_path": "src/tracker/tracker_gmd.h", "max_stars_repo_name": "Jim61C/GMD_Tracker", "max_stars_repo_head_hexsha": "6c522b26f664c259bd371214e44c9c2cd32c51d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-04-14T14:33:30.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-19T21:49:19.000Z", "max_issues_repo_path": "src/tracker/tracker_gmd.h", "max_issues_repo_name": "Jim61C/GMD_Tracker", "max_issues_repo_head_hexsha": "6c522b26f664c259bd371214e44c9c2cd32c51d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tracker/tracker_gmd.h", "max_forks_repo_name": "Jim61C/GMD_Tracker", "max_forks_repo_head_hexsha": "6c522b26f664c259bd371214e44c9c2cd32c51d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8703703704, "max_line_length": 157, "alphanum_fraction": 0.7256723716, "num_tokens": 951}
|
import os
import sys
from simtk import unit
from benchmark import DATA_PATH
from benchmark.experiments.driver import ExperimentDescriptor, Experiment
from benchmark.testsystems import dhfr_constrained
import numpy as np
# Sweep grid: hydrogen-mass scale factors (fed to `h_mass_factor` below) and
# candidate timesteps.
scale_factors = np.arange(1.0, 4.01, 0.25)
dt_range = np.arange(0.5, 8.01, 0.5)  # timesteps; passed as timestep_in_fs
# Langevin splitting schemes under test, keyed by their short names.
splittings = {"OVRVO": "O V R V O",
              "ORVRO": "O R V R O",
              "RVOVR": "R V O V R",
              "VRORV": "V R O R V",
              }
system_name = "DHFR in explicit solvent (constrained)"
system = dhfr_constrained
# Marginals to estimate for each experiment descriptor.
marginals = ["configuration", "full"]
collision_rate_name = "low"
collision_rate = 1.0 / unit.picoseconds
# Protocol settings: number of work samples and steps per protocol.
n_protocol_samples = 100
protocol_length = 4000
experiment_name = "5_hmr"
# Enumerate every (scale factor, splitting, timestep, marginal) combination.
# Scale factors and timesteps are traversed in reverse so the most
# interesting corner of the grid comes first; job indices start at 1.
descriptors_and_fnames = []
job_index = 1
for scale_factor in scale_factors[::-1]:
    for splitting_name in sorted(splittings.keys()):
        for dt in dt_range[::-1]:
            for marginal in marginals:
                full_filename = os.path.join(
                    DATA_PATH, f"{experiment_name}_{job_index}.pkl"
                )
                descriptor = ExperimentDescriptor(
                    experiment_name=experiment_name,
                    system_name=system_name,
                    equilibrium_simulator=system,
                    splitting_name=splitting_name,
                    splitting_string=splittings[splitting_name],
                    timestep_in_fs=dt,
                    marginal=marginal,
                    collision_rate_name=collision_rate_name,
                    collision_rate=collision_rate,
                    n_protocol_samples=n_protocol_samples,
                    protocol_length=protocol_length,
                    h_mass_factor=scale_factor,
                )
                descriptors_and_fnames.append((descriptor, full_filename))
                job_index += 1

print(len(descriptors_and_fnames))
if __name__ == "__main__":
    # Job-array entry point: the first CLI argument selects which descriptor
    # in the grid this worker runs and saves.
    job_id = int(sys.argv[1])
    experiment_descriptor, full_filename = descriptors_and_fnames[job_id]
    Experiment(experiment_descriptor, full_filename, store_potential_energy_traces=True).run_and_save()
|
{"hexsha": "ccea17d78c439a95c774688aee8d0656ac1d26da", "size": 2212, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmark/experiments/submission_scripts/5_hmr.py", "max_stars_repo_name": "choderalab/integrator-benchmark", "max_stars_repo_head_hexsha": "bb307e6ebf476b652e62e41ae49730f530732da3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-02-22T09:08:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-08T21:21:35.000Z", "max_issues_repo_path": "benchmark/experiments/submission_scripts/5_hmr.py", "max_issues_repo_name": "choderalab/integrator-benchmark", "max_issues_repo_head_hexsha": "bb307e6ebf476b652e62e41ae49730f530732da3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2017-04-15T21:34:25.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-22T13:56:40.000Z", "max_forks_repo_path": "benchmark/experiments/submission_scripts/5_hmr.py", "max_forks_repo_name": "choderalab/integrator-benchmark", "max_forks_repo_head_hexsha": "bb307e6ebf476b652e62e41ae49730f530732da3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-12-06T05:43:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-01T01:00:24.000Z", "avg_line_length": 33.5151515152, "max_line_length": 103, "alphanum_fraction": 0.6410488246, "include": true, "reason": "import numpy", "num_tokens": 485}
|
# Copyright 2021 Ibrahim Ayed, Emmanuel de Bézenac, Mickaël Chen, Jean-Yves Franceschi, Sylvain Lamprier, Patrick Gallinari
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import torch
import numpy as np
from torch.utils.data import Dataset, Subset
from gantk2.data.transforms import transform_factory
datasets = ['simple', 'mnist']
class IndicesDataset(Dataset):
    """Dataset wrapper that yields ``(item, index)`` pairs.

    Lets downstream code (e.g. a source-side collate function) know which
    dataset index each sample came from.
    """

    def __init__(self, dataset):
        # Fix: the original called `super(IndicesDataset).__init__()`, an
        # unbound super whose __init__ is a no-op — the parent class was
        # never actually initialized.
        super().__init__()
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        # Return the wrapped item together with the index used to fetch it.
        return self.dataset[index], index
def source_collate_fn(collate_fn, batch):
    """Collate a batch of (sample, index) pairs.

    Applies *collate_fn* to the samples and stacks the indices into a tensor,
    returning ``(collated_samples, index_tensor)``.
    """
    samples = [sample for sample, _ in batch]
    indices = torch.tensor([index for _, index in batch])
    return collate_fn(samples), indices
def dataset_dimensionality(dataset):
    """Return the [height, width, channels] dimensionality of *dataset*.

    Only 'mnist' has a defined dimensionality; other known dataset names
    raise ValueError, and unknown names fail the membership assertion.
    """
    assert dataset in datasets
    if dataset != 'mnist':
        raise ValueError(f'No defined dimensionality for `{dataset}`')
    return [32, 32, 1]
def dataset_factory(config, source):
    """Build the source or target dataset described by *config*.

    Args:
        config: Experiment configuration namespace. NOTE: `config.data_dim`
            is mutated here as a side effect.
        source: If True, build the input-side dataset; otherwise the
            output-side dataset.

    Returns:
        Pair ``(dataset, collate_fn)``. For a source dataset of a
        non-generator model, the dataset is wrapped in IndicesDataset and the
        collate function additionally returns sample indices.
    """
    # Select which side of the config to read (in_* vs out_*).
    if source:
        dataset_name = config.in_dataset
        opposite_dataset = config.out_dataset
        transform = transform_factory(config.in_transform)
    else:
        dataset_name = config.out_dataset
        opposite_dataset = config.in_dataset
        transform = transform_factory(config.out_transform)
    assert dataset_name in datasets
    if dataset_name == 'simple':
        # 'simple' takes its data_dim from the opposite dataset when that one
        # is an image dataset and no generator is used.
        if opposite_dataset != 'simple' and not config.generator:
            config.data_dim = dataset_dimensionality(opposite_dataset)
        from gantk2.data.simple import get_dataset
        dataset, collate_fn = get_dataset(config, transform, source)
    else:
        assert not source or not config.generator
        config.data_dim = dataset_dimensionality(dataset_name)
        if dataset_name == 'mnist':
            from gantk2.data.mnist import get_dataset
            dataset, collate_fn = get_dataset(config.data_path, transform, True)
        else:
            raise ValueError(f'No dataset named `{dataset_name}`')
    # Optionally subsample to a fixed number of randomly chosen examples
    # (nb_samples <= 0 means "use the whole dataset").
    if source:
        nb_samples = config.in_nb_samples
    else:
        nb_samples = config.out_nb_samples
    dataset_size = len(dataset)
    assert nb_samples <= dataset_size
    if nb_samples > 0:
        chosen_indices = np.arange(dataset_size)
        np.random.shuffle(chosen_indices)
        dataset = Subset(dataset, chosen_indices[:nb_samples])
    if source and not config.generator:
        # Source samples carry their indices; wrap the collate fn accordingly.
        if collate_fn is not None:
            collate_fn = functools.partial(source_collate_fn, collate_fn)
        return IndicesDataset(dataset), collate_fn
    else:
        return dataset, collate_fn
|
{"hexsha": "5383e8e3f0f227419b704e8c8840197045527d74", "size": 3217, "ext": "py", "lang": "Python", "max_stars_repo_path": "gantk2/data/factory.py", "max_stars_repo_name": "emited/gantk2", "max_stars_repo_head_hexsha": "157ad5d7e4b3b46da3d8b6d7f1db5cfc80ebb426", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-06-10T09:42:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-05T11:48:11.000Z", "max_issues_repo_path": "gantk2/data/factory.py", "max_issues_repo_name": "emited/gantk2", "max_issues_repo_head_hexsha": "157ad5d7e4b3b46da3d8b6d7f1db5cfc80ebb426", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gantk2/data/factory.py", "max_forks_repo_name": "emited/gantk2", "max_forks_repo_head_hexsha": "157ad5d7e4b3b46da3d8b6d7f1db5cfc80ebb426", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-15T21:48:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-15T21:48:18.000Z", "avg_line_length": 33.8631578947, "max_line_length": 123, "alphanum_fraction": 0.6947466584, "include": true, "reason": "import numpy", "num_tokens": 692}
|
[STATEMENT]
lemma MGT_CALL1: "\<forall>p. {} |\<turnstile>\<^sub>t {MGT\<^sub>t(CALL p)}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>p. {} |\<turnstile>\<^sub>t {MGT\<^sub>t (CALL p)}
[PROOF STEP]
by(fastforce intro:MGT_CALL[THEN ConjE])
|
{"llama_tokens": 119, "file": "Abstract-Hoare-Logics_Procs_PsHoareTotal", "length": 1}
|
\problemname{Quality-Adjusted Life-Year}
%% Image URL: https://www.pexels.com/photo/sunset-sunshine-travel-wings-103127/
%% Image License: https://www.pexels.com/photo-license/
\illustration{0.33}{balcony.jpg}{~}
The Quality-Adjusted Life-Year (QALY) is a way to measure a person's
quality of life that includes both the quality and the quantity of
life lived.
The quality of life lived can be quantified as a number between $0$ and
$1$. If someone is living with perfect health, the quality of life is
$1$. If someone is dead, then the quality of life is $0$. The quality of
life may increase or decrease due to medical treatments, sickness,
etc.
The QALY for each period in which the quality of life is constant is
simply the product of the quality of life and the length of the period
(in years). We wish to know the amount of QALY accumulated by a
person at the time of death, given the complete history of this
person.
\section*{Input}
The first line of input contains a single integer $N$~($1 \leq N \leq 100$), which is the number of periods of constant quality of life during the person's lifetime.
The next $N$ lines describe the periods of life. Each of these lines contains two real numbers $q$~($0 < q \leq 1$), which is the quality of life in this period, and $y$~($0 < y \leq 100$), which is the number of years in this period. All real numbers will be specified to exactly one decimal place.
\section*{Output}
Display the QALY accumulated by the person. Your answer will be considered correct if its absolute error does not exceed $10^{-3}$.
|
{"hexsha": "8dad911101b0b18649e3d6ab591fca59933d30fb", "size": 1575, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "problems/qaly/problem_statement/problem.tex", "max_stars_repo_name": "icpc/na-rocky-mountain-2018-public", "max_stars_repo_head_hexsha": "416a94258f99ab68ff7d9777faca55c94cdaf5f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-22T16:34:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T16:34:26.000Z", "max_issues_repo_path": "problems/qaly/problem_statement/problem.tex", "max_issues_repo_name": "icpc/na-rocky-mountain-2018-public", "max_issues_repo_head_hexsha": "416a94258f99ab68ff7d9777faca55c94cdaf5f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "problems/qaly/problem_statement/problem.tex", "max_forks_repo_name": "icpc/na-rocky-mountain-2018-public", "max_forks_repo_head_hexsha": "416a94258f99ab68ff7d9777faca55c94cdaf5f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.7272727273, "max_line_length": 299, "alphanum_fraction": 0.7517460317, "num_tokens": 408}
|
from setuptools import (
setup,
find_packages,
Extension
)
from setupext import check_for_openmp
import os
import numpy as np
from Cython.Build import cythonize
# Enable OpenMP only when the build toolchain supports it.
if check_for_openmp():
    omp_args = ['-fopenmp']
else:
    # NOTE(review): None (not []) flows into extra_compile_args/extra_link_args
    # below — presumably accepted by setuptools' Extension; confirm.
    omp_args = None
# MSVC links the math library implicitly; elsewhere request libm explicitly.
if os.name == "nt":
    std_libs = []
else:
    std_libs = ["m"]
extensions = [
    # C++ wrapper around the bundled EWAH bitmap sources.
    Extension("ewah_bool_utils.ewah_bool_wrap",
              ["ewah_bool_utils/ewah_bool_wrap.pyx"],
              include_dirs=["ewah_bool_utils",
                            "ewah_bool_utils/cpp",
                            np.get_include()],
              language="c++"),
    # Morton-order helpers, built with OpenMP flags when available.
    Extension("ewah_bool_utils.morton_utils",
              ["ewah_bool_utils/morton_utils.pyx"],
              extra_compile_args=omp_args,
              extra_link_args=omp_args,
              libraries=std_libs,
              include_dirs=[np.get_include()])
]
setup(
    ext_modules = cythonize(extensions),
)
|
{"hexsha": "31cf28e7b7fcef379cfcee18169d77094c15830a", "size": 927, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "themousepotato/ewah_bool_utils", "max_stars_repo_head_hexsha": "eb1bc18e62d02a715a441a6731cf29a38aad158d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "setup.py", "max_issues_repo_name": "themousepotato/ewah_bool_utils", "max_issues_repo_head_hexsha": "eb1bc18e62d02a715a441a6731cf29a38aad158d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-04T21:34:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-06T12:37:09.000Z", "max_forks_repo_path": "setup.py", "max_forks_repo_name": "themousepotato/ewah_bool_utils", "max_forks_repo_head_hexsha": "eb1bc18e62d02a715a441a6731cf29a38aad158d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-08-05T18:44:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-04T20:42:53.000Z", "avg_line_length": 23.175, "max_line_length": 53, "alphanum_fraction": 0.5965480043, "include": true, "reason": "import numpy", "num_tokens": 213}
|
#!/usr/bin/python3.4
import os
import bpy
import glob
import argparse
import numpy as np
from bpy import context
scene = context.scene
import sys
argv = sys.argv
argv = argv[argv.index("-P") + 1:] # get all args after "--"
argv.remove("--")
sys.argv = argv
print(argv) # --> ['example', 'args', '123']
def is_valid_file(parser, arg):
    """Argparse type helper: return *arg* if it names an existing path.

    Otherwise abort argument parsing via parser.error() (which exits).
    """
    if os.path.exists(arg):
        return arg
    parser.error("The file %s does not exist!" % arg)
# Command-line interface: metadata file, optional video, output name, timing.
parser = argparse.ArgumentParser(description='Plot data array [META_DATA] and [video] as an [OUT]<rubbish>.dvd @ 30fps using h264 encoding')
parser.add_argument('file', metavar="META_DATA", help='the file to process', type=lambda x: is_valid_file(parser, x))
parser.add_argument('--video', dest='video',default="vid/video.mov",
                    help='video file name', type=lambda x: is_valid_file(parser, x))
parser.add_argument('--out', dest='output',default="output",
                    help='output file name (default: video<rubbish>.dvd)')
parser.add_argument('--fps', dest='fps',default=30, type=int,
                    help='input video frame_rate' )
parser.add_argument('--offset', dest='offset', default=0.0, type=float,
                    help='video offset in seconds' )
parser.add_argument('-v', '--verbose', dest='DEBUG', action='store_true', default=False, help='Print stuff and save the blend file')
#parser.add_argument('--', dest='blender', action='store_true', default=False, help='running as blender script')
args = parser.parse_args()
# Resolve metadata and video locations from the parsed arguments.
path = os.path.dirname(os.path.realpath((args.file))) #"/home/mro7/data/blend_set/"
filename = os.path.basename(args.file) #"meta_data.dat"
vidpath = os.path.dirname(os.path.realpath((args.video))) #"vid/video.mov"
vidname = os.path.basename(args.video)
#options
# Render resolution and timing options.
resx = 1280#720 #1920
resy = 720 #480; #1080
scene.render.fps = args.fps #30
scene.render.filepath = args.output #"video.avi"
DEBUG = args.DEBUG #True#False
video_offset = args.offset #10
# load in the metadata (wasteful, but reading all columns means we can add stuff to it)
meta_data = np.genfromtxt("%s/%s"%(path,filename), dtype=None, delimiter=' ', names=True)
# create the sequencer data
scene.sequence_editor_create()
#files = glob.glob("%s/frame*.png"%(path))
#files.sort()
#print(files)
#exit(0)
# set the movie strip / scale and offset
#load new movie clip
bpy.data.movieclips.load("%s/%s"%(vidpath,vidname))
#get the new movie clip
movie_clip = bpy.data.movieclips.get(vidname)
#assign movie clip to the node
#bpy.context.scene.node_tree.nodes['Movie Clip'].clip = movie_clip
# Video strip on channel 1, shifted left by the requested offset (in frames).
vid = scene.sequence_editor.sequences.new_clip(#movie(
    name="video_clip",#os.path.basename(f),
    #filepath = vidpath,
    clip = movie_clip,
    channel=1, frame_start=-video_offset*scene.render.fps)
#translation
vid.blend_type = 'ADD'
vid.use_translation = True
#vid.transform.offset_x = 720
#vid.transform.offset_y = 0
### compute scale
# Fit the clip into a 660x440 region of the render, preserving aspect ratio
# by taking the smaller of the two scale factors.
scale_x = (660)/resx#movie_clip.size[0]
scale_y = (440)/resy#movie_clip.size[0]
uniform_scale = (scale_x if (scale_x<scale_y) else scale_y)
#movie_clip = bpy.data.movieclips.get(os.path.basename(vidpath))
#print(movie_clip.size[0],movie_clip.size[1], uniform_scale)
#exit(1)
# Transform effect strip on channel 2 that scales and repositions the video.
scale = scene.sequence_editor.sequences.new_effect(
    name="video_scale",
    channel=2,
    type = 'TRANSFORM',
    frame_start = -video_offset*scene.render.fps,
    seq1 = vid
)
scale.blend_type = 'ALPHA_OVER'
#scale.use_uniform_scale = True
scale.scale_start_y = uniform_scale
scale.scale_start_x = uniform_scale
scale.use_translation = True
scale.transform.offset_x = resx/4-20 #(half+30)/2
scale.transform.offset_y = 0#((resy/4)-25)
#print(dir(vid))
#exit(1)
# set the frame/time counters
#seq = []
# Start half a second before the first metadata timestamp; seq/sseq hold the
# most recently added image strips (None until the first loop iteration).
frame_origin = meta_data['timestamp'][0]-0.5
current_frame = 0
duration = 0
seq = None
sseq = None
for row in meta_data:
filename = "grabbedImage_%04d.png"%row['image_number']
statsname = "image%04d.png"%row['image_number']
duration = (row['timestamp']-frame_origin)*scene.render.fps
frame_origin = row['timestamp']
#try:
# seq.frame_final_duration = duration #+ scene.render.fps
# sseq.frame_final_duration = duration #+ scene.render.fps
#except:
# pass
### no if try hack
try:
seq.frame_final_end = current_frame #duration #+ scene.render.fps
sseq.frame_final_end = current_frame #duration #+ scene.render.fps
except:
pass
# image sequencer
seq = scene.sequence_editor.sequences.new_image(
name=filename,#os.path.basename(f),
filepath=os.path.join(path, filename),
channel=3, frame_start=current_frame)
#seq.frame_final_duration = duration
#current_frame = current_frame+duration
if(DEBUG):
print("%s @ %f s ~> %d frames"%(filename, duration/scene.render.fps, duration))
#translation
seq.blend_type = 'OVER_DROP'
seq.use_translation = True
seq.transform.offset_x = 0
seq.transform.offset_y = 172
# now stats sequencer sseq
sseq = scene.sequence_editor.sequences.new_image(
name=statsname,#os.path.basename(f),
filepath=os.path.join(path, statsname),
channel=4, frame_start=current_frame)
#seq.frame_final_duration = duration
#translation
sseq.blend_type = 'OVER_DROP'
sseq.use_translation = True
sseq.transform.offset_x = 100
sseq.transform.offset_y = 0 # should be resy*0.567 (440/776) -> above the images, but we use fixed width/height either way
# post increment
current_frame = current_frame+duration
#exit(0)
### fill duration till end of second ( blender wants that)
while(current_frame%scene.render.fps>=1):
current_frame+=1
seq.frame_final_end = current_frame
sseq.frame_final_end = current_frame
'''
for f in files:
duration = 30
seq = scene.sequence_editor.sequences.new_image(
name=os.path.basename(f),
filepath=f,# files[0],#os.path.join(path, files[0]),
channel=1, frame_start=current_frame)
seq.frame_final_duration = duration
#seq.use_reverse_frames = False #no such thing as reverse order on 1 frame
current_frame = current_frame+duration
'''
# add the rest of the images.
#for f in files:
# print(f)
# seq.elements.append(os.path.basename(f))
#render settings
scene.render.resolution_x = resx
scene.render.resolution_y = resy
scene.render.resolution_percentage = 100
scene.render.use_sequencer = 1
scene.frame_start = 0
scene.frame_end = current_frame # ignores the last frame
#actual encoder
scene.render.image_settings.file_format = "FFMPEG" #'AVI_JPEG'
scene.render.ffmpeg.codec = "H264"
scene.render.ffmpeg.audio_codec = 'NONE'
#scene.render.ffmpeg.video_bitrate = 24300
#scene.render.ffmpeg.audio_bitrate = 0
#scene.render.ffmpeg.minrate = 0
#scene.render.ffmpeg.maxrate = 30000
#scene.render.ffmpeg.buffersize = 2147483647
#and render
data_context = {"blend_data": context.blend_data, "scene": scene, "area": context.area, "window": context.window, "region": context.region}
bpy.ops.render.render(data_context, animation=True)
#print(dir(context))
#debug save
if DEBUG:
bpy.ops.wm.save_as_mainfile(filepath="generated.blend")
|
{"hexsha": "bc6c016495971dd30d1083181342be2830f5cf42", "size": 7285, "ext": "py", "lang": "Python", "max_stars_repo_path": "mud_me_a_video.py", "max_stars_repo_name": "ososinski/video_blender", "max_stars_repo_head_hexsha": "70bee9dfd1d74caf6eb0076035a9209041117f91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-11-30T11:52:16.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-30T11:52:16.000Z", "max_issues_repo_path": "mud_me_a_video.py", "max_issues_repo_name": "ososinski/video_blender", "max_issues_repo_head_hexsha": "70bee9dfd1d74caf6eb0076035a9209041117f91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mud_me_a_video.py", "max_forks_repo_name": "ososinski/video_blender", "max_forks_repo_head_hexsha": "70bee9dfd1d74caf6eb0076035a9209041117f91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2660944206, "max_line_length": 140, "alphanum_fraction": 0.6999313658, "include": true, "reason": "import numpy", "num_tokens": 1897}
|
# UE computation
function updatechoices!()#(ECᵢ, DTC, SR)
    # One fixed-point iteration of the user-equilibrium choice update:
    # nudges departure-time choices (DTC) and diverge split rates (SR)
    # toward lower experienced costs ECᵢ via a projected QP step (solveqp),
    # then renormalizes each choice vector (proportionalize).
    # NOTE(review): reads module-level globals defined elsewhere in the file
    # (DTC, SR, srcs, snks, divs, net, T, Tm, nsinks, nclasses, outlinkids).
    # Returns the updated (newDTC, newSR) without mutating the originals.
    global ECᵢ#, DTC, SR
    newDTC = zero(DTC)
    #newSR = SR
    newSR = Dict(d => Dict(i => zeros(T, nsinks, nclasses) for i in 1:2) for d in divs);
    λ = 1e-4  # step size toward lower-cost alternatives
    # Departure-time choice update, per (source, sink, class).
    for (srcid,src) in enumerate(srcs)
        i = outlinkids(net, src)[1]  # single outgoing link of the source
        for (snkid,snk) in enumerate(snks)
            for clsid in 1:nclasses
                c = ECᵢ[i,1:Tm,snkid,clsid]
                dtc = DTC[1:Tm, srcid,snkid,clsid]
                newdtc = proportionalize(solveqp(c, dtc, λ))[:,1]
                newDTC[1:Tm, srcid,snkid,clsid] .= newdtc
            end
        end
    end
    # Diverge split-rate update, per (diverge node, time, sink, class);
    # each diverge has exactly two outgoing alternatives (i in 1:2).
    for div in divs
        oli = outlinkids(net, div)
        for t in 1:T
            for (snkid,snk) in enumerate(snks)
                for clsid in 1:nclasses
                    sr = [SR[div][i][t, snkid,clsid] for i in 1:2]
                    c = ECᵢ[oli,t,snkid,clsid]
                    newsr = proportionalize(solveqp(c, sr, λ))[:,1]
                    for i in 1:2
                        newSR[div][i][t,snkid,clsid] = newsr[i]
                    end
                end
            end
        end
    end
    return newDTC, newSR
end
function relgap(ECᵢ, DTC)
    # Relative gap between realized and minimal expected costs, summed over
    # all (source, sink, class) triples; approaches 0 at user equilibrium.
    # Relies on module globals: srcs, snks, net, Tm, nclasses, outlinkids.
    gap_total = 0.
    best_total = 0.
    for (srcid, src) in enumerate(srcs)
        linkid = outlinkids(net, src)[1]
        for (snkid, snk) in enumerate(snks)
            for clsid in 1:nclasses
                costs = ECᵢ[linkid, 1:Tm, snkid, clsid]
                best = minimum(costs)
                realized = sum(costs .* DTC[1:Tm, srcid, snkid, clsid])
                gap_total += (realized - best)
                best_total += best
            end
        end
    end
    #@show (gap_total/best_total)
    return gap_total / best_total
end
# equilibrium computation
function optfn!(x, grad, p)
    # Objective 0.5‖x − p‖² with its gradient written into `grad` in place
    # (only when a gradient buffer is supplied, per the NLopt convention).
    diff = x .- p
    if !isempty(grad)
        grad .= diff
    end
    return 0.5 * sum(abs2, diff)
end
function eqcon!(x, grad)
    # Equality constraint sum(x) = 1; the gradient is constant 1 and is
    # written in place only when a gradient buffer is supplied.
    isempty(grad) || (grad .= 1)
    return sum(x) - 1
end
function solveqp(c, dtc, λ)
    # Projected-gradient step of the QP  min 0.5‖x − p‖²  s.t. sum(x)=1, x≥0:
    # move the current choice vector `dtc` a step λ against the cost vector
    # `c`, then project onto the probability simplex via projpx (defined
    # elsewhere in this file).
    p = dtc .- λ * c
    # Disabled reference implementation using Convex.jl + SCS, kept for
    # comparison. NOTE: the triple-quoted block below is a string literal,
    # i.e. a no-op expression, not executed code.
    """x = Variable(length(p))
    problem = minimize(0.5*sumsquares(x - p), sum(x) == 1, x >= 0.)
    solve!(problem, () -> SCS.Optimizer(verbose=false))
    if (problem.status != MOI.OPTIMAL)
        @show c, dtc, λ
    end
    return x.value"""
    return projpx(p)
end
function proportionalize(x; digits=6)
    # Clamp negatives to zero (mutates `x` in place, as before), round to
    # `digits` decimals, and renormalize so the entries sum to one.
    x[x .< 0.] .= 0
    rounded = round.(x, digits=digits)
    rounded ./= sum(rounded)
    return rounded
end
|
{"hexsha": "a96f0e8f6bd81dc4d0495dda571c2d6b67a8bf9f", "size": 2367, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "equilibrium.jl", "max_stars_repo_name": "SaiKiran92/dta_julia", "max_stars_repo_head_hexsha": "42ef131bb3ea562ea73646ffd5f8be6314afc687", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-20T00:09:39.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-20T00:09:39.000Z", "max_issues_repo_path": "equilibrium.jl", "max_issues_repo_name": "SaiKiran92/dta_julia", "max_issues_repo_head_hexsha": "42ef131bb3ea562ea73646ffd5f8be6314afc687", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "equilibrium.jl", "max_forks_repo_name": "SaiKiran92/dta_julia", "max_forks_repo_head_hexsha": "42ef131bb3ea562ea73646ffd5f8be6314afc687", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3, "max_line_length": 88, "alphanum_fraction": 0.4997887621, "num_tokens": 817}
|
## Usage
## copy paste it into the whole file of sawywer_push_nips.py
from collections import OrderedDict
import numpy as np
from gym.spaces import Box, Dict
import mujoco_py
import random
from multiworld.core.serializable import Serializable
from multiworld.envs.env_util import (
get_stat_in_paths,
create_stats_ordered_dict,
get_asset_full_path,
)
from multiworld.envs.mujoco.mujoco_env import MujocoEnv
import copy
from multiworld.core.multitask_env import MultitaskEnv
class SawyerPushAndReachXYEnv(MujocoEnv, Serializable, MultitaskEnv):
    """Sawyer mujoco env: reach with the hand and push two pucks in the XY plane.

    The end-effector is driven through a mocap body; actions are 2-D XY
    deltas scaled by ``pos_action_scale``. Goals are 6-D
    (hand_xy, puck1_xy, puck_xy) -- see ``sample_goal_xyxy`` and
    ``set_to_goal``. Observations are 4-D (hand_xy, puck_xy).
    """

    # Mocap target the hand is driven to on reset (x, y, z).
    INIT_HAND_POS = np.array([0, 0.4, 0.02])

    def __init__(
        self,
        reward_info=None,
        frame_skip=50,
        pos_action_scale=2. / 100,
        randomize_goals=True,
        hide_goal=False,
        init_block_low=(-0.05, 0.55),
        init_block_high=(0.05, 0.65),
        puck_goal_low=(-0.05, 0.55),
        puck_goal_high=(0.05, 0.65),
        hand_goal_low=(-0.05, 0.55),
        hand_goal_high=(0.05, 0.65),
        fixed_puck_goal=(0.05, 0.6),
        fixed_hand_goal=(-0.05, 0.6),
        mocap_low=(-0.1, 0.5, 0.0),
        mocap_high=(0.1, 0.7, 0.5),
        force_puck_in_goal_space=False,
    ):
        """Build the env.

        All *_low/*_high pairs are XY bounds (mocap bounds are XYZ).
        ``force_puck_in_goal_space`` teleports the puck back inside the
        goal box after every step.
        """
        self.quick_init(locals())
        self.reward_info = reward_info
        self.randomize_goals = randomize_goals
        self._pos_action_scale = pos_action_scale
        self.hide_goal = hide_goal
        self.init_block_low = np.array(init_block_low)
        self.init_block_high = np.array(init_block_high)
        self.puck_goal_low = np.array(puck_goal_low)
        self.puck_goal_high = np.array(puck_goal_high)
        self.hand_goal_low = np.array(hand_goal_low)
        self.hand_goal_high = np.array(hand_goal_high)
        self.fixed_puck_goal = np.array(fixed_puck_goal)
        self.fixed_hand_goal = np.array(fixed_hand_goal)
        self.mocap_low = np.array(mocap_low)
        self.mocap_high = np.array(mocap_high)
        self.force_puck_in_goal_space = force_puck_in_goal_space
        self._goal_xyxy = self.sample_goal_xyxy()
        # MultitaskEnv.__init__(self, distance_metric_order=2)
        MujocoEnv.__init__(self, self.model_name, frame_skip=frame_skip)

        self.action_space = Box(
            np.array([-1, -1]),
            np.array([1, 1]),
        )
        self.obs_box = Box(
            np.array([-0.15, 0.5, -0.15, 0.5]),
            np.array([0.15, 0.7, 0.15, 0.7]),
        )
        # 6-D goal box: hand goal followed by two puck goals.
        goal_low = np.concatenate((self.hand_goal_low, self.puck_goal_low, self.puck_goal_low))
        goal_high = np.concatenate((self.hand_goal_high, self.puck_goal_high, self.puck_goal_high))
        self.goal_box = Box(
            goal_low,
            goal_high,
        )
        self.observation_space = Dict([
            ('observation', self.obs_box),
            ('state_observation', self.obs_box),
            ('desired_goal', self.goal_box),
            ('state_desired_goal', self.goal_box),
            ('achieved_goal', self.goal_box),
            ('state_achieved_goal', self.goal_box),
        ])
        # hack for state-based experiments for other envs
        # self.observation_space = Box(
        #     np.array([-0.2, 0.5, -0.2, 0.5, -0.2, 0.5]),
        #     np.array([0.2, 0.7, 0.2, 0.7, 0.2, 0.7]),
        # )
        # self.goal_space = Box(
        #     np.array([-0.2, 0.5, -0.2, 0.5, -0.2, 0.5]),
        #     np.array([0.2, 0.7, 0.2, 0.7, 0.2, 0.7]),
        # )
        self.reset()
        self.reset_mocap_welds()

    @property
    def model_name(self):
        # return get_asset_full_path(
        #     'sawyer_xyz/sawyer_push_and_reach_mocap_goal_hidden.xml'
        # )
        # NOTE(review): `hide_goal` is not consulted here; the goal-hidden
        # model is always loaded.
        return get_asset_full_path(
            'sawyer_xyz/sawyer_push_and_reach_mocap_goal_hidden.xml'
        )

    def viewer_setup(self):
        """Place the camera (3rd-person view is the active preset)."""
        self.viewer.cam.trackbodyid = 0
        self.viewer.cam.distance = 1.0

        # robot view
        # rotation_angle = 90
        # cam_dist = 1
        # cam_pos = np.array([0, 0.5, 0.2, cam_dist, -45, rotation_angle])

        # 3rd person view
        cam_dist = 0.3
        rotation_angle = 270
        cam_pos = np.array([0, 1.0, 0.5, cam_dist, -45, rotation_angle])

        # top down view
        # cam_dist = 0.2
        # rotation_angle = 0
        # cam_pos = np.array([0, 0, 1.5, cam_dist, -90, rotation_angle])

        for i in range(3):
            self.viewer.cam.lookat[i] = cam_pos[i]
        self.viewer.cam.distance = cam_pos[3]
        self.viewer.cam.elevation = cam_pos[4]
        self.viewer.cam.azimuth = cam_pos[5]
        self.viewer.cam.trackbodyid = -1

    def step(self, a):
        """Apply a 2-D XY action via mocap, simulate, and return (obs, r, done, info).

        `done` is always False; `info` carries hand/puck/touch distances and
        a success flag (hand + puck distance below 0.06).
        """
        a = np.clip(a, -1, 1)
        # keep the mocap z pinned near 0.06 regardless of the 2-D action
        mocap_delta_z = 0.06 - self.data.mocap_pos[0, 2]
        new_mocap_action = np.hstack((
            a,
            np.array([mocap_delta_z])
        ))
        self.mocap_set_action(new_mocap_action[:3] * self._pos_action_scale)
        if self.force_puck_in_goal_space:
            puck_pos = self.get_puck_pos()[:2]
            clipped = np.clip(
                puck_pos,
                self.puck_goal_low,
                self.puck_goal_high
            )
            if not (clipped == puck_pos).all():
                self.set_puck_xy(clipped)
        u = np.zeros(7)  # zero torques: motion comes from the mocap weld
        self.do_simulation(u, self.frame_skip)
        obs = self._get_obs()

        # reward = self.compute_reward(obs, u, obs, self._goal_xyxy)
        reward = self.compute_reward(a, obs)
        done = False

        hand_distance = np.linalg.norm(
            self.get_hand_goal_pos() - self.get_endeff_pos()
        )
        puck_distance = np.linalg.norm(
            self.get_puck_goal_pos() - self.get_puck_pos())
        touch_distance = np.linalg.norm(
            self.get_endeff_pos() - self.get_puck_pos())
        info = dict(
            hand_distance=hand_distance,
            puck_distance=puck_distance,
            touch_distance=touch_distance,
            success=float(hand_distance + puck_distance < 0.06),
        )
        return obs, reward, done, info

    def mocap_set_action(self, action):
        """Translate the mocap body by `action` (XYZ delta), clipped to the mocap box."""
        pos_delta = action[None]
        new_mocap_pos = self.data.mocap_pos + pos_delta
        new_mocap_pos[0, :] = np.clip(
            new_mocap_pos[0, :],
            self.mocap_low,
            self.mocap_high
        )
        # new_mocap_pos[0, 0] = np.clip(
        #     new_mocap_pos[0, 0],
        #     -0.1,
        #     0.1,
        # )
        # new_mocap_pos[0, 1] = np.clip(
        #     new_mocap_pos[0, 1],
        #     -0.1 + 0.6,
        #     0.1 + 0.6,
        # )
        # new_mocap_pos[0, 2] = np.clip(
        #     new_mocap_pos[0, 2],
        #     0,
        #     0.5,
        # )
        self.data.set_mocap_pos('mocap', new_mocap_pos)
        self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))

    def _get_obs(self):
        """Observation dict: 4-D (hand_xy, puck_xy) state plus the 6-D goal.

        NOTE(review): 'achieved_goal' is the 4-D state while 'desired_goal'
        is 6-D; compute_reward subtracts them, relying on broadcasting --
        confirm intended.
        """
        e = self.get_endeff_pos()[:2]
        b = self.get_puck_pos()[:2]
        x = np.concatenate((e, b))
        g = self._goal_xyxy
        new_obs = dict(
            observation=x,
            state_observation=x,
            desired_goal=g,
            state_desired_goal=g,
            achieved_goal=x,
            state_achieved_goal=x,
        )
        return new_obs

    def get_puck_pos(self):
        # XYZ position of the puck body.
        return self.data.body_xpos[self.puck_id].copy()

    def get_endeff_pos(self):
        # XYZ position of the end-effector (left claw) body.
        return self.data.body_xpos[self.endeff_id].copy()

    def get_hand_goal_pos(self):
        return self.data.body_xpos[self.hand_goal_id].copy()

    def get_puck_goal_pos(self):
        return self.data.body_xpos[self.puck_goal_id].copy()

    @property
    def endeff_id(self):
        return self.model.body_names.index('leftclaw')

    @property
    def puck_id(self):
        return self.model.body_names.index('puck')

    @property
    def puck_goal_id(self):
        return self.model.body_names.index('puck-goal')

    @property
    def hand_goal_id(self):
        return self.model.body_names.index('hand-goal')

    def sample_goal_xyxy(self):
        """Sample a 6-D goal (hand_xy, puck_xy, puck1_xy), random or fixed."""
        if self.randomize_goals:
            hand = np.random.uniform(self.hand_goal_low, self.hand_goal_high)
            puck = np.random.uniform(self.puck_goal_low, self.puck_goal_high)
            puck1 = np.random.uniform(self.puck_goal_low, self.puck_goal_high)
        else:
            hand = self.fixed_hand_goal.copy()
            puck = self.fixed_puck_goal.copy()
            puck1 = self.fixed_puck_goal.copy()
        return np.hstack((hand, puck , puck1))

    def sample_puck_xy(self):
        # NOTE(review): everything after the raise is dead code; subclasses
        # override this method.
        raise NotImplementedError("Shouldn't you use "
                                  "SawyerPushAndReachXYEasyEnv? Ask Vitchyr")
        pos = np.random.uniform(self.init_block_low, self.init_block_high)
        while np.linalg.norm(self.get_endeff_pos()[:2] - pos) < 0.035:
            pos = np.random.uniform(self.init_block_low, self.init_block_high)
        return pos

    def set_puck_xy(self, pos):
        """Teleport puck 1 to `pos` (XY) at fixed height, zeroing its velocity."""
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[7:10] = np.hstack((pos.copy(), np.array([0.02])))
        qvel[7:10] = [0, 0, 0]
        self.set_state(qpos, qvel)

    def set_puck1_xy(self, pos):
        """Teleport puck 2 to `pos` (XY) at fixed height, zeroing its velocity."""
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[14:17] = np.hstack((pos.copy(), np.array([0.02])))
        qvel[14:17] = [0, 0, 0]
        self.set_state(qpos, qvel)

    def set_goal_xyxy(self, xyxy):
        """Store the goal and move the goal marker bodies in qpos.

        NOTE(review): only the hand goal (xyxy[:2]) and the LAST puck goal
        (xyxy[-2:]) are written, although the goal is 6-D -- the middle
        puck goal marker is never placed; confirm intended.
        """
        self._goal_xyxy = xyxy
        hand_goal = xyxy[:2]
        puck_goal = xyxy[-2:]
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[21:24] = np.hstack((hand_goal.copy(), np.array([0.02])))
        qvel[21:24] = [0, 0, 0]
        qpos[28:31] = np.hstack((puck_goal.copy(), np.array([0.02])))
        qvel[28:31] = [0, 0, 0]
        self.set_state(qpos, qvel)

    def reset_mocap_welds(self):
        """Resets the mocap welds that we use for actuation."""
        sim = self.sim
        if sim.model.nmocap > 0 and sim.model.eq_data is not None:
            for i in range(sim.model.eq_data.shape[0]):
                if sim.model.eq_type[i] == mujoco_py.const.EQ_WELD:
                    # identity relative pose for the weld constraint
                    sim.model.eq_data[i, :] = np.array(
                        [0., 0., 0., 1., 0., 0., 0.])
        sim.forward()

    def reset_mocap2body_xpos(self):
        # move mocap to weld joint
        self.data.set_mocap_pos(
            'mocap',
            np.array([self.data.body_xpos[self.endeff_id]]),
        )
        self.data.set_mocap_quat(
            'mocap',
            np.array([self.data.body_xquat[self.endeff_id]]),
        )

    def reset(self):
        """Restore init joint angles, re-pin the mocap, resample goal and pucks."""
        velocities = self.data.qvel.copy()
        angles = np.array(self.init_angles)
        self.set_state(angles.flatten(), velocities.flatten())
        for _ in range(10):
            self.data.set_mocap_pos('mocap', self.INIT_HAND_POS)
            self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
        # set_state resets the goal xy, so we need to explicit set it again
        self._goal_xyxy = self.sample_goal_for_rollout()
        self.set_goal_xyxy(self._goal_xyxy)
        self.set_puck_xy(self.sample_puck_xy())
        self.set_puck1_xy(self.sample_puck_xy())
        self.reset_mocap_welds()
        return self._get_obs()

    def compute_rewards(self, action, obs, info=None):
        """Batched negative L2 distance between achieved and desired goals."""
        r = -np.linalg.norm(
            obs['state_achieved_goal'] - obs['state_desired_goal'], axis=1)
        return r

    def compute_reward(self, action, obs, info=None):
        """Single-sample negative L2 distance between achieved and desired goals."""
        r = -np.linalg.norm(
            obs['state_achieved_goal'] - obs['state_desired_goal'])
        return r

    # REPLACING REWARD FN
    # def compute_reward(self, ob, action, next_ob, goal, env_info=None):
    #     hand_xy = next_ob[:2]
    #     puck_xy = next_ob[-2:]
    #     hand_goal_xy = goal[:2]
    #     puck_goal_xy = goal[-2:]
    #     hand_dist = np.linalg.norm(hand_xy - hand_goal_xy)
    #     puck_dist = np.linalg.norm(puck_xy - puck_goal_xy)
    #     if not self.reward_info or self.reward_info["type"] == "euclidean":
    #         r = - hand_dist - puck_dist
    #     elif self.reward_info["type"] == "state_distance":
    #         r = -np.linalg.norm(next_ob - goal)
    #     elif self.reward_info["type"] == "hand_only":
    #         r = - hand_dist
    #     elif self.reward_info["type"] == "puck_only":
    #         r = - puck_dist
    #     elif self.reward_info["type"] == "sparse":
    #         t = self.reward_info["threshold"]
    #         r = float(
    #             hand_dist + puck_dist < t
    #         ) - 1
    #     else:
    #         raise NotImplementedError("Invalid/no reward type.")
    #     return r

    def compute_her_reward_np(self, ob, action, next_ob, goal, env_info=None):
        # NOTE(review): compute_reward takes (action, obs, info=None) but is
        # called here with five positional/keyword args -- this raises a
        # TypeError if invoked; it matches the commented-out reward above.
        return self.compute_reward(ob, action, next_ob, goal, env_info=env_info)

    # @property
    # def init_angles(self):
    #     return [
    #         1.06139477e+00, -6.93988797e-01, 3.76729934e-01, 1.78410587e+00,
    #         - 5.36763074e-01, 5.88122189e-01, 3.51531533e+00,
    #         0.05, 0.55, 0.02,
    #         1, 0, 0, 0,
    #         0, 0.6, 0.02,
    #         1, 0, 1, 0,
    #         0, 0.6, 0.02,
    #         1, 0, 1, 0,
    #     ]

    # @property
    # def init_angles(self):
    #     return [1.78026069e+00, - 6.84415781e-01, - 1.54549202e-01,
    #             2.30672090e+00, 1.93111471e+00, 1.27854005e-01,
    #             1.49353907e+00, 1.80196716e-03, 7.40415708e-01,
    #             2.09895360e-02, 9.99999990e-01, 3.05766111e-05,
    #             - 3.78462492e-06, 1.38684523e-04, - 3.62518814e-02,
    #             6.13435141e-01, 2.09686080e-02, 7.071067817-01,
    #             1.48979724e-14, 7.07106781e-01, - 1.48999120e-14,
    #             0, 0.6, 0.02,
    #             1, 0, 1, 0,
    #     ]

    @property
    def init_angles(self):
        # Full qpos at reset: 7 arm joints, then free-joint poses for the
        # two pucks and the goal marker bodies (pos + quat each).
        return [1.78026069e+00, - 6.84415781e-01, - 1.54549202e-01,
                2.30672090e+00, 1.93111471e+00, 1.27854005e-01,
                1.49353907e+00, 1.80196716e-03, 7.40415708e-01,
                2.09895360e-02, 9.99999990e-01, 3.05766111e-05,
                - 3.78462492e-06, 1.38684513e-04,
                0.08, 0.60, 0.02,
                1, 0, 0, 0,
                0, 0.6, 0.02,
                1, 0, 1, 0,
                0, 0.6, 0.02,
                1, 0, 1, 0,
                0, 0.6, 0.02,
                1, 0, 1, 0,
        ]

    def get_diagnostics(self, paths, prefix=""):
        """Collect per-path and final-step stats for the step-info keys."""
        statistics = OrderedDict()
        for stat_name in [
            'hand_distance',
            'puck_distance',
            'touch_distance',
            'success',
        ]:
            stat_name = stat_name
            stat = get_stat_in_paths(paths, 'env_infos', stat_name)
            statistics.update(create_stats_ordered_dict(
                '%s%s' % (prefix, stat_name),
                stat,
                always_show_all_stats=True,
            ))
            statistics.update(create_stats_ordered_dict(
                'Final %s%s' % (prefix, stat_name),
                [s[-1] for s in stat],
                always_show_all_stats=True,
            ))
        return statistics

    """
    Multitask functions
    """

    @property
    def goal_dim(self) -> int:
        # NOTE(review): reported dim is 4 although goal_box (and
        # sample_goal_xyxy) are 6-D -- confirm which callers rely on this.
        return 4

    def sample_goals(self, batch_size):
        """Sample a batch of goals uniformly from the goal box."""
        # goals = np.zeros((batch_size, self.goal_box.low.size))
        # for b in range(batch_size):
        #     goals[b, :] = self.sample_goal_xyxy()
        print('running this')
        goals = np.random.uniform(
            self.goal_box.low,
            self.goal_box.high,
            size=(batch_size, self.goal_box.low.size),
        )
        return {
            'desired_goal': goals,
            'state_desired_goal': goals,
        }

    def sample_goal_for_rollout(self):
        g = self.sample_goal_xyxy()
        return g

    # OLD SET GOAL
    # def set_goal(self, goal):
    #     MultitaskEnv.set_goal(self, goal)
    #     self.set_goal_xyxy(goal)
    #     # hack for VAE
    #     self.set_to_goal(goal)

    def get_goal(self):
        return {
            'desired_goal': self._goal_xyxy,
            'state_desired_goal': self._goal_xyxy,
        }

    # not being used
    def set_goal(self, goal):
        state_goal = goal['state_desired_goal']
        self.set_goal_xyxy(state_goal)

    def set_to_goal(self, goal):
        """Physically move hand and both pucks to the 6-D goal configuration."""
        state_goal = goal['state_desired_goal']
        #print(state_goal.shape)
        self.set_hand_xy(state_goal[:2])
        self.set_puck_xy(state_goal[-2:])
        self.set_puck1_xy(state_goal[-4:-2])

    def convert_obs_to_goals(self, obs):
        return obs

    def set_hand_xy(self, xy):
        """Drive the hand to `xy` by repeatedly pinning the mocap and simulating."""
        for _ in range(10):
            self.data.set_mocap_pos('mocap', np.array([xy[0], xy[1], 0.02]))
            self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
            u = np.zeros(7)
            self.do_simulation(u, self.frame_skip)

    def get_env_state(self):
        """Snapshot (joint state, mocap state) for later restoration."""
        joint_state = self.sim.get_state()
        mocap_state = self.data.mocap_pos, self.data.mocap_quat
        state = joint_state, mocap_state
        return copy.deepcopy(state)

    def set_env_state(self, state):
        """Restore a snapshot produced by get_env_state."""
        joint_state, mocap_state = state
        self.sim.set_state(joint_state)
        mocap_pos, mocap_quat = mocap_state
        self.data.set_mocap_pos('mocap', mocap_pos)
        self.data.set_mocap_quat('mocap', mocap_quat)
        self.sim.forward()

    def generate_new_state(self,goal):
        """Perturb one randomly chosen puck goal by a fixed offset.

        Mutates ``goal['state_desired_goal']`` in place: copies one puck goal
        onto the other, then shifts it north/east/south/west, and moves the
        scene to the result. Returns (num_1, num_2, direction) describing
        which puck was moved and how.
        """
        num_1 = random.randint(0,1)
        num_2 = 1-num_1
        direction = random.randint(0,3)
        state_goal = goal['state_desired_goal']
        if num_1 == 1:
            state_goal[-4:-2] = state_goal[-2:]
            # North
            if direction == 0:
                state_goal[-3] +=0.11
            # East
            if direction == 1:
                state_goal[-4] -=0.09
            # South
            if direction == 2:
                state_goal[-3] -=0.11
            # West
            if direction == 3:
                state_goal[-4] +=0.09
        elif num_2 ==1:
            state_goal[-2:] = state_goal[-4:-2]
            # North
            if direction == 0:
                state_goal[-1] +=0.11
            # East
            if direction == 1:
                state_goal[-2] -=0.09
            # South
            if direction == 2:
                state_goal[-1] -=0.11
            # West
            if direction == 3:
                state_goal[-2] +=0.09
        #print(state_goal.shape)
        self.set_hand_xy(state_goal[:2])
        self.set_puck_xy(state_goal[-2:])
        self.set_puck1_xy(state_goal[-4:-2])
        return np.array((num_1,num_2,direction))
class SawyerPushAndReachXYEasyEnv(SawyerPushAndReachXYEnv):
    """
    Always start the block in the same position, and use a 40x20 puck space
    """

    def __init__(self, **kwargs):
        self.quick_init(locals())
        # Caller-supplied kwargs override the widened default puck-goal box.
        merged_kwargs = dict(
            puck_goal_low=(-0.15, 0.5),
            puck_goal_high=(0.15, 0.7),
        )
        merged_kwargs.update(kwargs)
        super().__init__(**merged_kwargs)

    def sample_puck_xy(self):
        # Deterministic initial puck position.
        return np.array([0, 0.6])
class SawyerPushAndReachXYHarderEnv(SawyerPushAndReachXYEnv):
    """
    Fixed initial position, all spaces are 40cm x 20cm
    """

    def __init__(self, **kwargs):
        self.quick_init(locals())
        # Widen every goal box (and the mocap range) to 40cm x 20cm.
        super().__init__(
            hand_goal_low=(-0.15, 0.5),
            hand_goal_high=(0.15, 0.7),
            puck_goal_low=(-0.15, 0.5),
            puck_goal_high=(0.15, 0.7),
            mocap_low=(-0.15, 0.5, 0.0),
            mocap_high=(0.15, 0.7, 0.5),
            **kwargs
        )

    def sample_puck_xy(self):
        # Uniformly random initial puck position over the full goal box.
        return np.random.uniform(self.puck_goal_low, self.puck_goal_high)
|
{"hexsha": "6afa394083946c136e271b8c6fb46519a81ef6fc", "size": 20015, "ext": "py", "lang": "Python", "max_stars_repo_path": "multiworld/envs/mujoco/sawyer_xyz/sawyer_push_nips_get_DATA.py", "max_stars_repo_name": "luuckiest/multiworld", "max_stars_repo_head_hexsha": "3878b5480579eda169482fd013d2097956f3d083", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "multiworld/envs/mujoco/sawyer_xyz/sawyer_push_nips_get_DATA.py", "max_issues_repo_name": "luuckiest/multiworld", "max_issues_repo_head_hexsha": "3878b5480579eda169482fd013d2097956f3d083", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "multiworld/envs/mujoco/sawyer_xyz/sawyer_push_nips_get_DATA.py", "max_forks_repo_name": "luuckiest/multiworld", "max_forks_repo_head_hexsha": "3878b5480579eda169482fd013d2097956f3d083", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6386554622, "max_line_length": 99, "alphanum_fraction": 0.5532850362, "include": true, "reason": "import numpy", "num_tokens": 5720}
|
import logging
import random
import numpy as np
import torch
from fastprogress.fastprogress import progress_bar
from torch.utils.data import DataLoader, SequentialSampler
from transformers import ElectraForSequenceClassification, ElectraTokenizer
logger = logging.getLogger(__name__)
class GrandChallengeTextClassifier:
    """Batch-inference wrapper around a fine-tuned ELECTRA sequence classifier.

    Loads the tokenizer and checkpoint from the paths in ``args`` and exposes
    :meth:`predict`, which returns one predicted class index per example.
    """

    def __init__(self, args):
        """Load tokenizer and model from ``args`` and move the model to the device.

        ``args`` must provide: tokenizer_dir, do_lower_case, ckpt_dir,
        no_cuda, and (for predict) eval_batch_size.
        """
        self.args = args
        self.tokenizer = ElectraTokenizer.from_pretrained(
            self.args.tokenizer_dir, do_lower_case=self.args.do_lower_case
        )
        logger.info("Predict text class the following checkpoints: %s", self.args.ckpt_dir)
        self.args.device = "cuda" if torch.cuda.is_available() and not self.args.no_cuda else "cpu"
        self.model = ElectraForSequenceClassification.from_pretrained(self.args.ckpt_dir)
        self.model.to(self.args.device)
        self.model.eval()

    def predict(self, test_dataset):
        """Return a 1-D array of predicted class indices for ``test_dataset``.

        BUG FIX: the previous version rebound ``preds`` on every batch, so
        only the final batch's predictions were returned. Logits are now
        accumulated across all batches before the argmax.
        """
        sampler = SequentialSampler(test_dataset)
        dataloader = DataLoader(test_dataset, sampler=sampler, batch_size=self.args.eval_batch_size)
        logger.info("Num examples = {}".format(len(test_dataset)))

        all_logits = []
        for batch in progress_bar(dataloader):
            batch = tuple(t.to(self.args.device) for t in batch)
            with torch.no_grad():
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                    "labels": batch[3],
                }
                outputs = self.model(**inputs)
                # labels were supplied, so outputs = (loss, logits, ...)
                logits = outputs[1]
            all_logits.append(logits.detach().cpu().numpy())

        if not all_logits:
            # Empty dataset: return an empty prediction array instead of
            # crashing in np.concatenate / np.argmax.
            return np.array([], dtype=np.int64)
        preds = np.concatenate(all_logits, axis=0)
        return np.argmax(preds, axis=1)
|
{"hexsha": "c850de12a2a2ca68533664ae8b93f14991da04a6", "size": 1677, "ext": "py", "lang": "Python", "max_stars_repo_path": "text_classification/predictor.py", "max_stars_repo_name": "voithru/asr-text_classification-pipeline", "max_stars_repo_head_hexsha": "720106cc7e06e7d7a811dee14f660be2b4fb97fc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2021-12-28T02:16:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-09T23:02:25.000Z", "max_issues_repo_path": "text_classification/predictor.py", "max_issues_repo_name": "voithru/asr-text_classification-pipeline", "max_issues_repo_head_hexsha": "720106cc7e06e7d7a811dee14f660be2b4fb97fc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "text_classification/predictor.py", "max_forks_repo_name": "voithru/asr-text_classification-pipeline", "max_forks_repo_head_hexsha": "720106cc7e06e7d7a811dee14f660be2b4fb97fc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4909090909, "max_line_length": 100, "alphanum_fraction": 0.6451997615, "include": true, "reason": "import numpy", "num_tokens": 337}
|
from tequila.circuit import gates
from tequila.objective import ExpectationValue
from tequila.objective.objective import Variable
from tequila.hamiltonian import paulis
from tequila import simulate
import tequila
from tequila.circuit.noise import BitFlip,PhaseDamp,PhaseFlip,AmplitudeDamp,PhaseAmplitudeDamp,DepolarizingError
import numpy
import pytest
# Every installed sampling backend except qulacs is exercised below.
samplers = [name for name in tequila.INSTALLED_SAMPLERS.keys() if name != 'qulacs']


@pytest.mark.dependencies
def test_dependencies():
    """The backends used in this module must be installed."""
    for backend in ('qiskit', 'pyquil', 'cirq'):
        assert backend in samplers
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
@pytest.mark.parametrize('controlled',[False,True])
def test_bit_flip(simulator, p, controlled):
    """Sample an X (or CX) circuit under BitFlip noise; smoke test only
    (the statistical assertion is disabled)."""
    if controlled:
        circuit = gates.X(target=1) + gates.CX(1, 0)
        noise = BitFlip(p, 2)
    else:
        circuit = gates.X(target=0)
        noise = BitFlip(p, 1)
    observable = paulis.Qm(0)
    objective = ExpectationValue(U=circuit, H=observable)
    simulate(objective, backend=simulator, samples=1, noise=noise)
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
@pytest.mark.parametrize("angle", numpy.random.uniform(0.,2*numpy.pi,1))
def test_rx_bit_flip_0(simulator, p, angle):
    """Sample a variational Rx under single-qubit BitFlip noise (smoke test)."""
    circuit = gates.Rx(target=0, angle=Variable('a'))
    objective = ExpectationValue(U=circuit, H=paulis.Z(0))
    simulate(objective, backend=simulator, samples=1,
             variables={'a': angle}, noise=BitFlip(p, 1))
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
@pytest.mark.parametrize("angle", numpy.random.uniform(0.,2*numpy.pi,1))
def test_rx_bit_flip_1(simulator, p, angle):
    """Controlled-Rx under two-qubit BitFlip noise; prints sampled vs analytic value."""
    circuit = gates.X(target=0) + gates.CRx(control=0, target=1, angle=Variable('a'))
    observable = paulis.Z(1) * paulis.I(0)
    objective = ExpectationValue(U=circuit, H=observable)
    sampled = simulate(objective, backend=simulator, samples=1,
                       variables={'a': angle}, noise=BitFlip(p, 2))
    print(sampled)
    print(p+numpy.cos(angle)-p*numpy.cos(angle))
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
def test_double_cnot_bit_flip(simulator, p):
    """Two CNOTs onto a shared target under BitFlip noise (smoke test)."""
    circuit = gates.X(0) + gates.X(2) + gates.CX(0, 1) + gates.CX(2, 1)
    objective = ExpectationValue(U=circuit, H=paulis.Qm(1))
    simulate(objective, backend=simulator, samples=1, noise=BitFlip(p, 2))
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
def test_phase_flip(simulator, p):
    """Hadamard measured in X under PhaseFlip noise (smoke test)."""
    target = 0
    objective = ExpectationValue(U=gates.H(target=target), H=paulis.X(target))
    simulate(objective, backend=simulator, samples=1, noise=PhaseFlip(p, 1))
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
@pytest.mark.parametrize("angle", numpy.random.uniform(0.,2*numpy.pi,1))
def test_rz_phase_flip_0(simulator, p, angle):
    """H-Rz-H sandwich measured in Y under PhaseFlip noise; prints the sample."""
    target = 0
    circuit = (gates.H(target=target)
               + gates.Rz(angle=Variable('a'), target=target)
               + gates.H(target=target))
    objective = ExpectationValue(U=circuit, H=paulis.Y(target))
    sampled = simulate(objective, backend=simulator, variables={'a': angle},
                       samples=1, noise=PhaseFlip(p, 1))
    print(sampled)
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
@pytest.mark.parametrize("angle", numpy.random.uniform(0.,2*numpy.pi,1))
def test_rz_phase_flip_1(simulator, p, angle):
    """Controlled-Rz between Hadamards under two-qubit PhaseFlip; prints the sample."""
    circuit = (gates.X(target=0) + gates.H(1)
               + gates.CRz(control=0, target=1, angle=Variable('a'))
               + gates.H(1))
    observable = paulis.Z(1) * paulis.I(0)
    objective = ExpectationValue(circuit, observable)
    sampled = simulate(objective, backend=simulator, variables={'a': angle},
                       samples=1, noise=PhaseFlip(p, 2))
    print(sampled)
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
def test_phase_damp(simulator, p):
    """Hadamard measured in X under PhaseDamp noise (smoke test)."""
    target = 0
    objective = ExpectationValue(U=gates.H(target=target), H=paulis.X(target))
    simulate(objective, backend=simulator, samples=1, noise=PhaseDamp(p, 1))
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
def test_amp_damp(simulator, p):
    """X gate measured against the |1> projector under AmplitudeDamp noise."""
    projector = (0.5) * (paulis.I(0) - paulis.Z(0))
    objective = ExpectationValue(U=gates.X(target=0), H=projector)
    simulate(objective, backend=simulator, samples=1, noise=AmplitudeDamp(p, 1))
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
def test_phase_amp_damp(simulator, p):
    """Sample <Z> after an X gate under combined phase/amplitude damping."""
    q = 0
    ham = paulis.Z(0)
    circuit = gates.X(target=q)
    objective = ExpectationValue(U=circuit, H=ham)
    noise = PhaseAmplitudeDamp(p, 1 - p, 1)
    result = simulate(objective, backend=simulator, samples=1, noise=noise)
    # assert (numpy.isclose(result, -1 + 2 * p, atol=1.e-1))
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
def test_phase_amp_damp_is_both(simulator, p):
    """Check PhaseAmplitudeDamp samples like PhaseDamp composed with AmplitudeDamp."""
    q = 0
    ham = paulis.Z(0)
    circuit = gates.X(target=q)
    objective = ExpectationValue(U=circuit, H=ham)
    composed = PhaseDamp(1 - p, 1) + AmplitudeDamp(p, 1)
    result_composed = simulate(objective, backend=simulator, samples=1, noise=composed)
    fused = PhaseAmplitudeDamp(p, 1 - p, 1)
    result_fused = simulate(objective, backend=simulator, samples=1, noise=fused)
    # assert (numpy.isclose(result_composed, result_fused, atol=1.e-1))
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
@pytest.mark.parametrize('controlled', [False, True])
def test_depolarizing_error(simulator, p, controlled):
    """Sample <Z> of a (possibly controlled) X gate under depolarizing noise."""
    control_qubit = 1
    target_qubit = 0
    ham = paulis.Z(0)
    if controlled:
        circuit = gates.X(target=control_qubit) + gates.X(target=target_qubit, control=control_qubit)
        noise = DepolarizingError(p, 2)
    else:
        circuit = gates.X(target=target_qubit)
        noise = DepolarizingError(p, 1)
    objective = ExpectationValue(U=circuit, H=ham)
    result = simulate(objective, backend=simulator, samples=1, noise=noise)
    # assert (numpy.isclose(result, -1 + p, atol=1.e-1))
@pytest.mark.skipif(len(samplers) == 0, reason="Missing necessary backends")
@pytest.mark.parametrize("simulator", samplers)
@pytest.mark.parametrize("p", numpy.random.uniform(0.,1.,1))
def test_repetition_works(simulator, p):
    """Sample <Qm> of two consecutive X gates, each followed by bit-flip noise."""
    q = 0
    ham = paulis.Qm(q)
    circuit = gates.X(target=q) + gates.X(target=q)
    objective = ExpectationValue(U=circuit, H=ham)
    noise = BitFlip(p, 1)
    result = simulate(objective, backend=simulator, samples=1, noise=noise)
    # assert (numpy.isclose(result, 2 * (p - p * p), atol=1.e-1))
|
{"hexsha": "6eb563abead4986d8dd67e2ca82eb3b66aac90c1", "size": 7985, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_noise.py", "max_stars_repo_name": "akpc/margarita", "max_stars_repo_head_hexsha": "171fc160b32b60b30ff31e8ec62c7517da3d240d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_noise.py", "max_issues_repo_name": "akpc/margarita", "max_issues_repo_head_hexsha": "171fc160b32b60b30ff31e8ec62c7517da3d240d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-08T13:34:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-06T06:12:37.000Z", "max_forks_repo_path": "tests/test_noise.py", "max_forks_repo_name": "akpc/margarita", "max_forks_repo_head_hexsha": "171fc160b32b60b30ff31e8ec62c7517da3d240d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4181034483, "max_line_length": 112, "alphanum_fraction": 0.6825297433, "include": true, "reason": "import numpy", "num_tokens": 2502}
|
#include "transaction.h"
#include "base58.h"
#include "bignum.h"
#include "block.h"
#include "checkpoints.h"
#include "init.h"
#include "main.h"
#include "txindex.h"
#include "txmempool.h"
#include "util.h"
#include <boost/foreach.hpp>
// Reset this transaction to an empty/default state. nTime is stamped with the
// network-adjusted current time, so a "null" tx is still time-tagged.
void CTransaction::SetNull()
{
    nVersion = CTransaction::CURRENT_VERSION;
    nTime = GetAdjustedTime();
    vin.clear();
    vout.clear();
    nLockTime = 0;
    nDoS = 0; // Denial-of-service prevention
}
// Transaction id: hash of the full serialized transaction.
uint256 CTransaction::GetHash() const { return SerializeHash(*this); }
// Decide whether this tx is a newer version of `old` (transaction replacement).
// Both must spend exactly the same prevouts in the same order; the comparison
// is then made on the input with the lowest differing nSequence number: the
// side holding the higher sequence at that position wins.
bool CTransaction::IsNewerThan(const CTransaction& old) const
{
    if (vin.size() != old.vin.size())
        return false;
    for (unsigned int i = 0; i < vin.size(); i++)
        if (vin[i].prevout != old.vin[i].prevout)
            return false;
    bool fNewer = false;
    unsigned int nLowest = std::numeric_limits<unsigned int>::max();
    for (unsigned int i = 0; i < vin.size(); i++) {
        if (vin[i].nSequence != old.vin[i].nSequence) {
            // Track the minimum differing sequence seen so far; the verdict
            // reflects whichever side supplied that minimum last.
            if (vin[i].nSequence <= nLowest) {
                fNewer = false;
                nLowest = vin[i].nSequence;
            }
            if (old.vin[i].nSequence < nLowest) {
                fNewer = true;
                nLowest = old.vin[i].nSequence;
            }
        }
    }
    return fNewer;
}
// A coinstake has at least one real (non-null) input, at least two outputs,
// and an empty first output that serves as the coinstake marker.
bool CTransaction::IsCoinStake() const
{
    // ppcoin: the coin stake transaction is marked with the first output empty
    return (vin.size() > 0 && (!vin[0].prevout.IsNull()) && vout.size() >= 2 && vout[0].IsEmpty());
}
// Validate a cold-staking spend: the tx must be a coinstake whose inputs are
// all P2CS scriptSigs signed by the same staker pubkey, and whose outputs
// (other than the empty coinstake marker) all pay back to `script`, i.e. the
// same P2CS script being spent.
bool CTransaction::CheckColdStake(const CScript& script) const
{
    // tx is a coinstake tx
    if (!IsCoinStake())
        return false;
    if (vin.empty())
        return false;
    const boost::optional<std::vector<uint8_t>> firstPubKey =
        vin[0].scriptSig.GetPubKeyOfP2CSScriptSig();
    if (!firstPubKey)
        return false; // this is not P2CS
    // all inputs must be P2CS and must be paying to the same pubkey
    for (unsigned int i = 1; i < vin.size(); i++) {
        if (vin[i].scriptSig.GetPubKeyOfP2CSScriptSig() != firstPubKey)
            return false;
    }
    // all outputs except first (coinstake marker)
    // have the same pubKeyScript and it matches the script we are spending
    for (unsigned int i = 1; i < vout.size(); i++)
        if (vout[i].scriptPubKey != script)
            return false;
    return true;
}
bool CTransaction::HasP2CSOutputs() const
{
for (const CTxOut& txout : vout) {
if (txout.scriptPubKey.IsPayToColdStaking())
return true;
}
return false;
}
// Sum of all output values. Throws if any single value or the running total
// leaves the permitted money range.
CAmount CTransaction::GetValueOut() const
{
    CAmount total = 0;
    for (const CTxOut& txout : vout) {
        total += txout.nValue;
        if (!MoneyRange(txout.nValue) || !MoneyRange(total))
            throw std::runtime_error("CTransaction::GetValueOut() : value out of range");
    }
    return total;
}
// One-line summary: "<txid> base|stake|user".
std::string CTransaction::ToStringShort() const
{
    const char* kind = IsCoinBase() ? "base" : (IsCoinStake() ? "stake" : "user");
    return fmt::format("{} {}", GetHash().ToString().c_str(), kind);
}
std::string CTransaction::ToString() const
{
std::string str;
str += IsCoinBase() ? "Coinbase" : (IsCoinStake() ? "Coinstake" : "CTransaction");
str += fmt::format("(hash={}, nTime={}, ver={}, vin.size={}, vout.size={}, nLockTime={})\n",
GetHash().ToString().substr(0, 10), nTime, nVersion, vin.size(), vout.size(),
nLockTime);
for (unsigned int i = 0; i < vin.size(); i++)
str += " " + vin[i].ToString() + "\n";
for (unsigned int i = 0; i < vout.size(); i++)
str += " " + vout[i].ToString() + "\n";
return str;
}
// Write the full ToString() dump to the info log.
void CTransaction::print() const { NLog.write(b_sev::info, "{}", ToString()); }
bool CTransaction::ReadFromDisk(const ITxDB& txdb, COutPoint prevout, CTxIndex& txindexRet)
{
SetNull();
if (!txdb.ReadTxIndex(prevout.hash, txindexRet))
return fDebug ? NLog.error("Tx index not found for tx {}", prevout.hash.ToString()) : false;
if (!txdb.ReadTx(txindexRet.pos, *this))
return fDebug ? NLog.error("Tx not found for tx {}", prevout.hash.ToString()) : false;
if (prevout.n >= vout.size()) {
SetNull();
return fDebug ? NLog.error("Invalid prevout with n >= vout.size()", prevout.hash.ToString())
: false;
}
return true;
}
// Convenience overload: same as the three-argument ReadFromDisk but discards
// the tx-index entry.
bool CTransaction::ReadFromDisk(CTxDB& txdb, COutPoint prevout)
{
    CTxIndex txindex;
    return ReadFromDisk(txdb, prevout, txindex);
}
//
// Check transaction inputs, and make sure any
// pay-to-script-hash transactions are evaluating IsStandard scripts
//
// Why bother? To avoid denial-of-service attacks; an attacker
// can submit a standard HASH... OP_EQUAL transaction,
// which will get accepted into blocks. The redemption
// script can be anything; an attacker could use a very
// expensive-to-check-upon-redemption script like:
// DUP CHECKSIG DROP ... repeated 100 times... OP_1
//
// Standardness check on inputs: every scriptSig must be push-only and leave
// exactly the number of stack items its prevout script expects; for P2SH
// inputs the redeem script must stay under the sigop cap. See the comment
// block above for the DoS rationale.
bool CTransaction::AreInputsStandard(const MapPrevTx& mapInputs) const
{
    if (IsCoinBase())
        return true; // Coinbases don't use vin normally
    for (unsigned int i = 0; i < vin.size(); i++) {
        const CTxOut& prev = GetOutputFor(vin[i], mapInputs);
        std::vector<std::vector<unsigned char>> vSolutions;
        txnouttype whichType;
        // get the scriptPubKey corresponding to this input:
        const CScript& prevScript = prev.scriptPubKey;
        if (!Solver(CTxDB(), prevScript, whichType, vSolutions))
            return false;
        // Transactions with extra stuff in their scriptSigs are
        // non-standard. Note that this EvalScript() call will
        // be quick, because if there are any operations
        // beside "push data" in the scriptSig the
        // IsStandard() call returns false
        std::vector<std::vector<unsigned char>> stack;
        if (EvalScript(stack, vin[i].scriptSig, *this, i, false, 0).isErr())
            return false;
        if (whichType == TX_SCRIPTHASH) {
            if (stack.empty())
                return false;
            CScript subscript(stack.back().begin(), stack.back().end()); // Get the redeemScript
            // Removed the check to make sure the redeemScript subscript fits one of the four standard
            // transaction types Instead, make sure that the redeemScript doesn't have too many signature
            // check Ops
            if (subscript.GetSigOpCount(true) > MAX_P2SH_SIGOPS) {
                return false;
            }
        } else {
            // Not a TX_SCRIPTHASH scriptPubKey
            int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
            if (nArgsExpected < 0)
                return false;
            // If stack is different than expected, not standard
            if (stack.size() != (unsigned int)nArgsExpected)
                return false;
        }
    }
    return true;
}
unsigned int CTransaction::GetLegacySigOpCount() const
{
unsigned int nSigOps = 0;
for (const CTxIn& txin : vin) {
nSigOps += txin.scriptSig.GetSigOpCount(false);
}
for (const CTxOut& txout : vout) {
nSigOps += txout.scriptPubKey.GetSigOpCount(false);
}
return nSigOps;
}
// Context-free consensus checks: non-empty vin/vout, serialized size within
// the block-size limit, per-output and total value ranges, cold-staking
// minimum amount, duplicate-input detection, and coinbase scriptSig length /
// null-prevout rules. A DoS score is charged on failure; when sourceBlockPtr
// is given, block-level reject info is filled for cases that also invalidate
// the containing block.
Result<void, TxValidationState> CTransaction::CheckTransaction(const ITxDB& txdb,
                                                               CBlock* sourceBlockPtr) const
{
    // Basic checks that don't depend on any context
    if (vin.empty()) {
        DoS(10, false);
        return Err(MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-vin-empty"));
    }
    if (vout.empty()) {
        DoS(10, false);
        return Err(MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-vout-empty"));
    }
    // Size limits
    unsigned int nSizeLimit = MaxBlockSize(txdb);
    if (::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION) > nSizeLimit) {
        DoS(100, false);
        return Err(MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-oversize"));
    }
    // Check for negative or overflow output values
    CAmount nValueOut = 0;
    for (unsigned int i = 0; i < vout.size(); i++) {
        const CTxOut& txout = vout[i];
        // Empty outputs are reserved for coinbase/coinstake markers.
        if (txout.IsEmpty() && !IsCoinBase() && !IsCoinStake()) {
            DoS(100, false);
            return Err(MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "txout-empty-for-tx"));
        }
        if (txout.nValue < 0) {
            DoS(100, false);
            return Err(MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-vout-negative"));
        }
        if (txout.nValue > MAX_MONEY) {
            DoS(100, false);
            return Err(MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-vout-toolarge"));
        }
        nValueOut += txout.nValue;
        if (!MoneyRange(nValueOut)) {
            DoS(100, false);
            return Err(
                MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-txouttotal-toolarge"));
        }
        // check cold staking enforcement (for delegations) and value out
        if (txout.scriptPubKey.IsPayToColdStaking()) {
            if (txout.nValue < Params().MinColdStakingAmount()) {
                DoS(100, false);
                return Err(MakeInvalidTxState(TxValidationResult::TX_CONSENSUS,
                                              "bad-txns-coldstake-low-amount"));
            }
        }
    }
    // Check for duplicate inputs
    {
        std::set<COutPoint> vInOutPoints;
        for (const CTxIn& txin : vin) {
            if (vInOutPoints.find(txin.prevout) != vInOutPoints.cend()) {
                if (sourceBlockPtr) {
                    sourceBlockPtr->reject = CBlockReject(REJECT_INVALID, "bad-txns-inputs-duplicate",
                                                          sourceBlockPtr->GetHash());
                }
                return Err(
                    MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-inputs-duplicate"));
            }
            vInOutPoints.insert(txin.prevout);
        }
    }
    if (IsCoinBase()) {
        // Coinbase scriptSig carries arbitrary data; only its length is bounded.
        if (vin[0].scriptSig.size() < 2 || vin[0].scriptSig.size() > 100) {
            if (sourceBlockPtr) {
                sourceBlockPtr->reject =
                    CBlockReject(REJECT_INVALID, "bad-cb-length", sourceBlockPtr->GetHash());
            }
            DoS(100, false);
            return Err(MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-cb-length"));
        }
    } else {
        for (const CTxIn& txin : vin)
            if (txin.prevout.IsNull()) {
                DoS(10, false);
                return Err(
                    MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-prevout-null"));
            }
    }
    return Ok();
}
// Minimum fee required for a transaction of `nBytes` serialized size to enter
// a block that already holds `nBlockSize` bytes. The base fee is charged per
// started kB; a dust penalty applies when any output is below one CENT, and
// the fee is scaled up sharply as the block approaches the size limit
// (MAX_MONEY signals "unacceptable").
CAmount CTransaction::GetMinFee(const ITxDB& txdb, unsigned int nBlockSize, enum GetMinFee_mode mode,
                                unsigned int nBytes) const
{
    // Base fee is either MIN_TX_FEE or MIN_RELAY_TX_FEE
    CAmount nBaseFee = (mode == GMF_RELAY) ? MIN_RELAY_TX_FEE : MIN_TX_FEE;
    unsigned int nNewBlockSize = nBlockSize + nBytes;
    CAmount nMinFee = (1 + (CAmount)nBytes / 1000) * nBaseFee;
    // To limit dust spam, require MIN_TX_FEE/MIN_RELAY_TX_FEE if any output is less than 0.01
    if (nMinFee < nBaseFee) {
        for (const CTxOut& txout : vout)
            if (txout.nValue < CENT)
                nMinFee = nBaseFee;
    }
    // Raise the price as the block approaches full
    unsigned int nSizeLimit = MaxBlockSize(txdb);
    if (nBlockSize != 1 && nNewBlockSize >= nSizeLimit / 2) {
        if (nNewBlockSize >= nSizeLimit)
            return MAX_MONEY;
        nMinFee *= nSizeLimit / (nSizeLimit - nNewBlockSize);
    }
    if (!MoneyRange(nMinFee))
        nMinFee = MAX_MONEY;
    return nMinFee;
}
// Load this transaction from its on-disk position via the tx database.
bool CTransaction::ReadFromDisk(CDiskTxPos pos, const ITxDB& txdb) { return txdb.ReadTx(pos, *this); }
// Undo this transaction's effect on the tx index (used during reorgs): clear
// the spent markers of all prevouts it consumed, then erase its own index
// entry.
bool CTransaction::DisconnectInputs(CTxDB& txdb)
{
    // Relinquish previous transactions' spent pointers
    if (!IsCoinBase()) {
        for (const CTxIn& txin : vin) {
            COutPoint prevout = txin.prevout;
            // Get prev txindex from disk
            CTxIndex txindex;
            if (!txdb.ReadTxIndex(prevout.hash, txindex))
                return NLog.error("DisconnectInputs() : ReadTxIndex failed");
            if (prevout.n >= txindex.vSpent.size())
                return NLog.error("DisconnectInputs() : prevout.n out of range");
            // Mark outpoint as not spent
            txindex.vSpent[prevout.n].SetNull();
            // Write back
            if (!txdb.UpdateTxIndex(prevout.hash, txindex))
                return NLog.error("DisconnectInputs() : UpdateTxIndex failed");
        }
    }
    // Remove transaction from index
    // This can fail if a duplicate of this transaction was in a chain that got
    // reorganized away. This is only possible if this transaction was completely
    // spent, so erasing it would be a no-op anyway.
    txdb.EraseTxIndex(this->GetHash());
    return true;
}
// Collect all previous transactions (and their tx-index entries) spent by
// this transaction into `inputsRet`. Lookup order: pending changes in
// `mapTestPool` (when connecting a block or mining), then the tx database,
// then the mempool for yet-unconfirmed parents. Finally validates that every
// prevout.n is within range of its parent's outputs.
bool CTransaction::FetchInputs(const ITxDB& txdb, const std::map<uint256, CTxIndex>& mapTestPool,
                               bool fBlock, bool fMiner, MapPrevTx& inputsRet, bool& fInvalid) const
{
    // FetchInputs can return false either because we just haven't seen some inputs
    // (in which case the transaction should be stored as an orphan)
    // or because the transaction is malformed (in which case the transaction should
    // be dropped). If tx is definitely invalid, fInvalid will be set to true.
    fInvalid = false;
    if (IsCoinBase())
        return true; // Coinbase transactions have no inputs to fetch.
    for (unsigned int i = 0; i < vin.size(); i++) {
        COutPoint prevout = vin[i].prevout;
        if (inputsRet.count(prevout.hash))
            continue; // Got it already
        // Read txindex
        CTxIndex& txindex = inputsRet[prevout.hash].first;
        bool fFound = true;
        if ((fBlock || fMiner) && mapTestPool.count(prevout.hash)) {
            // Get txindex from current proposed changes
            txindex = mapTestPool.find(prevout.hash)->second;
        } else {
            // Read txindex from txdb
            fFound = txdb.ReadTxIndex(prevout.hash, txindex);
        }
        if (!fFound && (fBlock || fMiner))
            return fMiner ? false
                          : NLog.error("FetchInputs() : {} prev tx {} index entry not found",
                                       GetHash().ToString(), prevout.hash.ToString());
        // Read txPrev
        CTransaction& txPrev = inputsRet[prevout.hash].second;
        // CDiskTxPos(1, 1) marks a mempool-only (not yet on disk) transaction.
        if (!fFound || txindex.pos == CDiskTxPos(1, 1)) {
            // Get prev tx from single transactions in memory
            if (!mempool.lookup(prevout.hash, txPrev))
                return NLog.error("FetchInputs() : {} mempool Tx prev not found {}",
                                  GetHash().ToString(), prevout.hash.ToString());
            if (!fFound)
                txindex.vSpent.resize(txPrev.vout.size());
        } else {
            // Get prev tx from disk
            if (!txPrev.ReadFromDisk(txindex.pos, txdb))
                return NLog.error("FetchInputs() : {} ReadFromDisk prev tx {} failed",
                                  GetHash().ToString(), prevout.hash.ToString());
        }
    }
    // Make sure all prevout.n indexes are valid:
    for (unsigned int i = 0; i < vin.size(); i++) {
        const COutPoint prevout = vin[i].prevout;
        assert(inputsRet.count(prevout.hash) != 0);
        const CTxIndex& txindex = inputsRet[prevout.hash].first;
        const CTransaction& txPrev = inputsRet[prevout.hash].second;
        if (prevout.n >= txPrev.vout.size() || prevout.n >= txindex.vSpent.size()) {
            // Revisit this if/when transaction replacement is implemented and allows
            // adding inputs:
            fInvalid = true;
            return DoS(100,
                       NLog.error("FetchInputs() : {} prevout.n out of range {} {} {}"
                                  " prev tx {}\n{}",
                                  GetHash().ToString(), prevout.n, txPrev.vout.size(),
                                  txindex.vSpent.size(), prevout.hash.ToString(), txPrev.ToString()));
        }
    }
    return true;
}
// Resolve the previous output referenced by `input` from the fetched-inputs
// map; throws std::runtime_error when the parent tx or output index is absent.
const CTxOut& CTransaction::GetOutputFor(const CTxIn& input, const MapPrevTx& inputs) const
{
    const MapPrevTx::const_iterator it = inputs.find(input.prevout.hash);
    if (it == inputs.end())
        throw std::runtime_error("CTransaction::GetOutputFor() : prevout.hash not found");
    const CTransaction& prevTx = it->second.second;
    if (input.prevout.n >= prevTx.vout.size())
        throw std::runtime_error("CTransaction::GetOutputFor() : prevout.n out of range");
    return prevTx.vout[input.prevout.n];
}
// Total value of all resolved inputs; a coinbase spends nothing.
CAmount CTransaction::GetValueIn(const MapPrevTx& inputs) const
{
    if (IsCoinBase())
        return 0;
    CAmount total = 0;
    for (const CTxIn& txin : vin)
        total += GetOutputFor(txin, inputs).nValue;
    return total;
}
unsigned int CTransaction::GetP2SHSigOpCount(const MapPrevTx& inputs) const
{
if (IsCoinBase())
return 0;
unsigned int nSigOps = 0;
for (unsigned int i = 0; i < vin.size(); i++) {
const CTxOut& prevout = GetOutputFor(vin[i], inputs);
if (prevout.scriptPubKey.IsPayToScriptHash())
nSigOps += prevout.scriptPubKey.GetSigOpCount(vin[i].scriptSig);
}
return nSigOps;
}
// Apply this transaction's spends to the tx index: validates prevout ranges,
// coinbase/coinstake maturity, input timestamps, value ranges, double-spends,
// script signatures (skipped below the last checkpoint when connecting
// blocks), and in/out value balance plus minimum fees. On success, marks each
// spent outpoint with `posThisTx` and (when connecting/mining) records the
// updated index entries in `mapTestPool`.
// Only change vs. previous revision: the "fee too low" rejection's debug
// detail said "nTxFee < 0" (copy-paste from the negative-fee case); it now
// states the actual failure.
Result<void, TxValidationState>
CTransaction::ConnectInputs(const ITxDB& txdb, MapPrevTx inputs,
                            std::map<uint256, CTxIndex>& mapTestPool, const CDiskTxPos& posThisTx,
                            const boost::optional<CBlockIndex>& pindexBlock, bool fBlock, bool fMiner,
                            CBlock* sourceBlockPtr) const
{
    // Take over previous transactions' spent pointers
    // fBlock is true when this is called from AcceptBlock when a new best-block is added to the
    // blockchain fMiner is true when called from the internal bitcoin miner
    // ... both are false when called from CTransaction::AcceptToMemoryPool
    if (!IsCoinBase()) {
        const int nCbM = Params().CoinbaseMaturity(txdb);
        CAmount nValueIn = 0;
        CAmount nFees = 0;
        for (unsigned int i = 0; i < vin.size(); i++) {
            COutPoint prevout = vin[i].prevout;
            assert(inputs.count(prevout.hash) > 0);
            CTxIndex& txindex = inputs[prevout.hash].first;
            CTransaction& txPrev = inputs[prevout.hash].second;
            static_assert(std::is_same<uint256, decltype(txindex.pos.nBlockPos)>::value,
                          "Expected same types");
            if (prevout.n >= txPrev.vout.size() || prevout.n >= txindex.vSpent.size()) {
                DoS(100, false);
                return Err(MakeInvalidTxState(
                    TxValidationResult::TX_INVALID_INPUTS, "bad-txns-inputs-invalid",
                    fmt::format("ConnectInputs() : {} prevout.n out of range {} {} {}"
                                " prev tx {}\n{}",
                                GetHash().ToString(), prevout.n, txPrev.vout.size(),
                                txindex.vSpent.size(), prevout.hash.ToString(), txPrev.ToString())));
            }
            // If prev is coinbase or coinstake, check that it's matured
            if (txPrev.IsCoinBase() || txPrev.IsCoinStake()) {
                const boost::optional<CBlockIndex> inputIndex =
                    txdb.ReadBlockIndex(txindex.pos.nBlockPos);
                // failed to read/find block in db
                if (!inputIndex) {
                    if (sourceBlockPtr) {
                        sourceBlockPtr->reject =
                            CBlockReject(REJECT_INVALID,
                                         "bad-txns-premature-spend-of-coinbase/coinstake-check-failed",
                                         sourceBlockPtr->GetHash());
                    }
                    const std::string msg =
                        "bad-txns-premature-spend-of-coinbase/coinstake-check-failed";
                    return Err(MakeInvalidTxState(
                        TxValidationResult::TX_PREMATURE_SPEND_CHECK_ERROR, msg,
                        fmt::format("ConnectInputs() : Failed to check coinbase/coinstake block "
                                    "maturity for spend; reading block {} failed",
                                    txindex.pos.nBlockPos.ToString())));
                }
                // check if spent before maturity
                if (pindexBlock->nHeight - inputIndex->nHeight < nCbM) {
                    if (sourceBlockPtr) {
                        sourceBlockPtr->reject = CBlockReject(
                            REJECT_INVALID, "bad-txns-premature-spend-of-coinbase/coinstake",
                            sourceBlockPtr->GetHash());
                    }
                    const std::string msg = txPrev.IsCoinBase()
                                                ? "bad-txns-premature-spend-of-coinbase"
                                                : "bad-txns-premature-spend-of-coinstake";
                    return Err(
                        MakeInvalidTxState(TxValidationResult::TX_PREMATURE_SPEND, msg,
                                           fmt::format("ConnectInputs() : tried to spend {} at depth {}",
                                                       txPrev.IsCoinBase() ? "coinbase" : "coinstake",
                                                       pindexBlock->nHeight - inputIndex->nHeight)));
                }
            }
            // ppcoin: check transaction timestamp
            if (txPrev.nTime > nTime) {
                DoS(100, false);
                return Err(MakeInvalidTxState(
                    TxValidationResult::TX_CONSENSUS, "bad-txns-input-time-order",
                    "ConnectInputs() : transaction timestamp earlier than input transaction"));
            }
            // Check for negative or overflow input values
            nValueIn += txPrev.vout[prevout.n].nValue;
            if (!MoneyRange(txPrev.vout[prevout.n].nValue) || !MoneyRange(nValueIn)) {
                DoS(100, false);
                return Err(MakeInvalidTxState(TxValidationResult::TX_CONSENSUS,
                                              "bad-txns-inputvalues-outofrange",
                                              "ConnectInputs() : txin values out of range"));
            }
        }
        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.
        for (unsigned int i = 0; i < vin.size(); i++) {
            COutPoint prevout = vin[i].prevout;
            assert(inputs.count(prevout.hash) > 0);
            CTxIndex& txindex = inputs[prevout.hash].first;
            CTransaction& txPrev = inputs[prevout.hash].second;
            // Check for conflicts (double-spend)
            // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
            // for an attacker to attempt to split the network.
            if (!txindex.vSpent[prevout.n].IsNull()) {
                const auto code = TxValidationResult::TX_MISSING_INPUTS;
                const auto msg = "bad-txns-inputs-missingorspent";
                if (fMiner) {
                    return Err(MakeInvalidTxState(code, msg));
                }
                return Err(MakeInvalidTxState(
                    code, msg,
                    fmt::format("ConnectInputs() : {} prev tx already used at {}", GetHash().ToString(),
                                txindex.vSpent[prevout.n].ToString())));
            }
            // Skip ECDSA signature verification when connecting blocks (fBlock=true)
            // before the last blockchain checkpoint. This is safe because block merkle hashes are
            // still computed and checked, and any change will be caught at the next checkpoint.
            if (!(fBlock &&
                  (txdb.GetBestChainHeight().value_or(0) < Checkpoints::GetTotalBlocksEstimate()))) {
                // Verify signature
                bool fStrictPayToScriptHash = true;
                const auto verifyRes =
                    VerifySignature(txPrev, *this, i, fStrictPayToScriptHash, false, 0);
                if (verifyRes.isErr()) {
                    // only during transition phase for P2SH: do not invoke anti-DoS code for
                    // potentially old clients relaying bad P2SH transactions
                    if (fStrictPayToScriptHash) {
                        const auto verifyResP2SH = VerifySignature(txPrev, *this, i, false, false, 0);
                        if (verifyResP2SH.isOk()) {
                            return Err(MakeInvalidTxState(
                                TxValidationResult::TX_NOT_STANDARD,
                                fmt::format("non-mandatory-script-verify-flag ({})",
                                            ScriptErrorString(verifyResP2SH.unwrapErr(RESULT_PRE))),
                                fmt::format("ConnectInputs() : {} P2SH VerifySignature failed",
                                            GetHash().ToString())));
                        }
                    }
                    const std::string msg =
                        fmt::format("mandatory-script-verify-flag-failed ({})",
                                    ScriptErrorString(verifyRes.unwrapErr(RESULT_PRE)));
                    if (sourceBlockPtr) {
                        sourceBlockPtr->reject =
                            CBlockReject(REJECT_INVALID, msg, sourceBlockPtr->GetHash());
                    }
                    this->reject = CTransaction::CTxReject(REJECT_INVALID, msg, GetHash());
                    DoS(100, false);
                    return Err(
                        MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, msg,
                                           fmt::format("ConnectInputs() : {} VerifySignature failed",
                                                       GetHash().ToString())));
                }
            }
            // Mark outpoints as spent
            txindex.vSpent[prevout.n] = posThisTx;
            // Write back
            if (fBlock || fMiner) {
                mapTestPool[prevout.hash] = txindex;
            }
        }
        if (!IsCoinStake()) {
            if (nValueIn < GetValueOut()) {
                if (sourceBlockPtr) {
                    sourceBlockPtr->reject =
                        CBlockReject(REJECT_INVALID, "bad-txns-in-belowout", sourceBlockPtr->GetHash());
                }
                DoS(100, false);
                return Err(
                    MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-in-belowout",
                                       fmt::format("ConnectInputs() : {} value in ({}) < value out ({})",
                                                   GetHash().ToString(), nValueIn, GetValueOut())));
            }
            // Tally transaction fees
            CAmount nTxFee = nValueIn - GetValueOut();
            if (nTxFee < 0) {
                DoS(100, false);
                return Err(MakeInvalidTxState(
                    TxValidationResult::TX_CONSENSUS, "bad-txns-fee-outofrange1",
                    fmt::format("ConnectInputs() : {} nTxFee < 0", GetHash().ToString())));
            }
            // enforce transaction fees for every block
            if (nTxFee < GetMinFee(txdb)) {
                if (fBlock) {
                    DoS(100, NLog.error("ConnectInputs() : {} not paying required fee={}, paid={}",
                                        GetHash().ToString(), FormatMoney(GetMinFee(txdb)),
                                        FormatMoney(nTxFee)));
                }
                // Fixed detail message: this branch rejects a fee below the
                // required minimum, not a negative fee.
                return Err(MakeInvalidTxState(
                    TxValidationResult::TX_CONSENSUS, "bad-txns-fee-outofrange2",
                    fmt::format("ConnectInputs() : {} not paying required fee", GetHash().ToString())));
            }
            nFees += nTxFee;
            if (!MoneyRange(nFees)) {
                return Err(
                    MakeInvalidTxState(TxValidationResult::TX_CONSENSUS, "bad-txns-fee-outofrange3"));
            }
        }
    }
    return Ok();
}
// ppcoin: total coin age spent in transaction, in the unit of coin-days.
// Only those coins meeting minimum age requirement counts. As those
// transactions not in main chain are not currently indexed so we
// might not find out about their coin age. Older transactions are
// guaranteed to be in main chain by sync-checkpoint. This rule is
// introduced to help nodes establish a consistent view of the coin
// age (trust score) of competing branches.
// Compute the total coin age consumed by this transaction in coin-days (see
// the rationale comment above). Inputs that cannot be found in the main chain
// or that are younger than the minimum stake age are simply skipped.
bool CTransaction::GetCoinAge(const ITxDB& txdb, uint64_t& nCoinAge) const
{
    CBigNum bnCentSecond = 0; // coin age in the unit of cent-seconds
    unsigned int nSMA = Params().StakeMinAge(txdb);
    nCoinAge = 0;
    if (IsCoinBase())
        return true;
    for (const CTxIn& txin : vin) {
        // First try finding the previous transaction in database
        CTransaction txPrev;
        CTxIndex txindex;
        if (!txPrev.ReadFromDisk(txdb, txin.prevout, txindex))
            continue; // previous transaction not in main chain
        if (nTime < txPrev.nTime)
            return false; // Transaction timestamp violation
        // Read block header
        CBlock block;
        if (!block.ReadFromDisk(txindex.pos.nBlockPos, txdb, false))
            return false; // unable to read block of previous transaction
        if (block.GetBlockTime() + nSMA > nTime)
            continue; // only count coins meeting min age requirement
        // Accumulate value * holding-time, scaled down to cent-seconds to keep
        // the big-number arithmetic small.
        CAmount nValueIn = txPrev.vout[txin.prevout.n].nValue;
        bnCentSecond += CBigNum(nValueIn) * (nTime - txPrev.nTime) / CENT;
        if (fDebug)
            NLog.write(b_sev::debug, "coin age nValueIn={} nTimeDiff={} bnCentSecond={}", nValueIn,
                       nTime - txPrev.nTime, bnCentSecond.ToString());
    }
    // Convert cent-seconds to coin-days.
    CBigNum bnCoinDay = bnCentSecond * CENT / COIN / (24 * 60 * 60);
    if (fDebug)
        NLog.write(b_sev::debug, "coin age bnCoinDay={}", bnCoinDay.ToString());
    nCoinAge = bnCoinDay.getuint64();
    return true;
}
// Convenience overload: fetch a transaction by txid using a freshly-opened
// tx database handle.
CTransaction CTransaction::FetchTxFromDisk(const uint256& txid)
{
    CTxDB txdb;
    return FetchTxFromDisk(txid, txdb);
}
// Load a transaction by txid via its tx-index entry. Throws
// std::runtime_error when either the index entry or the transaction data
// cannot be read.
CTransaction CTransaction::FetchTxFromDisk(const uint256& txid, const ITxDB& txdb)
{
    CTransaction result;
    CTxIndex txPos;
    if (!txdb.ReadTxIndex(txid, txPos)) {
        NLog.write(b_sev::err, "Unable to read standard transaction from db: {}", txid.ToString());
        throw std::runtime_error("Unable to read standard transaction from db: " + txid.ToString());
    }
    if (!result.ReadFromDisk(txPos.pos, txdb)) {
        NLog.write(b_sev::err,
                   "Unable to read standard transaction from disk with the "
                   "index given by db: {}",
                   txid.ToString());
        throw std::runtime_error("Unable to read standard transaction from db: " + txid.ToString());
    }
    return result;
}
/**
* Get all relevant keys of a transaction that are in our wallet. If an output number is defined, only
* the key of that output is returned.
* @brief CTransaction::GetThisWalletKeysOfTx
* @param txid
* @param outputNumber
* @return the keys of the transaction given by txid that are in our wallet. If outputNumber is defined,
* only that key will be returned, if it exists in our wallet
*/
// See the doc comment above: collect the wallet keys involved in tx `txid`
// (from its P2PKH outputs, and — unless a specific output is requested — the
// pubkeys recoverable from its input scriptSigs). The tx is looked up in the
// mempool first, then on disk (which may throw).
std::vector<CKey> CTransaction::GetThisWalletKeysOfTx(const uint256& txid,
                                                      boost::optional<unsigned> outputNumber)
{
    CTransaction tx;
    // first we try to find it in the mempool, if not found, we look on disk
    bool foundInMempool = mempool.lookup(txid, tx);
    if (!foundInMempool) {
        tx = CTransaction::FetchTxFromDisk(txid);
    }
    std::vector<CKey> keys;
    for (unsigned i = 0; i < tx.vout.size(); i++) {
        if (outputNumber.is_initialized() && *outputNumber != i) {
            continue;
        }
        const CTxOut& out = tx.vout[i];
        txnouttype outtype;
        std::vector<std::vector<uint8_t>> vSolutions;
        // this solution can be improved later for multiple kinds of transactions, here we only support
        // P2PKH transactions, more in CScript class's source file
        Solver(CTxDB(), out.scriptPubKey, outtype, vSolutions);
        if (outtype == TX_PUBKEYHASH) {
            CKeyID keyId = CKeyID(uint160(vSolutions[0]));
            if (!CBitcoinAddress(keyId).IsValid()) {
                continue;
            }
            CKey key;
            if (!pwalletMain->GetKey(keyId, key)) {
                continue;
            }
            // this is O(N^2), but this is OK, because the number of outputs is low
            // we're comparing public keys because CKey objects are not comparable
            if (std::find_if(keys.cbegin(), keys.cend(), [&key](const CKey& k) {
                    return k.GetPubKey() == key.GetPubKey();
                }) == keys.cend()) {
                keys.push_back(key);
            }
        }
    }
    // we don't retrieve input keys if the key of a specific output number is requested
    if (!outputNumber.is_initialized()) {
        for (unsigned i = 0; i < tx.vin.size(); i++) {
            const CTxIn& in = tx.vin[i];
            boost::optional<CKey> pubKey = CTransaction::GetPublicKeyFromScriptSig(in.scriptSig);
            if (!pubKey.is_initialized()) {
                continue;
            }
            CKey key;
            CKeyID keyId = pubKey->GetPubKey().GetID();
            if (!pwalletMain->GetKey(keyId, key)) {
                continue;
            }
            // this is O(N^2), but this is OK, because the number of inputs is low
            // we're comparing public keys because CKey objects are not comparable
            if (std::find_if(keys.cbegin(), keys.cend(), [&key](const CKey& k) {
                    return k.GetPubKey() == key.GetPubKey();
                }) == keys.cend()) {
                keys.push_back(key);
            }
        }
    }
    return keys;
}
// Try to decrypt NTP1 metadata attached to transaction `txid` with every
// wallet key involved in that transaction, returning the first successful
// plaintext. On failure, `error` is set and an empty string is returned.
// Only change vs. previous revision: grammar fix in the user-facing error
// message ("were able to decrypted" -> "were able to decrypt").
std::string CTransaction::DecryptMetadataOfTx(const StringViewT metadataStr, const uint256& txid,
                                              boost::optional<std::string>& error)
{
    std::vector<CKey> keysVec = GetThisWalletKeysOfTx(txid);
    if (keysVec.empty()) {
        error = "No valid keys were found in the transaction: " + txid.ToString();
        return "";
    }
    std::string decryptedMessage;
    for (const CKey& key : keysVec) {
        try {
            decryptedMessage = NTP1Script::DecryptMetadata(metadataStr, key);
            break;
        } catch (...) {
            // wrong key; try the next one
        }
    }
    if (decryptedMessage.empty()) {
        error = "None of the available keys in the following txid: " + txid.ToString() +
                " were able to decrypt the message: " + metadataStr.to_string();
        return "";
    }
    return decryptedMessage;
}
// Extract the public key from a standard P2PKH scriptSig of the shape
// <sig> <pubkey>. Returns boost::none unless both pushes are present and
// canonical and the pubkey is accepted by CKey.
boost::optional<CKey> CTransaction::GetPublicKeyFromScriptSig(const CScript& scriptSig)
{
    opcodetype opt;
    auto beg = scriptSig.cbegin();
    std::vector<unsigned char> vchSig, vchPub;
    // First push: the signature.
    if (!scriptSig.GetOp(beg, opt, vchSig)) {
        return boost::none;
    }
    // Second push: the public key.
    if (!scriptSig.GetOp(beg, opt, vchPub)) {
        return boost::none;
    }
    if (vchSig.empty() || !IsCanonicalSignature(vchSig)) {
        return boost::none;
    }
    if (vchPub.empty() || !IsCanonicalPubKey(vchPub)) {
        return boost::none;
    }
    CKey resultKey;
    if (!resultKey.SetPubKey(vchPub)) {
        return boost::none;
    }
    return resultKey;
}
// Return the first public key recoverable from any of the inputs' scriptSigs,
// or boost::none when no input yields one.
boost::optional<CKey> CTransaction::GetOnePublicKeyFromInputs(const CTransaction& tx)
{
    for (unsigned int i = 0; i < tx.vin.size(); i++) {
        boost::optional<CKey> key = GetPublicKeyFromScriptSig(tx.vin[i].scriptSig);
        if (key.is_initialized())
            return key;
    }
    return boost::none;
}
|
{"hexsha": "99d0d4c741f8ac13585e91ed528f4a294c9d28f9", "size": 36571, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "wallet/transaction.cpp", "max_stars_repo_name": "NeblioTeam/neblio", "max_stars_repo_head_hexsha": "12d5fda35b528c57eee29098de63607bda419693", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 138.0, "max_stars_repo_stars_event_min_datetime": "2017-08-13T18:55:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-19T04:05:25.000Z", "max_issues_repo_path": "wallet/transaction.cpp", "max_issues_repo_name": "NeblioTeam/neblio", "max_issues_repo_head_hexsha": "12d5fda35b528c57eee29098de63607bda419693", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 202.0, "max_issues_repo_issues_event_min_datetime": "2017-07-25T23:09:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T15:01:08.000Z", "max_forks_repo_path": "wallet/transaction.cpp", "max_forks_repo_name": "NeblioTeam/neblio", "max_forks_repo_head_hexsha": "12d5fda35b528c57eee29098de63607bda419693", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 65.0, "max_forks_repo_forks_event_min_datetime": "2017-08-22T12:28:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T01:28:00.000Z", "avg_line_length": 40.4099447514, "max_line_length": 105, "alphanum_fraction": 0.5631237866, "num_tokens": 8701}
|
using Distributions, Compat.Test, Random, LinearAlgebra
using Distributions: Product
@testset "Testing Product distributions" begin
let
rng, D = MersenneTwister(123456), 11
# Construct independent distributions and `Product` distribution from these.
μ = randn(rng, D)
ds = Normal.(μ, 1.0)
x = rand.(Ref(rng), ds)
d_product = Product(ds)
# Check that methods for `Product` are consistent.
@test length(d_product) == length(ds)
@test logpdf(d_product, x) ≈ sum(logpdf.(ds, x))
@test mean(d_product) == mean.(ds)
@test var(d_product) == var.(ds)
@test cov(d_product) == Diagonal(var.(ds))
@test entropy(d_product) == sum(entropy.(ds))
end
end
|
{"hexsha": "d3f050e1ad6108ca0c282ce8e1df8ca6a0516cac", "size": 695, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/product.jl", "max_stars_repo_name": "jarredbarber/Distributions.jl", "max_stars_repo_head_hexsha": "a6a9ef2799e8772feb0809b4396149d5b6e9a244", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/product.jl", "max_issues_repo_name": "jarredbarber/Distributions.jl", "max_issues_repo_head_hexsha": "a6a9ef2799e8772feb0809b4396149d5b6e9a244", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/product.jl", "max_forks_repo_name": "jarredbarber/Distributions.jl", "max_forks_repo_head_hexsha": "a6a9ef2799e8772feb0809b4396149d5b6e9a244", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2173913043, "max_line_length": 80, "alphanum_fraction": 0.6705035971, "num_tokens": 198}
|
[STATEMENT]
lemma zsplit0_I:
"\<And>n a. zsplit0 t = (n, a) \<Longrightarrow>
(Inum ((x::int) # bs) (CN 0 n a) = Inum (x # bs) t) \<and> numbound0 a"
(is "\<And>n a. ?S t = (n,a) \<Longrightarrow> (?I x (CN 0 n a) = ?I x t) \<and> ?N a")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n a. zsplit0 t = (n, a) \<Longrightarrow> Inum (x # bs) (CN 0 n a) = Inum (x # bs) t \<and> numbound0 a
[PROOF STEP]
proof (induct t rule: zsplit0.induct)
[PROOF STATE]
proof (state)
goal (7 subgoals):
1. \<And>c n a. zsplit0 (C c) = (n, a) \<Longrightarrow> Inum (x # bs) (CN 0 n a) = Inum (x # bs) (C c) \<and> numbound0 a
2. \<And>n na a. zsplit0 (Bound n) = (na, a) \<Longrightarrow> Inum (x # bs) (CN 0 na a) = Inum (x # bs) (Bound n) \<and> numbound0 a
3. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
4. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
5. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
6. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
7. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
case (1 c n a)
[PROOF STATE]
proof (state)
this:
zsplit0 (C c) = (n, a)
goal (7 subgoals):
1. \<And>c n a. zsplit0 (C c) = (n, a) \<Longrightarrow> Inum (x # bs) (CN 0 n a) = Inum (x # bs) (C c) \<and> numbound0 a
2. \<And>n na a. zsplit0 (Bound n) = (na, a) \<Longrightarrow> Inum (x # bs) (CN 0 na a) = Inum (x # bs) (Bound n) \<and> numbound0 a
3. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
4. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
5. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
6. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
7. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
zsplit0 (C c) = (n, a)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
zsplit0 (C c) = (n, a)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a) = Inum (x # bs) (C c) \<and> numbound0 a
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a) = Inum (x # bs) (C c) \<and> numbound0 a
goal (6 subgoals):
1. \<And>n na a. zsplit0 (Bound n) = (na, a) \<Longrightarrow> Inum (x # bs) (CN 0 na a) = Inum (x # bs) (Bound n) \<and> numbound0 a
2. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
3. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
4. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
5. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
6. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (6 subgoals):
1. \<And>n na a. zsplit0 (Bound n) = (na, a) \<Longrightarrow> Inum (x # bs) (CN 0 na a) = Inum (x # bs) (Bound n) \<and> numbound0 a
2. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
3. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
4. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
5. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
6. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
case (2 m n a)
[PROOF STATE]
proof (state)
this:
zsplit0 (Bound m) = (n, a)
goal (6 subgoals):
1. \<And>n na a. zsplit0 (Bound n) = (na, a) \<Longrightarrow> Inum (x # bs) (CN 0 na a) = Inum (x # bs) (Bound n) \<and> numbound0 a
2. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
3. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
4. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
5. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
6. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
zsplit0 (Bound m) = (n, a)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
zsplit0 (Bound m) = (n, a)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Bound m) \<and> numbound0 a
[PROOF STEP]
by (cases "m = 0") auto
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Bound m) \<and> numbound0 a
goal (5 subgoals):
1. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
2. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
4. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
5. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
2. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
4. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
5. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
case (3 m i a n a')
[PROOF STATE]
proof (state)
this:
zsplit0 a = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) a \<and> numbound0 ?a
zsplit0 (CN m i a) = (n, a')
goal (5 subgoals):
1. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
2. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
4. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
5. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?j = "fst (zsplit0 a)"
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
2. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
4. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
5. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?b = "snd (zsplit0 a)"
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
2. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
4. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
5. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
have abj: "zsplit0 a = (?j, ?b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zsplit0 a = (fst (zsplit0 a), snd (zsplit0 a))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
zsplit0 a = (fst (zsplit0 a), snd (zsplit0 a))
goal (5 subgoals):
1. \<And>n i a na aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (CN n i a) = (na, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 na aa) = Inum (x # bs) (CN n i a) \<and> numbound0 aa
2. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
4. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
5. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
proof (cases "m = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
2. m \<noteq> 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
m \<noteq> 0
goal (2 subgoals):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
2. m \<noteq> 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
with 3(1)[OF abj] 3(2)
[PROOF STATE]
proof (chain)
picking this:
Inum (x # bs) (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) a \<and> numbound0 (snd (zsplit0 a))
zsplit0 (CN m i a) = (n, a')
m \<noteq> 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Inum (x # bs) (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) a \<and> numbound0 (snd (zsplit0 a))
zsplit0 (CN m i a) = (n, a')
m \<noteq> 0
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
by (auto simp add: Let_def split_def)
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
goal (1 subgoal):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
case m: True
[PROOF STATE]
proof (state)
this:
m = 0
goal (1 subgoal):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
with abj
[PROOF STATE]
proof (chain)
picking this:
zsplit0 a = (fst (zsplit0 a), snd (zsplit0 a))
m = 0
[PROOF STEP]
have th: "a' = ?b \<and> n = i + ?j"
[PROOF STATE]
proof (prove)
using this:
zsplit0 a = (fst (zsplit0 a), snd (zsplit0 a))
m = 0
goal (1 subgoal):
1. a' = snd (zsplit0 a) \<and> n = i + fst (zsplit0 a)
[PROOF STEP]
using 3
[PROOF STATE]
proof (prove)
using this:
zsplit0 a = (fst (zsplit0 a), snd (zsplit0 a))
m = 0
zsplit0 a = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) a \<and> numbound0 ?a
zsplit0 (CN m i a) = (n, a')
goal (1 subgoal):
1. a' = snd (zsplit0 a) \<and> n = i + fst (zsplit0 a)
[PROOF STEP]
by (simp add: Let_def split_def)
[PROOF STATE]
proof (state)
this:
a' = snd (zsplit0 a) \<and> n = i + fst (zsplit0 a)
goal (1 subgoal):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
from abj 3 m
[PROOF STATE]
proof (chain)
picking this:
zsplit0 a = (fst (zsplit0 a), snd (zsplit0 a))
zsplit0 a = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) a \<and> numbound0 ?a
zsplit0 (CN m i a) = (n, a')
m = 0
[PROOF STEP]
have th2: "(?I x (CN 0 ?j ?b) = ?I x a) \<and> ?N ?b"
[PROOF STATE]
proof (prove)
using this:
zsplit0 a = (fst (zsplit0 a), snd (zsplit0 a))
zsplit0 a = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) a \<and> numbound0 ?a
zsplit0 (CN m i a) = (n, a')
m = 0
goal (1 subgoal):
1. Inum (x # bs) (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) a \<and> numbound0 (snd (zsplit0 a))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) a \<and> numbound0 (snd (zsplit0 a))
goal (1 subgoal):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
from th
[PROOF STATE]
proof (chain)
picking this:
a' = snd (zsplit0 a) \<and> n = i + fst (zsplit0 a)
[PROOF STEP]
have "?I x (CN 0 n a') = ?I x (CN 0 (i + ?j) ?b)"
[PROOF STATE]
proof (prove)
using this:
a' = snd (zsplit0 a) \<and> n = i + fst (zsplit0 a)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 (i + fst (zsplit0 a)) (snd (zsplit0 a)))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 (i + fst (zsplit0 a)) (snd (zsplit0 a)))
goal (1 subgoal):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 (i + fst (zsplit0 a)) (snd (zsplit0 a)))
goal (1 subgoal):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
from th2
[PROOF STATE]
proof (chain)
picking this:
Inum (x # bs) (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) a \<and> numbound0 (snd (zsplit0 a))
[PROOF STEP]
have "\<dots> = ?I x (CN 0 i (CN 0 ?j ?b))"
[PROOF STATE]
proof (prove)
using this:
Inum (x # bs) (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) a \<and> numbound0 (snd (zsplit0 a))
goal (1 subgoal):
1. Inum (x # bs) (CN 0 (i + fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) (CN 0 i (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))))
[PROOF STEP]
by (simp add: distrib_right)
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 (i + fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) (CN 0 i (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))))
goal (1 subgoal):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 i (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))))
[PROOF STEP]
have "?I x (CN 0 n a') = ?I x (CN 0 i a)"
[PROOF STATE]
proof (prove)
using this:
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 i (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))))
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 i a)
[PROOF STEP]
using th2
[PROOF STATE]
proof (prove)
using this:
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 i (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))))
Inum (x # bs) (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) a \<and> numbound0 (snd (zsplit0 a))
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 i a)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 i a)
goal (1 subgoal):
1. m = 0 \<Longrightarrow> Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
with th2 th m
[PROOF STATE]
proof (chain)
picking this:
Inum (x # bs) (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) a \<and> numbound0 (snd (zsplit0 a))
a' = snd (zsplit0 a) \<and> n = i + fst (zsplit0 a)
m = 0
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 i a)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Inum (x # bs) (CN 0 (fst (zsplit0 a)) (snd (zsplit0 a))) = Inum (x # bs) a \<and> numbound0 (snd (zsplit0 a))
a' = snd (zsplit0 a) \<and> n = i + fst (zsplit0 a)
m = 0
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN 0 i a)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a') = Inum (x # bs) (CN m i a) \<and> numbound0 a'
goal (4 subgoals):
1. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
4. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
4. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
case (4 t n a)
[PROOF STATE]
proof (state)
this:
zsplit0 t = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Neg t) = (n, a)
goal (4 subgoals):
1. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
4. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?nt = "fst (zsplit0 t)"
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
4. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?at = "snd (zsplit0 t)"
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
4. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
have abj: "zsplit0 t = (?nt, ?at)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
goal (4 subgoals):
1. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
4. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
[PROOF STEP]
have th: "a = Neg ?at \<and> n = - ?nt"
[PROOF STATE]
proof (prove)
using this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
goal (1 subgoal):
1. a = Neg (snd (zsplit0 t)) \<and> n = - fst (zsplit0 t)
[PROOF STEP]
using 4
[PROOF STATE]
proof (prove)
using this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
zsplit0 t = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Neg t) = (n, a)
goal (1 subgoal):
1. a = Neg (snd (zsplit0 t)) \<and> n = - fst (zsplit0 t)
[PROOF STEP]
by (simp add: Let_def split_def)
[PROOF STATE]
proof (state)
this:
a = Neg (snd (zsplit0 t)) \<and> n = - fst (zsplit0 t)
goal (4 subgoals):
1. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
4. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from abj 4
[PROOF STATE]
proof (chain)
picking this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
zsplit0 t = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Neg t) = (n, a)
[PROOF STEP]
have th2: "(?I x (CN 0 ?nt ?at) = ?I x t) \<and> ?N ?at"
[PROOF STATE]
proof (prove)
using this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
zsplit0 t = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Neg t) = (n, a)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
goal (4 subgoals):
1. \<And>a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Neg a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Neg a) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
3. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
4. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from th2[simplified] th[simplified]
[PROOF STATE]
proof (chain)
picking this:
fst (zsplit0 t) * x + Inum (x # bs) (snd (zsplit0 t)) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
a = Neg (snd (zsplit0 t)) \<and> n = - fst (zsplit0 t)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
fst (zsplit0 t) * x + Inum (x # bs) (snd (zsplit0 t)) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
a = Neg (snd (zsplit0 t)) \<and> n = - fst (zsplit0 t)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Neg t) \<and> numbound0 a
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Neg t) \<and> numbound0 a
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
case (5 s t n a)
[PROOF STATE]
proof (state)
this:
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Add s t) = (n, a)
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?ns = "fst (zsplit0 s)"
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?as = "snd (zsplit0 s)"
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?nt = "fst (zsplit0 t)"
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?at = "snd (zsplit0 t)"
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
have abjs: "zsplit0 s = (?ns, ?as)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
have abjt: "zsplit0 t = (?nt, ?at)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
[PROOF STEP]
have th: "a = Add ?as ?at \<and> n = ?ns + ?nt"
[PROOF STATE]
proof (prove)
using this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
goal (1 subgoal):
1. a = Add (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) + fst (zsplit0 t)
[PROOF STEP]
using 5
[PROOF STATE]
proof (prove)
using this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Add s t) = (n, a)
goal (1 subgoal):
1. a = Add (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) + fst (zsplit0 t)
[PROOF STEP]
by (simp add: Let_def split_def)
[PROOF STATE]
proof (state)
this:
a = Add (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) + fst (zsplit0 t)
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from abjs[symmetric]
[PROOF STATE]
proof (chain)
picking this:
(fst (zsplit0 s), snd (zsplit0 s)) = zsplit0 s
[PROOF STEP]
have bluddy: "\<exists>x y. (x, y) = zsplit0 s"
[PROOF STATE]
proof (prove)
using this:
(fst (zsplit0 s), snd (zsplit0 s)) = zsplit0 s
goal (1 subgoal):
1. \<exists>x y. (x, y) = zsplit0 s
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>x y. (x, y) = zsplit0 s
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from 5
[PROOF STATE]
proof (chain)
picking this:
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Add s t) = (n, a)
[PROOF STEP]
have "(\<exists>x y. (x, y) = zsplit0 s) \<longrightarrow>
(\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)"
[PROOF STATE]
proof (prove)
using this:
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Add s t) = (n, a)
goal (1 subgoal):
1. (\<exists>x y. (x, y) = zsplit0 s) \<longrightarrow> (\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<exists>x y. (x, y) = zsplit0 s) \<longrightarrow> (\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
with bluddy abjt
[PROOF STATE]
proof (chain)
picking this:
\<exists>x y. (x, y) = zsplit0 s
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
(\<exists>x y. (x, y) = zsplit0 s) \<longrightarrow> (\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)
[PROOF STEP]
have th3: "(?I x (CN 0 ?nt ?at) = ?I x t) \<and> ?N ?at"
[PROOF STATE]
proof (prove)
using this:
\<exists>x y. (x, y) = zsplit0 s
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
(\<exists>x y. (x, y) = zsplit0 s) \<longrightarrow> (\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from abjs 5
[PROOF STATE]
proof (chain)
picking this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Add s t) = (n, a)
[PROOF STEP]
have th2: "(?I x (CN 0 ?ns ?as) = ?I x s) \<and> ?N ?as"
[PROOF STATE]
proof (prove)
using this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Add s t) = (n, a)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 (fst (zsplit0 s)) (snd (zsplit0 s))) = Inum (x # bs) s \<and> numbound0 (snd (zsplit0 s))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 (fst (zsplit0 s)) (snd (zsplit0 s))) = Inum (x # bs) s \<and> numbound0 (snd (zsplit0 s))
goal (3 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Add a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Add a b) \<and> numbound0 aa
2. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
3. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from th3[simplified] th2[simplified] th[simplified]
[PROOF STATE]
proof (chain)
picking this:
fst (zsplit0 t) * x + Inum (x # bs) (snd (zsplit0 t)) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
fst (zsplit0 s) * x + Inum (x # bs) (snd (zsplit0 s)) = Inum (x # bs) s \<and> numbound0 (snd (zsplit0 s))
a = Add (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) + fst (zsplit0 t)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
fst (zsplit0 t) * x + Inum (x # bs) (snd (zsplit0 t)) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
fst (zsplit0 s) * x + Inum (x # bs) (snd (zsplit0 s)) = Inum (x # bs) s \<and> numbound0 (snd (zsplit0 s))
a = Add (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) + fst (zsplit0 t)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Add s t) \<and> numbound0 a
[PROOF STEP]
by (simp add: distrib_right)
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Add s t) \<and> numbound0 a
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
case (6 s t n a)
[PROOF STATE]
proof (state)
this:
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Sub s t) = (n, a)
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?ns = "fst (zsplit0 s)"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?as = "snd (zsplit0 s)"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?nt = "fst (zsplit0 t)"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?at = "snd (zsplit0 t)"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
have abjs: "zsplit0 s = (?ns, ?as)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
have abjt: "zsplit0 t = (?nt, ?at)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
[PROOF STEP]
have th: "a = Sub ?as ?at \<and> n = ?ns - ?nt"
[PROOF STATE]
proof (prove)
using this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
goal (1 subgoal):
1. a = Sub (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) - fst (zsplit0 t)
[PROOF STEP]
using 6
[PROOF STATE]
proof (prove)
using this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Sub s t) = (n, a)
goal (1 subgoal):
1. a = Sub (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) - fst (zsplit0 t)
[PROOF STEP]
by (simp add: Let_def split_def)
[PROOF STATE]
proof (state)
this:
a = Sub (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) - fst (zsplit0 t)
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from abjs[symmetric]
[PROOF STATE]
proof (chain)
picking this:
(fst (zsplit0 s), snd (zsplit0 s)) = zsplit0 s
[PROOF STEP]
have bluddy: "\<exists>x y. (x, y) = zsplit0 s"
[PROOF STATE]
proof (prove)
using this:
(fst (zsplit0 s), snd (zsplit0 s)) = zsplit0 s
goal (1 subgoal):
1. \<exists>x y. (x, y) = zsplit0 s
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>x y. (x, y) = zsplit0 s
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from 6
[PROOF STATE]
proof (chain)
picking this:
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Sub s t) = (n, a)
[PROOF STEP]
have "(\<exists>x y. (x,y) = zsplit0 s) \<longrightarrow>
(\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)"
[PROOF STATE]
proof (prove)
using this:
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Sub s t) = (n, a)
goal (1 subgoal):
1. (\<exists>x y. (x, y) = zsplit0 s) \<longrightarrow> (\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<exists>x y. (x, y) = zsplit0 s) \<longrightarrow> (\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
with bluddy abjt
[PROOF STATE]
proof (chain)
picking this:
\<exists>x y. (x, y) = zsplit0 s
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
(\<exists>x y. (x, y) = zsplit0 s) \<longrightarrow> (\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)
[PROOF STEP]
have th3: "(?I x (CN 0 ?nt ?at) = ?I x t) \<and> ?N ?at"
[PROOF STATE]
proof (prove)
using this:
\<exists>x y. (x, y) = zsplit0 s
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
(\<exists>x y. (x, y) = zsplit0 s) \<longrightarrow> (\<forall>xa xb. zsplit0 t = (xa, xb) \<longrightarrow> Inum (x # bs) (CN 0 xa xb) = Inum (x # bs) t \<and> numbound0 xb)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from abjs 6
[PROOF STATE]
proof (chain)
picking this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Sub s t) = (n, a)
[PROOF STEP]
have th2: "(?I x (CN 0 ?ns ?as) = ?I x s) \<and> ?N ?as"
[PROOF STATE]
proof (prove)
using this:
zsplit0 s = (fst (zsplit0 s), snd (zsplit0 s))
zsplit0 s = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) s \<and> numbound0 ?a
\<lbrakk>?x = zsplit0 s; (?xa, ?y) = ?x; zsplit0 t = (?n, ?a)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Sub s t) = (n, a)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 (fst (zsplit0 s)) (snd (zsplit0 s))) = Inum (x # bs) s \<and> numbound0 (snd (zsplit0 s))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 (fst (zsplit0 s)) (snd (zsplit0 s))) = Inum (x # bs) s \<and> numbound0 (snd (zsplit0 s))
goal (2 subgoals):
1. \<And>a b n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; \<And>xa xaa y n aa. \<lbrakk>xa = zsplit0 a; (xaa, y) = xa; zsplit0 b = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) b \<and> numbound0 aa; zsplit0 (Sub a b) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Sub a b) \<and> numbound0 aa
2. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from th3[simplified] th2[simplified] th[simplified]
[PROOF STATE]
proof (chain)
picking this:
fst (zsplit0 t) * x + Inum (x # bs) (snd (zsplit0 t)) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
fst (zsplit0 s) * x + Inum (x # bs) (snd (zsplit0 s)) = Inum (x # bs) s \<and> numbound0 (snd (zsplit0 s))
a = Sub (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) - fst (zsplit0 t)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
fst (zsplit0 t) * x + Inum (x # bs) (snd (zsplit0 t)) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
fst (zsplit0 s) * x + Inum (x # bs) (snd (zsplit0 s)) = Inum (x # bs) s \<and> numbound0 (snd (zsplit0 s))
a = Sub (snd (zsplit0 s)) (snd (zsplit0 t)) \<and> n = fst (zsplit0 s) - fst (zsplit0 t)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Sub s t) \<and> numbound0 a
[PROOF STEP]
by (simp add: left_diff_distrib)
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Sub s t) \<and> numbound0 a
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
case (7 i t n a)
[PROOF STATE]
proof (state)
this:
zsplit0 t = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Mul i t) = (n, a)
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?nt = "fst (zsplit0 t)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
let ?at = "snd (zsplit0 t)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
have abj: "zsplit0 t = (?nt,?at)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
[PROOF STEP]
have th: "a = Mul i ?at \<and> n = i * ?nt"
[PROOF STATE]
proof (prove)
using this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
goal (1 subgoal):
1. a = Mul i (snd (zsplit0 t)) \<and> n = i * fst (zsplit0 t)
[PROOF STEP]
using 7
[PROOF STATE]
proof (prove)
using this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
zsplit0 t = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Mul i t) = (n, a)
goal (1 subgoal):
1. a = Mul i (snd (zsplit0 t)) \<and> n = i * fst (zsplit0 t)
[PROOF STEP]
by (simp add: Let_def split_def)
[PROOF STATE]
proof (state)
this:
a = Mul i (snd (zsplit0 t)) \<and> n = i * fst (zsplit0 t)
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
from abj 7
[PROOF STATE]
proof (chain)
picking this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
zsplit0 t = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Mul i t) = (n, a)
[PROOF STEP]
have th2: "(?I x (CN 0 ?nt ?at) = ?I x t) \<and> ?N ?at"
[PROOF STATE]
proof (prove)
using this:
zsplit0 t = (fst (zsplit0 t), snd (zsplit0 t))
zsplit0 t = (?n, ?a) \<Longrightarrow> Inum (x # bs) (CN 0 ?n ?a) = Inum (x # bs) t \<and> numbound0 ?a
zsplit0 (Mul i t) = (n, a)
goal (1 subgoal):
1. Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
[PROOF STEP]
have "?I x (Mul i t) = i * ?I x (CN 0 ?nt ?at)"
[PROOF STATE]
proof (prove)
using this:
Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
goal (1 subgoal):
1. Inum (x # bs) (Mul i t) = i * Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t)))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (Mul i t) = i * Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t)))
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (Mul i t) = i * Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t)))
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
have "\<dots> = ?I x (CN 0 (i*?nt) (Mul i ?at))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. i * Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) (CN 0 (i * fst (zsplit0 t)) (Mul i (snd (zsplit0 t))))
[PROOF STEP]
by (simp add: distrib_left)
[PROOF STATE]
proof (state)
this:
i * Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) (CN 0 (i * fst (zsplit0 t)) (Mul i (snd (zsplit0 t))))
goal (1 subgoal):
1. \<And>i a n aa. \<lbrakk>\<And>n aa. zsplit0 a = (n, aa) \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) a \<and> numbound0 aa; zsplit0 (Mul i a) = (n, aa)\<rbrakk> \<Longrightarrow> Inum (x # bs) (CN 0 n aa) = Inum (x # bs) (Mul i a) \<and> numbound0 aa
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
Inum (x # bs) (Mul i t) = Inum (x # bs) (CN 0 (i * fst (zsplit0 t)) (Mul i (snd (zsplit0 t))))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
Inum (x # bs) (Mul i t) = Inum (x # bs) (CN 0 (i * fst (zsplit0 t)) (Mul i (snd (zsplit0 t))))
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Mul i t) \<and> numbound0 a
[PROOF STEP]
using th th2
[PROOF STATE]
proof (prove)
using this:
Inum (x # bs) (Mul i t) = Inum (x # bs) (CN 0 (i * fst (zsplit0 t)) (Mul i (snd (zsplit0 t))))
a = Mul i (snd (zsplit0 t)) \<and> n = i * fst (zsplit0 t)
Inum (x # bs) (CN 0 (fst (zsplit0 t)) (snd (zsplit0 t))) = Inum (x # bs) t \<and> numbound0 (snd (zsplit0 t))
goal (1 subgoal):
1. Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Mul i t) \<and> numbound0 a
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Inum (x # bs) (CN 0 n a) = Inum (x # bs) (Mul i t) \<and> numbound0 a
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 44236, "file": null, "length": 146}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Load the ARP dataset and display it transposed.

Created on Wed Jul  4 17:41:56 2018
@author: hubert kyeremateng-boateng
"""
import numpy as np
import pandas as pd

# Read the CSV without treating its first row as a header, so the
# columns come in as integer positions 0, 1, 2, ...
dataset = pd.read_csv('arp_dataset.csv', header=None)

# Give the first column a meaningful label in place.
dataset.rename(columns={0: 'name'}, inplace=True)

# Show the frame with rows and columns swapped.
print(np.transpose(dataset))
|
{"hexsha": "8ecf4e191c51888fe96cf70ed22b287feafa153f", "size": 300, "ext": "py", "lang": "Python", "max_stars_repo_path": "SVM_example.py", "max_stars_repo_name": "COSC-Research-BowieState/ARP_Simulator", "max_stars_repo_head_hexsha": "ec109253bc3cc6903252f6f24b1205b8e7bab189", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SVM_example.py", "max_issues_repo_name": "COSC-Research-BowieState/ARP_Simulator", "max_issues_repo_head_hexsha": "ec109253bc3cc6903252f6f24b1205b8e7bab189", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SVM_example.py", "max_forks_repo_name": "COSC-Research-BowieState/ARP_Simulator", "max_forks_repo_head_hexsha": "ec109253bc3cc6903252f6f24b1205b8e7bab189", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0769230769, "max_line_length": 53, "alphanum_fraction": 0.71, "include": true, "reason": "import numpy", "num_tokens": 87}
|
# Import pandas and numpy to build a DataFrame.
import pandas as pd
import numpy as np

# Two columns, Celsius and Kelvin, both starting from the same Celsius
# readings; the 'Kelvin' column is converted below.
data = {'Celsius': [22, 36, 20, 26, 30, 38],
        'Kelvin': [22, 36, 20, 26, 30, 38]}

# Build the DataFrame with city names as the row index.
df = pd.DataFrame(data, index=['Londres', 'Madrid', 'Barcelona', 'Sevilla', 'Cádiz', 'Lima'])


def to_kelvin(x):
    """Convert a temperature from degrees Celsius to Kelvin.

    BUG FIX: the original wrote ``lambda x: (x + 273,15)`` — in Python the
    decimal separator is '.', not ',', so that expression built the 2-tuple
    ``(x + 273, 15)`` instead of adding 273.15.
    """
    return x + 273.15


# Apply the conversion to the 'Kelvin' column; the result is a Series of
# floats indexed by city name.
df = df['Kelvin'].apply(to_kelvin)
print(df)
|
{"hexsha": "884a1e66482fd16546d1b9384ab280cd89c375bd", "size": 622, "ext": "py", "lang": "Python", "max_stars_repo_path": "Lambda/lamda.py", "max_stars_repo_name": "BrianMarquez3/Python-Course", "max_stars_repo_head_hexsha": "2622b4ddfd687505becfd246e82a2ed0cb9b76f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2020-08-19T23:27:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T12:02:17.000Z", "max_issues_repo_path": "Lambda/lamda.py", "max_issues_repo_name": "BrianMarquez3/Python-Course", "max_issues_repo_head_hexsha": "2622b4ddfd687505becfd246e82a2ed0cb9b76f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-10T18:06:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-10T18:06:05.000Z", "max_forks_repo_path": "Lambda/lamda.py", "max_forks_repo_name": "BrianMarquez3/Python-Course", "max_forks_repo_head_hexsha": "2622b4ddfd687505becfd246e82a2ed0cb9b76f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-03T19:35:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-10T14:58:39.000Z", "avg_line_length": 34.5555555556, "max_line_length": 90, "alphanum_fraction": 0.7041800643, "include": true, "reason": "import numpy", "num_tokens": 197}
|
(*
* SPDX-License-Identifier: MIT
*
*)
(* A transition label: either silent (internal) or an observable action
   carrying a value of type A. *)
Inductive label {A} : Type :=
  Silent : label
| Action : A -> label
.

(* Object-language types for messages exchanged in the protocol. *)
Inductive type : Set :=
| Access
| Bool
| Nat
| Unit
| TPair (t1 t2 : type)
.

(* Abstract parameter: the carrier set of access-control capabilities,
   supplied by whoever instantiates the functor below. *)
Module Type GRANT_ACCESS.
  Parameter access : Set.
End GRANT_ACCESS.

(* Messages, indexed by their object-language type so that ill-typed
   messages cannot be constructed. *)
Module Messages (GA : GRANT_ACCESS).
  Inductive message : type -> Type :=
  | Permission (acc : GA.access) : message Access
  | Content (n : nat) : message Nat
  | MsgPair {t1 t2} (m1 : message t1) (m2 : message t2) : message (TPair t1 t2)
  .

  (* Denotation of object-language types as Coq types. *)
  Fixpoint typeDenote (t : type) :=
    match t with
    | Access => GA.access
    | Bool => bool
    | Nat => nat
    | Unit => unit
    | TPair t1 t2 => (typeDenote t1 * typeDenote t2)%type
    end
  .

  (* Total projections: dependent pattern matching on the type index rules
     out every constructor except the one matched, so no default branch is
     needed. *)
  Definition extractContent (msg : message Nat) : nat :=
    match msg with
    | Content t => t
    end.

  Definition extractPermission (msg : message Access) : GA.access :=
    match msg with
    | Permission a => a
    end.

  Definition msgFst {t1 t2} (msg : message (TPair t1 t2)) : (message t1) :=
    match msg with
    | MsgPair m1 _ => m1
    end.

  Definition msgSnd {t1 t2} (msg : message (TPair t1 t2)) : (message t2) :=
    match msg with
    | MsgPair _ m2 => m2
    end.
End Messages.
|
{"author": "spicy-paper", "repo": "spicy", "sha": "14b766c24bb546861e623b6681b2e71653234681", "save_path": "github-repos/coq/spicy-paper-spicy", "path": "github-repos/coq/spicy-paper-spicy/spicy-14b766c24bb546861e623b6681b2e71653234681/src/Messages.v"}
|
import sys
import os
import numpy as np
import pytest
sys.path.append('..')
import autodiff as ad
def test_composite():
    """Exercise composite autodiff expressions against hand-computed values.

    Covers nested arithmetic, multivariate gradients via ``getGradient`` /
    ``getDeriv``, trig identities (whose derivative must be exactly 0),
    and textbook product/quotient/chain-rule examples.
    """
    #Test some more complicated functions / identities, including some multivariate ones.
    x = ad.Scalar('x', 2)
    z = (5 * (x + 20) / 10) ** 2
    d = z.getGradient(['x'])
    assert(z.getValue() == 121)
    assert(np.array_equal(d, [11]))

    # Multivariate gradients returned in the order the names are requested.
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    z = (x + 20) * y
    d = z.getGradient(['x', 'y'])
    assert(z.getValue() == 66)
    assert(np.array_equal(d, [3, 22]))
    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 3)
    z = (x * y + x) * y
    d = z.getGradient(['x', 'y'])
    assert(z.getValue() == 12)
    assert(np.array_equal(d, [12, 7]))
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    z = (x + y) / y
    d = z.getGradient(['x', 'y'])
    assert(np.isclose(z.getValue(), 5/3))
    assert(np.allclose(d, [1.0/3, -2.0/9]))
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    z = x + ((y ** 2) / y)
    d = z.getGradient(['x', 'y'])
    assert(z.getValue() == 5)
    assert(np.array_equal(d, [1, 1]))
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    z = (x ** y) ** 2
    d = z.getGradient(['x', 'y'])
    assert(z.getValue() == 64)
    assert(np.array_equal(d, [6 * 32, 2 * np.log(2) * 64]))

    # Chain rule through unary functions.
    x = ad.Scalar('x', 16)
    y = ad.sin(ad.sqrt(x))
    assert(np.isclose(y.getValue(), np.sin(4)))
    assert(np.isclose(y.getDeriv()['x'], 1/8 * np.cos(4)))
    #trig identity
    x = ad.Scalar('x', 5)
    y = ad.sin(x) ** 2 + ad.cos(x) ** 2
    assert(np.isclose(y.getValue(), 1))
    assert(np.isclose(y.getDeriv()['x'], 0))
    #trig identity multivariate
    x = ad.Scalar('x', 5)
    y = ad.Scalar('y', 243423)
    z = ad.sin(x * 1231 * y) ** 2 + ad.cos(x * 1231 * y) ** 2
    assert(np.isclose(z.getValue(), 1))
    assert(np.isclose(z.getDeriv()['x'], 0))
    assert(np.isclose(z.getDeriv()['y'], 0))
    x = ad.Scalar('x', 16)
    y = ad.sqrt(ad.power(x, 2))
    assert(y.getValue() == 16)
    assert(y.getDeriv()['x'] == 1)
    x = ad.Scalar('x', 10)
    y = ad.tan(x) * ad.cos(x) / ad.sin(x)
    assert(np.isclose(y.getValue(), 1))
    assert(np.isclose(y.getDeriv()['x'], 0))

    # Worked examples from an external problem set:
    #https://math.berkeley.edu/~kruckman/fall2010/9-29-problems.pdf
    x = ad.Scalar('x', 10)
    y = (x ** 2 * ad.sin(x) / (x ** 2 + 1))
    assert(np.isclose(y.getValue(), 100 * np.sin(10) / (101)))
    assert(np.isclose(y.getDeriv()['x'], (20 * np.sin(10) + (10 ** 4 + 100) * np.cos(10)) / (101 ** 2)))
    x = ad.Scalar('x', 4)
    y = (x ** 3 * ad.exp(x))
    assert(np.isclose(y.getValue(), 4 ** 3 * (np.exp(4))))
    assert(np.isclose(y.getDeriv()['x'], 3 * 16 * np.exp(4) + 4 ** 3 * np.exp(4)))
    x = ad.Scalar('x', 4)
    y = ad.sin(x) * ad.cos(x) * ad.tan(x)
    assert(np.isclose(y.getValue(), np.sin(4) * np.cos(4) * np.tan(4)))
    assert(np.isclose(y.getDeriv()['x'], np.sin(8)))
    x = ad.Scalar('x', 4)
    y = ad.sqrt(x) / ad.tan(x)
    assert(np.isclose(y.getValue(), 2 / np.tan(4)))
    assert(np.isclose(y.getDeriv()['x'], 1 / (np.tan(4) * 4) - 2 / (np.sin(4) ** 2)))
    x = ad.Scalar('x', 4)
    y = ad.exp(ad.sqrt(x + 1))
    assert(np.isclose(y.getValue(), np.exp(5 ** 0.5)))
    assert(np.isclose(y.getDeriv()['x'], np.exp(5 ** 0.5) / (2 * (5 ** 0.5))))
    x = ad.Scalar('x', 4)
    y = ad.exp(ad.sin(ad.exp(x)))
    assert(np.isclose(y.getValue(), np.exp(np.sin(np.exp(4)))))
    assert(np.isclose(y.getDeriv()['x'], np.cos(np.exp(4)) * np.exp(np.sin(np.exp(4)) + 4)))
    x = ad.Scalar('x', 4)
    y = (ad.sin(x ** (1/3))) ** (1/3)
    assert(np.isclose(y.getValue(), np.sin(4 ** (1/3)) ** (1/3)))
    assert(np.isclose(y.getDeriv()['x'], np.cos(4 ** (1/3)) / (9 * ((4 * np.sin(4 ** (1/3))) ** (2/3)))))
    x = ad.Scalar('x', 16)
    y = ad.Scalar('y', 9)
    z = ad.sqrt(x * y)
    assert(z.getValue() == 12)
    assert(np.isclose(z.getDeriv()['x'], 9/2 * (16 * 9)**(-0.5)))
    assert(np.isclose(z.getDeriv()['y'], 16/2 * (16 * 9)**(-0.5)))
    x = ad.Scalar('x', 16)
    y = ad.Scalar('y', 9)
    z = ad.sqrt(x * (y ** 4))
    assert(z.getValue() == 4 * 81)
    assert(np.isclose(z.getDeriv()['x'], (9 ** 4)/2 * (16 * (9 ** 4))**(-0.5)))
    assert(np.isclose(z.getDeriv()['y'], (4 * 16 * (9 ** 3))/2 * (16 * (9 ** 4))**(-0.5)))
    x = ad.Scalar('x', 3)
    y = ad.Scalar('y', 2)
    z = ad.cos(ad.sin(x * y))
    assert(np.isclose(z.getValue(), np.cos(np.sin(6))))
    assert(np.isclose(z.getDeriv()['x'], -2 * np.cos(6) * np.sin(np.sin(6))))
    assert(np.isclose(z.getDeriv()['y'], -3 * np.cos(6) * np.sin(np.sin(6))))

    # log quotient rule: log10(10) - log10(100) must equal log10(10/100),
    # both in value and in both partial derivatives.
    x = ad.Scalar('x', 3)
    y = ad.Scalar('y', 2)
    x = ad.log(ad.Scalar('x', 10), 10)
    x2 = ad.log(ad.Scalar('y', 100), 10)
    x3 = x - x2
    x4 = ad.log(ad.Scalar('x', 10) / ad.Scalar('y', 100), 10)
    assert(np.isclose(x3.getValue(), -1))
    assert(np.isclose(x3.getDeriv()['x'], 1 / (np.log(10) * 10)))
    assert(np.isclose(x3.getDeriv()['y'], -1 / (np.log(10) * 100)))
    assert(np.isclose(x3.getValue(), x4.getValue()))
    assert(np.isclose(x3.getDeriv()['x'], x4.getDeriv()['x']))
    assert(np.isclose(x3.getDeriv()['y'], x4.getDeriv()['y']))
    x = ad.Scalar('x', 3)
    y = ad.Scalar('y', 2)
    z = ad.sin(2 * x + ad.exp(y ** 2) + 4 * x * y)
    assert(np.isclose(z.getValue(), np.sin(6 + np.exp(4) + 24)))
    assert(np.isclose(z.getDeriv()['x'], np.cos(6 + np.exp(4) + 24) * (10)))
    assert(np.isclose(z.getDeriv()['y'],np.cos(6 + np.exp(4) + 24) * (4 * np.exp(4) + 12)))

    # Partial-derivative examples:
    #http://math.gmu.edu/~memelian/teaching/Fall08/partDerivExamples.pdf
    x = ad.Scalar('x', 3)
    y = ad.Scalar('y', 2)
    z = x * ad.exp(2 * x + 3 * y)
    assert(np.isclose(z.getValue(), 3 * np.exp(12)))
    assert(np.isclose(z.getDeriv()['x'], 6 * np.exp(12) + np.exp(12)))
    assert(np.isclose(z.getDeriv()['y'], 9 * np.exp(12)))
    x = ad.Scalar('x', 3)
    y = ad.Scalar('y', 2)
    z = (x - y) / (x + y)
    assert(np.isclose(z.getValue(), 1 / 5))
    assert(np.isclose(z.getDeriv()['x'], 4 / 25))
    assert(np.isclose(z.getDeriv()['y'], -6 / 25))
    x = ad.Scalar('x', 3)
    y = ad.Scalar('y', 2)
    z = ad.Scalar('z', 5)
    f = x * ad.cos(z) + ((x ** 2) * (y ** 3) * ad.exp(z))
    assert(np.isclose(f.getValue(), 3 * np.cos(5) + (9 * 8 * np.exp(5))))
    assert(np.isclose(f.getDeriv()['x'], np.cos(5) + 2 * 3 * 8 * np.exp(5)))
    assert(np.isclose(f.getDeriv()['y'], 3 * 9 * 4 * np.exp(5)))
    assert(np.isclose(f.getDeriv()['z'], -3 * np.sin(5) + 9 * 8 * np.exp(5)))
    x = ad.Scalar('x', 3)
    y = ad.Scalar('y', 2)
    f = (y / x) * ad.ln(x)
    assert(np.isclose(f.getValue(), 2/3 * np.log(3)))
    assert(np.isclose(f.getDeriv()['x'], 2/9 * (1- np.log(3))))
    assert(np.isclose(f.getDeriv()['y'], 1/3 * np.log(3)))
    x = ad.Scalar('x', 3)
    y = ad.Scalar('y', 2)
    f = 1 / (x ** 2 + y ** 2)
    assert(np.isclose(f.getValue(), 1 /13))
    assert(np.isclose(f.getDeriv()['x'], -6 / (13 ** 2)))
    assert(np.isclose(f.getDeriv()['y'], -4 / (13 ** 2)))
|
{"hexsha": "68776ac24c27c3348fdcb5c57b604f3cf11f13cf", "size": 7032, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_composite.py", "max_stars_repo_name": "cs207FinalProjectGroup/cs207-FinalProject", "max_stars_repo_head_hexsha": "faa78f023df43c13f2ccd4711835c4313f193c9b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_composite.py", "max_issues_repo_name": "cs207FinalProjectGroup/cs207-FinalProject", "max_issues_repo_head_hexsha": "faa78f023df43c13f2ccd4711835c4313f193c9b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_composite.py", "max_forks_repo_name": "cs207FinalProjectGroup/cs207-FinalProject", "max_forks_repo_head_hexsha": "faa78f023df43c13f2ccd4711835c4313f193c9b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9710144928, "max_line_length": 112, "alphanum_fraction": 0.5007110353, "include": true, "reason": "import numpy", "num_tokens": 2710}
|
# Univariate
# A product of independent univariate distributions; alias of
# `Distributions.Product`, so `dist.v` below is the vector of components.
const VectorOfUnivariate = Distributions.Product

# Wrap a vector of univariate distributions into a single multivariate
# distribution (one sample = one draw from each component).
function arraydist(dists::AbstractVector{<:UnivariateDistribution})
    return Product(dists)
end

# Column-wise log-density: scores each column of `x` against the product and
# returns a vector with one log-probability per column.
function Distributions.logpdf(dist::VectorOfUnivariate, x::AbstractMatrix{<:Real})
    size(x, 1) == length(dist) ||
        throw(DimensionMismatch("Inconsistent array dimensions."))
    # `eachcol` breaks Zygote, so we use `view` directly
    return map(i -> sum(map(logpdf, dist.v, view(x, :, i))), axes(x, 2))
end
# A matrix-variate distribution built from an elementwise matrix of
# independent univariate distributions.
struct MatrixOfUnivariate{
    S <: ValueSupport,
    Tdist <: UnivariateDistribution{S},
    Tdists <: AbstractMatrix{Tdist},
} <: MatrixDistribution{S}
    dists::Tdists
end
Base.size(dist::MatrixOfUnivariate) = size(dist.dists)

# Wrap a matrix of univariate distributions into one matrix distribution.
function arraydist(dists::AbstractMatrix{<:UnivariateDistribution})
    return MatrixOfUnivariate(dists)
end

# Elementwise log-density of a single matrix sample: sum of logpdf of each
# entry against its own component distribution.
function Distributions._logpdf(dist::MatrixOfUnivariate, x::AbstractMatrix{<:Real})
    # return sum(((d, xi),) -> logpdf(d, xi), zip(dist.dists, x))
    # Broadcasting here breaks Tracker for some reason
    return sum(map(logpdf, dist.dists, x))
end

# Batched log-density over a collection of matrix samples.
function Distributions.logpdf(dist::MatrixOfUnivariate, x::AbstractArray{<:AbstractMatrix{<:Real}})
    return map(x -> logpdf(dist, x), x)
end
# NOTE(review): `Matrix{<:Real}` is already covered by the method above;
# this more specific method presumably exists for AD-dispatch reasons — confirm.
function Distributions.logpdf(dist::MatrixOfUnivariate, x::AbstractArray{<:Matrix{<:Real}})
    return map(x -> logpdf(dist, x), x)
end

# Sampling: draw one value from each component, elementwise.
function Distributions.rand(rng::Random.AbstractRNG, dist::MatrixOfUnivariate)
    return rand.(Ref(rng), dist.dists)
end
# Multivariate
# A matrix-variate distribution whose columns are draws from a vector of
# independent multivariate distributions (column i ~ dists[i]).
struct VectorOfMultivariate{
    S <: ValueSupport,
    Tdist <: MultivariateDistribution{S},
    Tdists <: AbstractVector{Tdist},
} <: MatrixDistribution{S}
    dists::Tdists
end
# Size is (dimension of one component, number of components); assumes all
# components share the length of the first.
Base.size(dist::VectorOfMultivariate) = (length(dist.dists[1]), length(dist))
Base.length(dist::VectorOfMultivariate) = length(dist.dists)

# Wrap a vector of multivariate distributions into one matrix distribution.
function arraydist(dists::AbstractVector{<:MultivariateDistribution})
    return VectorOfMultivariate(dists)
end

# Log-density of one matrix sample: column i scored against dists[i].
function Distributions._logpdf(dist::VectorOfMultivariate, x::AbstractMatrix{<:Real})
    # `eachcol` breaks Zygote, so we use `view` directly
    return sum(i -> logpdf(dist.dists[i], view(x, :, i)), axes(x, 2))
end

# Batched log-density over a collection of matrix samples.
function Distributions.logpdf(dist::VectorOfMultivariate, x::AbstractArray{<:AbstractMatrix{<:Real}})
    return map(x -> logpdf(dist, x), x)
end
# NOTE(review): subsumed by the method above except for dispatch specificity.
function Distributions.logpdf(dist::VectorOfMultivariate, x::AbstractArray{<:Matrix{<:Real}})
    return map(x -> logpdf(dist, x), x)
end

# Sampling: draw from each component and concatenate the draws as columns.
function Distributions.rand(rng::Random.AbstractRNG, dist::VectorOfMultivariate)
    init = reshape(rand(rng, dist.dists[1]), :, 1)
    return mapreduce(i -> rand(rng, dist.dists[i]), hcat, 2:length(dist); init = init)
end
|
{"hexsha": "6b3a76d97a63dd0aa202e4bc76f5f6cb5bcae256", "size": 2652, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/arraydist.jl", "max_stars_repo_name": "bmharsha/DistributionsAD.jl", "max_stars_repo_head_hexsha": "fe2070012167b78c84a49733a0a64997e9533812", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/arraydist.jl", "max_issues_repo_name": "bmharsha/DistributionsAD.jl", "max_issues_repo_head_hexsha": "fe2070012167b78c84a49733a0a64997e9533812", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/arraydist.jl", "max_forks_repo_name": "bmharsha/DistributionsAD.jl", "max_forks_repo_head_hexsha": "fe2070012167b78c84a49733a0a64997e9533812", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3287671233, "max_line_length": 101, "alphanum_fraction": 0.7281297134, "num_tokens": 725}
|
"""
ET Correction Tool:
This script creates evapotranspiration Dfs2 from single/multiple reference ET
time-series, and applies spatially, monthly varying solar radiation correction
factors to the reference ET data and creates the MIKE SHE input ET Dfs2 file.
Created on Wed Apr 28 15:50:07 2021
@author: Shubhneet Singh
ssin@dhigroup.com
DHI,US
"""
# marks dependencies
import os
import clr
import sys
import time
import numpy as np #
import pandas as pd #
import datetime as dt
import shapefile #pyshp
from winreg import ConnectRegistry, OpenKey, HKEY_LOCAL_MACHINE, QueryValueEx
def get_mike_bin_directory_from_registry():
    """Locate the MIKE Zero binary directory via the Windows registry.

    Probes HKLM for an installed MIKE release, newest year first (2030 down
    to 2011), and returns the full path to its ``bin`` directory
    (``bin\\x64`` on 64-bit installs). Returns "" when no release is found.

    Raises
    ------
    FileNotFoundError
        If no DHI registry key exists at all.
    NotADirectoryError
        If the registry points at a directory missing on disk.
    """
    x86 = False
    # On 64-bit Windows the (32-bit) DHI installer registers under Wow6432Node.
    dhiRegistry = r"SOFTWARE\Wow6432Node\DHI" + "\\"
    aReg = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
    try:
        _ = OpenKey(aReg, dhiRegistry)
    except FileNotFoundError:
        # 32-bit Windows has no Wow6432Node view; fall back to the plain path.
        # BUG FIX: the original retried the *identical* Wow6432Node path here,
        # which made this fallback a guaranteed no-op.
        x86 = True
        dhiRegistry = r"SOFTWARE\DHI" + "\\"
        aReg = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
        try:
            _ = OpenKey(aReg, dhiRegistry)
        except FileNotFoundError:
            raise FileNotFoundError
    # Probe release years from newest to oldest.
    year = 2030
    while year > 2010:
        try:
            mikeHomeDirKey = OpenKey(aReg, dhiRegistry + str(year))
        except FileNotFoundError:
            year -= 1
            continue
        if year > 2020:
            # Releases after 2020 keep HomeDir under the "MIKE Zero" subkey;
            # skip the year instead of crashing if that subkey is absent.
            try:
                mikeHomeDirKey = OpenKey(aReg, dhiRegistry + "MIKE Zero\\" + str(year))
            except FileNotFoundError:
                year -= 1
                continue
        mikeBin = QueryValueEx(mikeHomeDirKey, "HomeDir")[0]
        mikeBin += "bin\\"
        if not x86:
            mikeBin += "x64\\"
        if not os.path.exists(mikeBin):
            print(f"Cannot find MIKE ZERO in {mikeBin}")
            raise NotADirectoryError
        return mikeBin
    print("Cannot find MIKE ZERO")
    return ""
sys.path.append(get_mike_bin_directory_from_registry())
clr.AddReference("DHI.Generic.MikeZero.DFS")
clr.AddReference("DHI.Generic.MikeZero.EUM")
clr.AddReference("DHI.Projections")
from mikeio import * #
from mikeio.eum import ItemInfo
from shapely.geometry import Polygon, Point #
from tkinter import Frame, Label, Button, Entry, Tk, W, END
from tkinter import messagebox as tkMessageBox
# from tkinter.filedialog import askdirectory
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
#------------------------------------------------------------------------------
## File locations for testing tool:
# PolygonsShapefileName = r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\NLDASzones3.shp"
# SolarRadiationShapefileName = r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\SolarRad_SCALING_bymonth.shp"
# refDfs0path= r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\PET_NLDAS2000_2020_1st10.dfs0"
# projpath = r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\SolarRad_SCALING_bymonth.prj"
# filePath = r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\Test.dfs2"
#------------------------------------------------------------------------------
# Read reference ET Dfs0 file(s), and create a dataframe:
def ReferenceET2Dataframe(refDfs0path):
    """Concatenate every reference-ET Dfs0 file next to *refDfs0path*.

    All files whose names end in 'dfs0' in the directory of *refDfs0path*
    are read and joined column-wise into one time-indexed DataFrame
    (one column per station/file).

    NOTE: this intentionally changes the process working directory, as the
    original did — the Tk file dialogs elsewhere start from the cwd.
    """
    ReferenceET_Directory = os.path.dirname(refDfs0path)
    os.chdir(ReferenceET_Directory)
    # Sort for a deterministic column order (os.listdir order is arbitrary).
    ReferenceET_File_Names = sorted(
        name for name in os.listdir(ReferenceET_Directory)
        if name.endswith('dfs0')
    )
    # Read once each, then concatenate in a single pass instead of growing
    # the frame inside the loop.
    frames = [Dfs0(name).to_dataframe() for name in ReferenceET_File_Names]
    if not frames:
        # No dfs0 files present: return an empty frame rather than crash.
        return pd.DataFrame()
    return pd.concat(frames, axis=1)
# Reference ET metadata for creating Dfs2:
def RefETMetadata(refDfs0path):
    """Collect reference-ET metadata needed for writing the Dfs2 file.

    Returns a dict with station count, EUM type/unit of the first item,
    start time, number of timesteps, timestep length in seconds, and the
    overall maximum value together with the station and timestep where it
    occurs.
    """
    ReferenceET_df = ReferenceET2Dataframe(refDfs0path)
    metadata_file = Dfs0(refDfs0path)
    ETMetadata = {
        "NumStations": ReferenceET_df.shape[1],
        "Type": metadata_file.items[0].type,
        "Unit": metadata_file.items[0].unit,
        "StartTime": ReferenceET_df.index[0],
        "NumTimesteps": len(ReferenceET_df.index),
        "Timestep": (ReferenceET_df.index[1] - ReferenceET_df.index[0]).total_seconds(),
        "Max": round(ReferenceET_df.max().max(), 2),
        "MaxStation": ReferenceET_df.max().idxmax(),
        "MaxTimestep": ReferenceET_df.idxmax()[ReferenceET_df.max().idxmax()],
    }
    # BUG FIX: the original placed this print *after* the return (dead code)
    # and used attribute access (ETMetadata.Max) on a dict, which would have
    # raised AttributeError had it ever run.
    print('Maximum ET in reference data is ' + str(ETMetadata["Max"]) + ' '
          + str(ETMetadata["Unit"])[8:] + ' at station '
          + str(ETMetadata["MaxStation"]) + ' on ' + str(ETMetadata["MaxTimestep"]))
    return ETMetadata
#Read correction factor grid shape file:
def Correction_df(SolarRadiationShapefileName):
    """Read the monthly solar-radiation correction grid from a shapefile.

    Returns the attribute table as a DataFrame (fields 2..13 renamed to the
    English month names), plus the unique grid X coordinates in ascending
    order and unique Y coordinates in descending order.
    """
    reader = shapefile.Reader(SolarRadiationShapefileName)
    # Field list excludes the leading deletion flag; slots 2..13 hold the
    # twelve monthly factors and are relabeled January..December.
    field_names = [f[0] for f in reader.fields[1:]]
    field_names[2:14] = [dt.date(2000, m, 1).strftime('%B') for m in range(1, 13)]
    frame = pd.DataFrame(data=reader.records(), columns=field_names)
    xs = frame.X.sort_values(ascending=True).unique()
    ys = frame.Y.sort_values(ascending=False).unique()
    return frame, xs, ys
# Correct ref ET by scaling factors and create ET Dfs2 input data nparray:
def ETCorrection(PolygonsShapefileName, SolarRadiationShapefileName, refDfs0path):
    """Scale reference ET by monthly solar-radiation factors on a grid.

    For every grid point of the correction shapefile, finds the ET polygon
    containing it, multiplies that polygon's reference-ET series by the
    point's month-specific factor, and stores the corrected series at the
    point's (Y, X) grid cell.

    Returns a (timesteps, n_Y, n_X) numpy array ready for Dfs2 writing.
    Cells whose point falls in no polygon stay zero.
    """
    # Input ref ET in dataframe
    ReferenceET_df = ReferenceET2Dataframe(refDfs0path)
    # Ref ET polygons reading:
    Polygons_Shapefile = shapefile.Reader(PolygonsShapefileName)
    # Excluding deletion flag
    Polygons_Shapefile_fieldnames = [field[0] for field
                                     in Polygons_Shapefile.fields[1:]]
    for index in range(len(Polygons_Shapefile_fieldnames)):
        if Polygons_Shapefile_fieldnames[index] == 'ETStation':
            ETStation_field_index = index
    Attribute_table = Polygons_Shapefile.records()
    ETStation_Names = [Polygons_Shapefile.record(record)[ETStation_field_index]
                       for record in range(len(Attribute_table))]
    Num_Polygons = len(Polygons_Shapefile.shapes())
    # PERF: build each shapely Polygon exactly once.  The original rebuilt
    # every polygon inside the per-point loop (n_points * n_polygons
    # constructions).
    ET_Polygons = [Polygon(Polygons_Shapefile.shape(poly).points)
                   for poly in range(Num_Polygons)]
    # Correction factor grid reading
    SolarRadiation_df, Grid_X, Grid_Y = Correction_df(SolarRadiationShapefileName)
    # PERF: O(1) coordinate -> grid-index maps instead of linear scans.
    x_index = {x: i for i, x in enumerate(Grid_X)}
    y_index = {y: i for i, y in enumerate(Grid_Y)}
    # Find points inside every polygon:
    ListPointsinPolygons = [[] for _ in range(len(ET_Polygons))]
    for loc in range(len(SolarRadiation_df)):
        SolarRadiation_point = Point(SolarRadiation_df.X[loc],
                                     SolarRadiation_df.Y[loc])  # shapely point
        for poly in range(len(ET_Polygons)):
            if ET_Polygons[poly].contains(SolarRadiation_point):
                ListPointsinPolygons[poly].append(loc)  # Self Note: check point on poly line
    print('Solar radiation points inside each ET polygon identified')
    # Output array: (timesteps, rows, cols); untouched cells remain zero.
    Corrected_ET = np.zeros((len(ReferenceET_df),
                             len(Grid_Y),
                             len(Grid_X)))
    # Correction of ET data looping all polygons, identifying their ref ET
    for poly in range(len(ET_Polygons)):
        ThisPolygon_ReferenceET = ReferenceET_df[ETStation_Names[poly]]
        ThisPolygon_ReferenceET_Copy = ThisPolygon_ReferenceET.copy()
        # Correction of all points within the polygon in loop
        for point_index in ListPointsinPolygons[poly]:
            for month in range(1, 13):
                ThisPoint_CorrectionFactor = SolarRadiation_df.iloc[point_index,
                                                                   month + 1]
                This_month_index = ThisPolygon_ReferenceET_Copy.index.month == month
                # Scale the *uncorrected* series, so factors of successive
                # points do not accumulate on the shared copy.
                if len(This_month_index) != 0:
                    This_month_values = ThisPolygon_ReferenceET[This_month_index].copy()
                    ThisPolygon_ReferenceET_Copy[This_month_index] = ThisPoint_CorrectionFactor * This_month_values
            # Spatial location of this grid point in the output array.
            X = x_index[SolarRadiation_df.X[point_index]]
            Y = y_index[SolarRadiation_df.Y[point_index]]
            # Store corrected ET time-series
            Corrected_ET[:, Y, X] = ThisPolygon_ReferenceET_Copy
        print('Grid points in polygon > ' + str(poly) + ' corrected for solar radiation')
    return Corrected_ET
# Write Dfs2 output file:
def buildETDfs(filePath, Corrected_ET, SolarRadiationShapefileName, projpath, refDfs0path):
    """Write the corrected ET array to a MIKE SHE Dfs2 input file.

    Overwrites *filePath* if it already exists.  Grid geometry comes from
    the solar-radiation shapefile; time axis and EUM type/unit come from
    the reference Dfs0 metadata.
    """
    if os.path.exists(filePath):
        os.remove(filePath)
    dfs = Dfs2()
    # Projection sys from shape file
    projString = open(projpath, "r").read()
    # ET timeseries data
    ETMetadata = RefETMetadata(refDfs0path)
    SolarRadiation_df, Grid_X, Grid_Y = Correction_df(SolarRadiationShapefileName)
    # Grid spacing from the first pair of unique coordinates
    # (Grid_X ascending, Grid_Y descending — see Correction_df).
    Dx = Grid_X[1]-Grid_X[0]
    Dy = Grid_Y[0]-Grid_Y[1]
    # Origin is the lower-left corner: smallest X, last (smallest) Y.
    dfs.write(filename = filePath,
              data = [Corrected_ET],
              start_time = ETMetadata["StartTime"],
              dt = ETMetadata["Timestep"],
              items=[ItemInfo("Evapotranspiration",
                              ETMetadata["Type"],
                              ETMetadata["Unit"],
                              data_value_type='Instantaneous')],
              dx = Dx,
              dy = Dy,
              coordinate = [projString,
                            Grid_X[0],
                            Grid_Y[-1],
                            0],
              title="ET_RadiationCorrected")
    print('Dfs2 created')
def ETCorrectionTool(refDfs0path, PolygonsShapefileName, SolarRadiationShapefileName, projpath, filePath):
    """Run the full workflow: correct the reference ET, write the Dfs2,
    and report wall-clock timings for the write step and the whole run."""
    overall_start = time.time()
    corrected = ETCorrection(PolygonsShapefileName, SolarRadiationShapefileName, refDfs0path)
    write_start = time.time()
    buildETDfs(filePath, corrected, SolarRadiationShapefileName, projpath, refDfs0path)
    print('Writing time' + "- %s seconds" % (time.time() - write_start))
    print('Total time'+"- %s seconds" % (time.time() - overall_start))
#------------------------------------------------------------------------------
# UI for this tool:
class interface(Frame):
    """Tk front-end for the ET correction tool.

    Presents five path fields (reference dfs0, ET polygons shp, solar
    radiation factors shp, projection prj, output dfs2), browse/save
    buttons, and a Run button that invokes ETCorrectionTool.
    """

    def __init__(self, master = None):
        """ Initialize Frame. """
        Frame.__init__(self, master)
        self.grid()
        self.createWidgets()

    def message(self):
        # Completion pop-up shown after a successful run.
        tkMessageBox.showinfo("Task Completed", "Reference ET data corrected!")

    def run(self):
        """Collect the five paths from the entry fields and run the tool."""
        # input1 - Ref ET timeseries in Dfs0:
        filename1 = self.file_name1.get()
        # input2 - Polygons for every ET timeseries in shp file:
        filename2 = self.file_name2.get()
        # input3 - Correction factor grid with monthly values in shape file:
        filename3 = self.file_name3.get()
        # input4 - Projection file:
        filename4 = self.file_name4.get()
        # Output:
        outputFile = self.file_name5.get()
        # Tool
        ETCorrectionTool(filename1, filename2, filename3, filename4, outputFile)
        self.message()

    def createWidgets(self):
        """Lay out labels, entries and buttons on the grid."""
        # set all labels of inputs:
        Label(self, text = "Note: Output Dfs2's start time, time steps, and ET data units will be same as reference ET Dfs0")\
            .grid(row=0, columnspan=3, sticky=W)
        Label(self, text = "Reference ET (*.dfs0) :")\
            .grid(row=1, column=0, sticky=W)
        Label(self, text = "ET Polygons (*.shp) :")\
            .grid(row=2, column=0, sticky=W)
        Label(self, text = "Solar Radiation Factors (*.shp) :")\
            .grid(row=3, column=0, sticky=W)
        Label(self, text = "Projection (*.prj) :")\
            .grid(row=4, column=0, sticky=W)
        Label(self, text = "Output Corrected ET (*.dfs2) :")\
            .grid(row=5, column=0, sticky=W)
        # set buttons
        Button(self, text = "Browse", command=self.load_file1, width=10)\
            .grid(row=1, column=6, sticky=W)
        Button(self, text = "Browse", command=self.load_file2, width=10)\
            .grid(row=2, column=6, sticky=W)
        Button(self, text = "Browse", command=self.load_file3, width=10)\
            .grid(row=3, column=6, sticky=W)
        Button(self, text = "Browse", command=self.load_file4, width=10)\
            .grid(row=4, column=6, sticky=W)
        Button(self, text = "Save As", command=self.load_file5, width=10)\
            .grid(row=5, column=6, sticky=W)
        Button(self, text = "Run ET Correction", command=self.run, width=20)\
            .grid(row=6, column=2, sticky=W)
        # set entry field
        self.file_name1 = Entry(self, width=65)
        self.file_name1.grid(row=1, column=1, columnspan=4, sticky=W)
        self.file_name2 = Entry(self, width=65)
        self.file_name2.grid(row=2, column=1, columnspan=4, sticky=W)
        self.file_name3 = Entry(self, width=65)
        self.file_name3.grid(row=3, column=1, columnspan=4, sticky=W)
        self.file_name4 = Entry(self, width=65)
        self.file_name4.grid(row=4, column=1, columnspan=4, sticky=W)
        self.file_name5 = Entry(self, width=65)
        self.file_name5.grid(row=5, column=1, columnspan=4, sticky=W)

    def _load_into(self, entry, filename):
        """Shared body of the five browse handlers: put *filename* into *entry*.

        The original repeated this block verbatim in load_file1..load_file5;
        behavior (including setting self.filename) is unchanged.
        """
        self.filename = filename
        if self.filename:
            try:
                entry.delete(0, END)
                entry.insert(0, self.filename)
                entry.xview_moveto(1.0)
            except IOError:
                tkMessageBox.showerror("Error", "Failed to read file \n'%s'" % self.filename)

    def load_file1(self):
        self._load_into(self.file_name1, askopenfilename(initialdir=os.path.curdir))

    def load_file2(self):
        self._load_into(self.file_name2, askopenfilename(initialdir=os.path.curdir))

    def load_file3(self):
        self._load_into(self.file_name3, askopenfilename(initialdir=os.path.curdir))

    def load_file4(self):
        self._load_into(self.file_name4, askopenfilename(initialdir=os.path.curdir))

    def load_file5(self):
        self._load_into(self.file_name5,
                        asksaveasfilename(initialdir=os.path.curdir, defaultextension=".dfs2",
                                          filetypes=(("Dfs2 File", "*.dfs2"), ("All Files", "*.*"))))
##### main program
# Build the Tk root window, attach the interface and fix the window size.
root = Tk()
UI = interface(master=root)
UI.master.title("Evapotranspiration Correction Tool")
UI.master.geometry('680x270')
# Apply uniform padding around every widget placed by createWidgets().
for child in UI.winfo_children():
    child.grid_configure(padx=4, pady =6)
# Enter the Tk event loop (blocks until the window is closed).
UI.mainloop()
|
{"hexsha": "43265f7fe1295abed8009d2f37a1720ff93f8391", "size": 17421, "ext": "py", "lang": "Python", "max_stars_repo_path": "ET_CorrectionTool.py", "max_stars_repo_name": "Shubbee/ET-Correction-Tool", "max_stars_repo_head_hexsha": "37d35ba7899aa6fc490f2e8b908baf5d82e170d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ET_CorrectionTool.py", "max_issues_repo_name": "Shubbee/ET-Correction-Tool", "max_issues_repo_head_hexsha": "37d35ba7899aa6fc490f2e8b908baf5d82e170d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ET_CorrectionTool.py", "max_forks_repo_name": "Shubbee/ET-Correction-Tool", "max_forks_repo_head_hexsha": "37d35ba7899aa6fc490f2e8b908baf5d82e170d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0797101449, "max_line_length": 153, "alphanum_fraction": 0.599334137, "include": true, "reason": "import numpy", "num_tokens": 4092}
|
function [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (dec)
% by Sundar Krishnan
% 2003, Edited in June, 2004
%
% Description :
% This function Fr_dec2bin.m will convert a POSITIVE Decimal system
% Fraction (dec) to Binary system Fraction Fr_bin.
% Matlab itself has bin2dec.m and dec2bin.m, but there seems to be
% no standard Matlab function when fractions are involved.
%
% This function Fr_bin2dec.m and it's companion / dual function Fr_dec2bin.m
% were developed mainly with a view to get quick results
% while learning Arithmetic (Entropy) Coding in School.
% (Now, more comments have been added to better explain the programme.)
%
% The results of this function are limited in accuracy due to the
% "precision" used in the function num2str.m in addition to
% Floating Point limits and Rounding errors.
%
% Accumulation of errors due to these limits can be seen
% when Fr_bin2dec and Fr_dec2bin are tested back-to-back in pairs.
%
% After experiments, I observed that the best precision is 16.
% If all the digits of the input bin are used for a pure fraction,
% the results are likely to be more accurate since we have more margin
% wrt the limit of 16 digits.
%
% Given below under "Usage Eg" are the many cases
% that have been tested during the development of this program,
% together with the results obtained in each case.
%
% Pl do forward me any new case that breaks the code
% beyond the aforesaid limitations.
%
% Outputs str_Fr and Fr_dec are intermediate results:
% str_Fr is the de-exponentiated decimal string of the input, and
% Fr_dec is its fractional part (still as a string, starting with '.').
%
% See also : [Fr_dec, str_Fr, Fr_bin] = Fr_bin2dec (bin)
%
% Additional Test Cases involving pairs of dual tests
% are given towards the end.
%
% ********************
%
% Usage Eg : (The foll have been tried out.)
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (0.6796875)     % 0.1010111
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (113.6796875)   % 1110001.1010111
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (113.68359374)
%                            = 1110001.10101110111111111
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (1045.013671875)
%                            = 10000010101.000000111
%
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (0.013671875)   % 0.000000111
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (0.0000131835937)
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (10099300.131835937)
%                            = 100110100001101001100100.0010000111000000
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (1.0450137e+018)
%
% Also try this ! and enjoy the result :
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (1.0450137e+100)
%
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (2987.120089)
% %                          = 101110101011.0001111010111110
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (1167892987.120089)
% %                          = 1000101100111001010000111111011.0001111010111110
%
% &&&&&&&&&&&&
%
% Usage Eg : Check in pairs :
% Fr_dec = Fr_bin2dec (10000010100.0010000111)  % = 1.044125000000000e+003
% Fr_bin = Fr_dec2bin (1.044125000000000e+003)  % = 10000010100.001
% Fr_bin = Fr_dec2bin ( Fr_bin2dec (10000010100.0010000111) )
% returns Fr_bin = 10000010100.001
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec (101110101011.00011111) )
% returns Fr_bin = 101110101011.0001 (corr to 2987.0625)
% instead of the expected (same) 101110101011.00011111
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ...
%           (1000101100111001010000111111011.0001111010111110) )
% returns Fr_bin = 1000101100111001000000000000000.00000000000000000
% (corr to 1167884288)
% instead of the expected (same)
% 1000101100111001010000111111011.0001111010111110
% which itself was obtained with Fr_dec2bin (1167892987.120089)
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec (101110101011.0001111010111110) )
% returns Fr_bin = 101110101011.0001 (corr to 2987.0625)
% instead of the expected (same) 101110101011.0001111010111110
% which itself was obtained with Fr_dec2bin (2987.120089)
%
%
% ********************

% 1) Inits :
Fr_bin = 0 ;
exp_power = 0 ;

% &&&&&&&&&&&&

% 2) Use num2str to convert the input to string :
%
% After experiments, I observed that the best precision is 16.
% For eg, with precision >= 17,
% str_Fr = num2str ( .1010111, 17 ) = 0.10101110000000001
% str_Fr = num2str ( .1010111, 16 ) = 0.1010111
%
% num2str.m's output will also contain "0" prefix before the decimal dot "."
% which we remove later.

% 2-a) Check if the input is greater than 1.
% If yes, can we use higher precision ?
% NO, I have found problems with precision > 16 even when the input > 1 !
% So, commenting out the foll code, and retaining precision = 16 only.
% str_Fr = num2str (dec) ;
% if str_Fr > 1
%     precision = 48 ;
% else
%     precision = 16 ;
% end

precision = 16 ; % See the note above.
str_Fr = num2str (dec, precision) ;
% Some egs of dec = 1045.0137 , 1.0450137e+018 , 0.131835937 , 0.0000131835937

% NOTE : For long input dec strings, pl note that even with precision > 16,
% say, with precision = 48, the input itself is accurately read
% only for the first 16 digits ; or, if it is converted to an exp format,
% then the input is accurately read only till 15 decimals after the dot.
% For eg, if dec = 116789292349873465787.120089,
% the whole integer part is taken as :
% 116789292349873470000 = % 1.1678929234987347e+016
% So, this will by itself creep in errors !
% In general, it is observed errors will creep in
% if the whole integer part > 999999999999999

% &&&&&&&&&&&&

% 3) Now, if str_Fr above is in exp format, as for eg,
% '2.987062500000000e+003', we would like to get it in the form = 2987.0625
%
% I have observed that if the input no < 0.0001 (ie, < 0.0001000...)
% num2str.m's output is in the exp form ie, with powers less than e-005.
% For eg, dec = 0.000100000001 gives str_Fr = 0.000100000001
% But dec = 0.0000999999999999 gives str_Fr = 9.9999999999900001e-005
%
% Also, with precision = 16, num2str.m's output for nos > 1, upto 1.0e+015,
% is WITHOUT the exp form of power. For eg,
% with dec = 999999999999999.9999999999999999
% str_Fr = num2str (dec, 16) % gives = 1.0e+015 = 1 0000 0000 0000 000
%
% For nos > 1e+016, num2str.m's output is in exp form.

% NOTE(review): findstr is deprecated in later MATLAB releases in favour of
% strfind — left unchanged here.
if ~isempty ( findstr ( str_Fr, 'e') )

    exp_power = 0 ;
    [str_Fr_Bef_Exp, exp] = strtok ( str_Fr, 'e' ) ;
    % Some egs = str_Fr_Bef_Exp = 1.119996810555458, exp = e-005
    [exp_power, ign ] = strtok ( exp, 'e' ) ;
    % exp starts with 'e', hence see LHS
    exp_power = abs ( str2num (exp_power) ) ;

    % Remove the dot at the 2nd place : (as in 1.119996810555458)
    % However, there is no dot when it's a pure fraction,
    % and is an exact submultiple of 2 !
    if length (str_Fr_Bef_Exp) >= 2 ;
        str_Fr_Bef_Exp (2) = [] ;
    end

    if exp (2) == '-'       % < 1e-005
        % Rebuild the plain-decimal string as 0.<(exp_power-1) zeros><digits>
        for k = 1 : exp_power - 1
            str_Fr_Init_Zeros(k) = '0' ;
        end
        str_Fr = strcat ( '0.', str_Fr_Init_Zeros, str_Fr_Bef_Exp ) ;

    elseif exp (2) == '+'   % > 1.0e+015
        str_Fr = str_Fr_Bef_Exp ;

        % Normally, the foll "if" loop should not be necessary
        % since exp format does not occur for powers <= 1.0e+015. Still ...
        if length ( str_Fr ) > exp_power + 1
            % Shift the digits after position exp_power+1 one place right
            % and re-insert the decimal dot at its true position.
            str_Fr ( end + 1 ) = str_Fr (end) ;
            for j = length (str_Fr) - 1 : -1 : ...
                    length (str_Fr) - (exp_power + 1) + 1
                str_Fr ( j ) = str_Fr (j-1) ;
            end
            str_Fr (exp_power + 2) = '.' ;
        end

        % Foll logic when exp_power > 1.0e+015, like for eg,
        % str_Fr = '1.0450137e+018'
        % implies str_Fr_Bef_Exp = 10450137 (length = 8)
        % ie, str_Fr should become 10450137 0000 0000 000 (length = 19)
        % ie, padding with 0s at the end is reqd.
        if length ( str_Fr ) < exp_power
            str_Fr = strcat ( str_Fr, ...
                repmat ( ['0'], 1, exp_power - (length ( str_Fr ) - 1) ) ) ;
        end
    end
end

% &&&&&&&&&&&&

% 4) Separate the whole integer and fraction parts of str_Fr.
[bef_dec, Fr_dec] = strtok ( str_Fr, '.' ) ;

% &&&&&&&&&&&&

% Now, we have bef_dec as the whole integer part, and
% the Fractional part starting "."

% 5) Convert first the whole integer part to binary
% by calling the std Matlab's fn dec2bin.m
bef_bin = dec2bin ( str2num (bef_dec) ) ;

% &&&&&&&&&&&&

% 6) Now, finally, deal with the Fractional Part.
% NOTE(review): len_strFr is computed but not referenced again below.
len_strFr = length (Fr_dec) ;
% eg of Fr_dec = '.123456789' or = '.000000001' or = '.12402343750000'

% The Fractional Part Fr_bin should start here with the dot :
% We will later concatenate bef_bin and Fr_bin
%
% Note : The part about the Fractional Part Fr_bin is not as straightforward
% as the Fractional part Fr_dec in the dual file Fr_bin2dec.m
% It is more complex due to the fact that we need to find the decreasing
% powers of 2 that will match with Fr_dec.

% Greedy expansion: at each k, emit '1' and subtract 2^(-k) if it fits,
% else emit '0'.
Fr_bin = '.' ;
Fr_dec_Current = str2num (Fr_dec) ;
for k = 1 : 16
    if Fr_dec_Current >= 2^(-k)
        % Fr_bin = strcat ( Fr_bin, repmat (['0'], 1, k - length(Fr_bin)), ...
        %     '1' ) ;  % Old round about code, but it seems it still works !
        Fr_bin = strcat ( Fr_bin, '1' ) ;
        Fr_dec_Current = Fr_dec_Current - 2^-(k) ;

        % Don't go beyond the pt where the current decremented balance
        % is 0 or negative. This will happen if input dec is <= 2^(-16) !
        if Fr_dec_Current <= 0 % Uncomment foll when you want to see details
            % fprintf ( '\n ********** Fr_dec_Current <= 0 ********** \n' ) ;
            % fprintf ( '\n ******* Pausing ... Press any Key ******* \n' ) ;
            % pause
            break ;
        end
    else
        % Fr_bin = strcat ( Fr_bin, repmat (['0'], 1, k - length(Fr_bin)), ...
        %     '0' ) ;  % Old round about code, but it seems it still works !
        Fr_bin = strcat ( Fr_bin, '0' ) ;
    end
end % for k = 1 : 16

% k, Fr_bin, Fr_dec_Current % Uncomment for testing

% Note that since precision is set to 16, the limit in our code is :
% 2^(-16) = 0.0000152587890625
% So, if a fraction is less than 2^(-16), we will have Fr_bin = "."
% at this point.

% ++++++++++++

% 6-b) Also, check at the next level 2^(-k-1) ie, beyond the above k
% to add 1 at the end if Fr_dec_Current >= the half mark.!
% At the limit of k = 16 above, 2^(-17) = 0.00000762939453125
% However, we need to take caution if the no is lower than 2^(-16)
% in which case Fr_bin at this point, would be just '.0000000000000000'

if length(Fr_bin) == 17 & all ( Fr_bin == '.0000000000000000' )
    % Note for R13 : If short-circuiting double && were used (not in R12),
    % the 2nd expr will NOT be evaluated if the 1st is false
    % ie, if false AND X is always false, so X is not computed.
    % However, it is observed that even with this single &,
    % the 2nd expr is not computed if the 1st expr is false.

    % Input fraction was below 2^(-16): probe the 2^(-17)..2^(-19) levels.
    if Fr_dec_Current >= 2^-(17)
        % Fr_bin = strcat ( Fr_bin, repmat ( ['0'], 1, 16 ), '1' ) ; % Old
        Fr_bin = strcat ( Fr_bin, '1' ) ;
        Fr_dec_Current = Fr_dec_Current - 2^-(17) ;
        if Fr_dec_Current >= 2^(-18)
            Fr_bin = strcat ( Fr_bin, '1' ) ;
        end
    else
        % Fr_bin = strcat ( Fr_bin, repmat ( ['0'], 1, 17 ), '1' ) ; % Old
        Fr_bin = strcat ( Fr_bin, '0' ) ;
        Fr_dec_Current = Fr_dec_Current - 2^-(18) ;
        if Fr_dec_Current >= 2^(-19)
            Fr_bin = strcat ( Fr_bin, '1' ) ;
        end
    end

elseif Fr_dec_Current >= 2^(-k-1)
    % At this point, normally, k should be 16
    % unless at some point above, Fr_dec_Current <= 0
    Fr_bin = strcat ( Fr_bin, '1' ) ;
    % fprintf ( '\n ************ Last 1 added. ************ \n' ) ;
end

% &&&&&&&&&&&&

% 7) Concatenate the whole integer part and the fraction parts.
Fr_bin = strcat ( bef_bin, Fr_bin ) ;
% Fr_bin
% class_Fr_bin = class(Fr_bin) % = char (Note)
% But note that the dual function :
% Fr_dec = Fr_bin2dec (bin) returns a double !
% ********************
% 8) Some additional Test Cases :
% dec < 2^(-16) = 0.0000152587890625 (nearer to 2^-16 than 2^-17)
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (0.0000131835937)
% = 0.000000000000000011 (16 0s, 1, 1)
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ( 0.000000000000000011 ) )
% = 0.000000000000000011 (16 0s, 1, 1)
% Fr_bin2dec ( 0.000000000000000011 ) = 0.000011444091796875
% (= 2^-17 + 2^-18) in place of 0.0000131835937
% ++++++++++++
% dec < 2^(-16) = 0.0000152587890625 (nearer to 2^-17 than 2^-16)
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (0.0000101835937)
% = 0.00000000000000001 (16 0s, 1)
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ( 0.00000000000000001 ) )
% = 0.00000000000000001 (16 0s, 1)
% Fr_bin2dec ( 0.00000000000000001 ) = 0.00000762939453125
% (= 2^-17) in place of 0.0000101835937
% ++++++++++++
% Midway betn 2^-17 and 2^-18 = 0.0000057220458984375
% dec < 2^(-17) = 0.00000762939453125 (nearer to 2^-18 than 2^-17)
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (0.0000056835937)
% = 0.00000000000000000 (17 0s)
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ( 0.00000000000000000 ) )
% = 0.00000000000000000 (17 0s)
% Fr_bin2dec ( 0.00000000000000000 ) = 0.0
% in place of 0.0000056835937
% ++++++++++++
% dec < 2^(-17) = 0.00000762939453125 (nearer to 2^-17 than 2^-18)
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (0.0000070835937)
% = 0.000000000000000001 (17 0s, 1)
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ( 0.000000000000000001 ) )
% = 0.000000000000000001 (17 0s, 1)
% Fr_bin2dec ( 0.000000000000000001 ) = 0.000003814697265625
% (= 2^-18) in place of 0.0000070835937
% ++++++++++++
% dec < 2^(-17) = 0.00000762939453125 (nearer to 2^-18 than 2^-17)
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (0.0000039935937)
% = 0.00000000000000000 (17 0s)
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ( 0.00000000000000000 ) )
% = 0.00000000000000000 (17 0s)
% Fr_bin2dec ( 0.00000000000000000 ) = 0.0
% in place of 0.0000039935937
% in place of anything < (2^-17 - 2^-19)
% ie, < Midway betn 2^-17 and 2^-18
% ie, < 0.0000057220458984375
% ++++++++++++
% dec = 0.00001652587890625 very slightly > 2^(-16) = 0.0000152587890625
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (0.00001652587890625)
% = 0.0000000000000001 (15 0s, 1)
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ( 0.0000000000000001 ) )
% = 0.0000000000000001 (15 0s, 1)
% Fr_bin2dec ( 0.0000000000000001 ) = 0.0000152587890625
% in place of 0.00001652587890625
% ++++++++++++
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (999 + 2^-11 + 2^-9)
% = 1111100111.00000000101
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ( 1111100111.00000000101 ) )
% = 1111100111.00000000000000000
%
% Fr_bin2dec ( 1111100111.00000000101 ) = 999
% [Fr_dec, str_Fr, Fr_bin] = Fr_bin2dec ( 1111100111.00000000101 )
% gives Fr_dec = 999 in place of 999.00244140625 ,
% str_Fr = 1111100111 and an empty Fr_bin
% because of the precision = 16 limit !
%
% However, Fr_bin2dec ( .00000000101 ) = 0.00244140625
% This shows that if the all the digits of the input bin are used
% for a pure fraction, the results are likely to be more accurate
% since we have more margin wrt the limit of 16 digits.
% ++++++++++++
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (879.0010365625)
% = 1101101111.00000000010000111
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ( 1101101111.00000000010000111 ) )
% = 1101101111.00000000000000000
% Fr_bin2dec ( 1101101111.00000000010000111 ) = 879
% in place of 879.0010365625
% ++++++++++++
% [Fr_bin, str_Fr, Fr_dec] = Fr_dec2bin (879.0012765625)
% = 1101101111.00000000010100111
%
% Fr_bin = Fr_dec2bin ( Fr_bin2dec ( 1101101111.00000000010100111 ) )
% = 1101101111.00000000000000000
% Fr_bin2dec ( 1101101111.00000000010100111 ) = 879
% in place of 879.0012765625
% ++++++++++++
% ********************
% 9) Some useful values :
% (Pl note that the char length below in each line may cross 80 chars !
% But wrapping will not look nice nor easy to understand !)
%
% 2^-9 = 0.001953125
% 2^-10 = 0.0009765625
% 2^-11 = 0.00048828125
% 2^(-14) = 0.00006103515625
% 2^(-15) = 0.000030517578125
% 2^(-16) = 0.0000152587890625
% 2^(-17) = 0.00000762939453125
% 2^(-18) = 0.000003814697265625
% 2^(-19) = 0.0000019073486328125
%
% The pgm was tested with these values during development.
% These values can be spot-tested by testing the result of Fr_dec2bin ( dec). For eg :
% Fr_dec2bin ( 0.000285828865257397324183692319802554 ) ; = 0.00000000000100101
%
% Test Base = 2^(-15) :
% 2^(-15) + 2^(-16) = 0.0000457763671875 0.0000000000000011
%
% 2^(-15) + 2^(-16.9) = 0.0000386945607188132718216934686662547 0.00000000000000101
% 2^(-15) + 2^(-17) = 0.00003814697265625 0.00000000000000101
% 2^(-15) + 2^(-17.1) = 0.0000376360549281067460325725041667934 0.0000000000000010
%
% 2^(-15) + 2^(-18) = 0.000034332275390625 0.0000000000000010
% 2^(-15) + 2^(-18.01) = 0.0000343059253518563686429336937594913 0.0000000000000010
% 2^(-15) = 0.000030517578125
% Test Base = 2^(-14) :
% 2^(-14) + 2^(-15) = 0.000091552734375 0.000000000000011
% 2^(-14) + 2^(-15.1) = 0.0000895090634624269841302900166671735 0.00000000000001011
%
% 2^(-14) + 2^(-15.55) = 0.0000826143426875777442749281116365005 0.0000000000000101
% 2^(-14) + 2^(-16) = 0.0000762939453125 0.0000000000000101
%
% 2^(-14) + 2^(-16.55) = 0.0000714572163143493310459230799506385 0.00000000000001001
% 2^(-14) + 2^(-17) = 0.00006866455078125 0.00000000000001001
%
% 2^(-14) + 2^(-18) = 0.000064849853515625 0.0000000000000100
% 2^(-14) + 2^(-18.01) = 0.0000648235034768563686429336937594913 0.0000000000000100
% 2^(-14) = 0.00006103515625 0.00000000000001
% Test Base = 2^(-13) :
% 2^(-13) + 2^(-14) = 0.00018310546875 0.00000000000011
% 2^(-13) + 2^(-14.01) = 0.00018268386812970189828693910015186 0.00000000000010111
% 2^(-13) + 2^(-14.1) = 0.000179018126924853968260580033334347 0.00000000000010111
% 2^(-13) + 2^(-14.45) = 0.000166750662107715619528003885783119 0.00000000000010101
%
% 2^(-13) + 2^(-14.75) = 0.000158362033538901399741134642656265 0.0000000000001010
% 2^(-13) + 2^(-15) = 0.000152587890625 0.000000000000101
%
% 2^(-13) + 2^(-15.75) = 0.000140216173019450699870567321328132 0.0000000000001001
% 2^(-13) + 2^(-16) = 0.0001373291015625 0.0000000000001001
% 2^(-13) = 0.0001220703125 0.0000000000001
% Test Base = 2^(-12) :
% 2^(-12) + 2^(-13) = 0.0003662109375 0.0000000000011
% 2^(-12) + 2^(-13.55) = 0.000327517105514794648367384639605108 0.0000000000010101
%
% 2^(-12) + 2^(-14) = 0.00030517578125 0.00000000000101
% 2^(-12) + 2^(-14.55) = 0.000285828865257397324183692319802554 0.00000000000100101
%
% 2^(-12) + 2^(-15) = 0.000274658203125 0.000000000001001
% 2^(-12) = 0.000244140625 0.000000000001
% Test Base = 2^(-1) :
% 2^(-1) + 2^(-16) = 0.5000152587890625 0.1000000000000001
% 2^(-1) + 2^(-16.1) = 0.500014236953606213492065145008334 0.10000000000000001
% 2^(-1) + 2^(-16.9) = 0.500008176982593813271821693468666 0.10000000000000001
% 2^(-1) + 2^(-17) = 0.50000762939453125 0.10000000000000001
% 2^(-1) + 2^(-17.1) = 0.500007118476803106746032572504167 0.1000000000000000
% 2^(-1) + 2^(-17.99) = 0.500003841230583407283053713600975 0.1000000000000000
% 2^(-1) + 2^(-18) = 0.500003814697265625 0.1000000000000000
% 2^(-1) + 2^(-18.01) = 0.500003788347226856368642933693759 0.1000000000000000
% 2^(-1) = 0.5 0.1
% Test Base = 2^(-16) :
% 2^(-16) + 2^(-16.99) = 0.0000229412502293145661074272019509372 0.00000000000000011
% 2^(-16) + 2^(-17) = 0.00002288818359375 0.00000000000000011
% 2^(-16) + 2^(-17.01) = 0.0000228354835162127372858673875189825 0.0000000000000001
%
% 2^(-16) + 2^(-17.99) = 0.0000191000196459072830537136009754686 0.0000000000000001
% 2^(-16) + 2^(-18) = 0.000019073486328125 0.0000000000000001
% 2^(-16) + 2^(-18.01) = 0.0000190471362893563686429336937594913 0.0000000000000001
%
% 2^(-16) + 2^(-19) = 0.0000171661376953125 0.0000000000000001
% 2^(-16) = 0.0000152587890625 0.0000000000000001
% Test Base = 2^(-17) :
% 2^(-17) = 0.00000762939453125 0.00000000000000001
% Test Base = 2^(-18) :
% 2^(-18) = 0.000003814697265625 0.0000000000000000
% Midway betn 2^-17 and 2^-18 = 0.0000057220458984375 is just above 0 ;
% 0.0000057220458984375 is the limit for this set of programmes.
% Anything < (2^-17 - 2^-19) ie, anything < 0.0000057220458984375 is 0.
%
% ********************
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/5396-conversion-of-fractions-from-binary-to-decimal-and-vice-versa/Fr_dec2bin.m"}
|
import json
import random
import pdb
import rdkit.Chem as Chem
import numpy as np
from tqdm import tqdm
import utils.data_utils as data_utils
from template.rdchiral.main import rdchiralRun, rdchiralReaction, rdchiralReactants
def main():
    """Estimate reaction-template applicability statistics on training data.

    For a random sample of 500 training molecules, collects:
      * n_r_counts: the number of acyclic, non-aromatic single bonds per
        molecule (candidate disconnection sites), and
      * n_t_counts: the number of distinct outcome SMILES obtained by
        applying every training template to the molecule.
    Drops into pdb at the end so the arrays can be inspected interactively.
    """
    # The template file is only read, so open it read-only (was 'r+').
    with open('template/templates_train.json', 'r') as template_file:
        template_list = list(json.load(template_file).keys())

    data_dir = 'data/stanford_no_rxn'
    train_src_data, _ = data_utils.read_src_tgt_files(
        data_dir=data_dir, data_type='train')
    random.shuffle(train_src_data)
    train_src_data = train_src_data[:500]  # random sample for a quick estimate

    # Parse every template once up front.  The original rebuilt each
    # rdchiralReaction inside the molecule loop, re-parsing all templates
    # for every one of the 500 molecules.
    rd_rxns = [rdchiralReaction(template) for template in template_list]

    n_r_counts = []
    n_t_counts = []
    for train_smiles in tqdm(train_src_data):
        mol = Chem.MolFromSmiles(train_smiles)
        # Count acyclic, non-aromatic single bonds.
        n_possible_bonds = sum(
            1 for bond in mol.GetBonds()
            if not bond.IsInRing() and not bond.GetIsAromatic()
            and bond.GetBondType() == Chem.rdchem.BondType.SINGLE)
        n_r_counts.append(n_possible_bonds)

        rd_rct = rdchiralReactants(train_smiles)
        possible_outcomes = set()
        for rd_rxn in rd_rxns:
            possible_outcomes.update(rdchiralRun(rd_rxn, rd_rct))
        n_t_counts.append(len(possible_outcomes))

    n_r_counts = np.array(n_r_counts)
    n_t_counts = np.array(n_t_counts)
    # Intentional: drop into the debugger to inspect the collected counts.
    pdb.set_trace()


if __name__ == '__main__':
    main()
|
{"hexsha": "ee05143f8c04f02f6270a0364eecfb949daafba7", "size": 1642, "ext": "py", "lang": "Python", "max_stars_repo_path": "template/count_freq.py", "max_stars_repo_name": "sw32-seo/GTA", "max_stars_repo_head_hexsha": "86b102a14b78f6c8b50d742a56445c748e59b51e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-09-30T16:28:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T05:20:27.000Z", "max_issues_repo_path": "template/count_freq.py", "max_issues_repo_name": "sw32-seo/GTA", "max_issues_repo_head_hexsha": "86b102a14b78f6c8b50d742a56445c748e59b51e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "template/count_freq.py", "max_forks_repo_name": "sw32-seo/GTA", "max_forks_repo_head_hexsha": "86b102a14b78f6c8b50d742a56445c748e59b51e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8070175439, "max_line_length": 83, "alphanum_fraction": 0.6729598051, "include": true, "reason": "import numpy", "num_tokens": 398}
|
# coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.datasets.dataset_common."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from rigl.experimental.jax.datasets import dataset_base
from rigl.experimental.jax.datasets import dataset_factory
class DatasetCommonTest(parameterized.TestCase):
  """Tests datasets created via the dataset factory."""

  def setUp(self):
    super().setUp()
    # Distinct train/test batch sizes so the two iterators are
    # distinguishable; small shuffle buffer keeps the tests fast.
    self._batch_size = 32
    self._batch_size_test = 10
    self._shuffle_buffer_size = 128

  def _create_dataset(self, dataset_name):
    """Helper function for creating a dataset."""
    return dataset_factory.create_dataset(
        dataset_name,
        self._batch_size,
        self._batch_size_test,
        shuffle_buffer_size=self._shuffle_buffer_size)

  def _check_batch(self, dataset_name, sample, batch_size):
    """Asserts that a single batch dict has the expected structure.

    Shared by the train/test iterator tests, which previously duplicated
    these seven assertions verbatim.

    Args:
      dataset_name: name of the dataset, used to label subtests.
      sample: a batch dict with 'image' and 'label' ndarray entries.
      batch_size: expected leading (batch) dimension of the sample.
    """
    with self.subTest(name='{}_sample'.format(dataset_name)):
      self.assertNotEmpty(sample)

    with self.subTest(name='{}_label_type'.format(dataset_name)):
      self.assertIsInstance(sample['label'], np.ndarray)

    with self.subTest(name='{}_label_batch_size'.format(dataset_name)):
      self.assertLen(sample['label'], batch_size)

    with self.subTest(name='{}_image_type'.format(dataset_name)):
      self.assertIsInstance(sample['image'], np.ndarray)

    with self.subTest(name='{}_image_shape'.format(dataset_name)):
      self.assertLen(sample['image'].shape, 4)

    with self.subTest(name='{}_image_batch_size'.format(dataset_name)):
      self.assertEqual(sample['image'].shape[0], batch_size)

    with self.subTest(
        name='{}_non_zero_image_dimensions'.format(dataset_name)):
      self.assertGreater(sample['image'].shape[1], 1)

  def test_dataset_supported(self):
    """Tests supported datasets."""
    for dataset_name in dataset_factory.DATASETS:
      dataset = self._create_dataset(dataset_name)
      self.assertIsInstance(dataset, dataset_base.Dataset)

  @parameterized.parameters(*dataset_factory.DATASETS.keys())
  def test_dataset_train_iterators(self, dataset_name):
    """Tests dataset's train iterator."""
    dataset = self._create_dataset(dataset_name)
    sample = next(dataset.get_train())
    self._check_batch(dataset_name, sample, self._batch_size)

  @parameterized.parameters(*dataset_factory.DATASETS.keys())
  def test_dataset_test_iterators(self, dataset_name):
    """Tests dataset's test iterator."""
    dataset = self._create_dataset(dataset_name)
    sample = next(dataset.get_test())
    self._check_batch(dataset_name, sample, self._batch_size_test)

  def test_dataset_unsupported(self):
    """Tests unsupported datasets."""
    with self.assertRaisesRegex(ValueError, 'No such dataset: unsupported'):
      self._create_dataset('unsupported')
# Run the absl test runner when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
|
{"hexsha": "759c18390427c19e476ce857086bdeeecd299411", "size": 4060, "ext": "py", "lang": "Python", "max_stars_repo_path": "rigl/experimental/jax/datasets/dataset_factory_test.py", "max_stars_repo_name": "vishalbelsare/rigl", "max_stars_repo_head_hexsha": "f18abc7d82ae3acc6736068408a0186c9efa575c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 276, "max_stars_repo_stars_event_min_datetime": "2019-11-25T22:05:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:55:34.000Z", "max_issues_repo_path": "rigl/experimental/jax/datasets/dataset_factory_test.py", "max_issues_repo_name": "vishalbelsare/rigl", "max_issues_repo_head_hexsha": "f18abc7d82ae3acc6736068408a0186c9efa575c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-02-26T14:53:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T16:27:28.000Z", "max_forks_repo_path": "rigl/experimental/jax/datasets/dataset_factory_test.py", "max_forks_repo_name": "vishalbelsare/rigl", "max_forks_repo_head_hexsha": "f18abc7d82ae3acc6736068408a0186c9efa575c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 54, "max_forks_repo_forks_event_min_datetime": "2019-11-26T18:50:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T20:08:08.000Z", "avg_line_length": 36.5765765766, "max_line_length": 76, "alphanum_fraction": 0.7315270936, "include": true, "reason": "import numpy", "num_tokens": 879}
|
using Test
using jInvVis
using jInv.Mesh
# tests for regular mesh: visualise slices of simple linear fields
bounds = [0 1.1 0 1.0 0 1.1]
ncells = [8 5 3]
mesh   = getRegularMesh(bounds, ncells)
cc     = getCellCenteredGrid(mesh)

# index of the middle cell along dimension k
midIdx(k) = Int(round(ncells[k] / 2))

println("=== test viewSlice2D ===")
figure(1); clf()

subplot(1, 3, 1)
viewSlice2D(cc[:, 1], mesh, midIdx(3))
xlabel("x, intensity increases --> ")
ylabel("intensity constant")
title("f(x,y,z) = x")

subplot(1, 3, 2)
viewSlice2D(cc[:, 1], mesh, midIdx(2), view=:xz, addLabel=true)
xlabel("x, intensity increases --> ")
title("f(x,y,z) = x")

subplot(1, 3, 3)
viewSlice2D(cc[:, 2], mesh, midIdx(1), view=:yz, addLabel=true)
xlabel("y, intensity increases --> ")
title("f(x,y,z) = y")
|
{"hexsha": "9c7d7bae6de7719a67505d5421e5ec9431661b04", "size": 665, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/testViewSlice2D.jl", "max_stars_repo_name": "JuliaInv/jInvVis.jl", "max_stars_repo_head_hexsha": "49b3a041734b043ed945ee06e41a3988052554f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-29T20:52:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-29T20:52:09.000Z", "max_issues_repo_path": "test/testViewSlice2D.jl", "max_issues_repo_name": "JuliaInv/jInvVis.jl", "max_issues_repo_head_hexsha": "49b3a041734b043ed945ee06e41a3988052554f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-04-04T09:18:16.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-15T04:43:24.000Z", "max_forks_repo_path": "test/testViewSlice2D.jl", "max_forks_repo_name": "JuliaInv/jInvVis.jl", "max_forks_repo_head_hexsha": "49b3a041734b043ed945ee06e41a3988052554f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-03-31T15:30:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:13:48.000Z", "avg_line_length": 23.75, "max_line_length": 65, "alphanum_fraction": 0.6390977444, "num_tokens": 245}
|
""" Testing CULVERT (Changing from Horizontal Abstraction to Vertical Abstraction
This example includes a Model Topography that shows a TYPICAL Headwall Configuration
The aim is to change the Culvert Routine to Model more precisely the abstraction
from a vertical face.
The inflow must include the impact of Approach velocity.
Similarly the Outflow has MOMENTUM Not just Up welling as in the Horizontal Style
abstraction
"""
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
print('Starting.... Importing Modules...')
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross
from anuga.shallow_water import Domain, Reflective_boundary,\
Dirichlet_boundary,\
Transmissive_boundary, Time_boundary
from anuga.culvert_flows.culvert_class import Culvert_flow
from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model
from math import pi,pow,sqrt
import numpy as num
#------------------------------------------------------------------------------
# Setup computational domain
#------------------------------------------------------------------------------
print('Setting up domain')
# Channel dimensions in metres: 40 m long, 5 m wide.
length = 40.
width = 5.
dx = dy = 1 # Resolution: Length of subdivisions on both axes
#dx = dy = .5 # Resolution: Length of subdivisions on both axes
#dx = dy = .5 # Resolution: Length of subdivisions on both axes
#dx = dy = .1 # Resolution: Length of subdivisions on both axes
# Build a rectangular cross-divided mesh covering the full domain.
points, vertices, boundary = rectangular_cross(int(old_div(length,dx)), int(old_div(width,dy)),
                                    len1=length, len2=width)
domain = Domain(points, vertices, boundary)
domain.set_name('Test_Culv_Flat_WL')          # Output name
domain.set_default_order(2)                   # order 2 (presumably 2nd-order spatial scheme)
domain.H0 = 0.01                              # NOTE(review): looks like a minimum-depth threshold — confirm in ANUGA docs
domain.tight_slope_limiters = 1
print('Size', len(domain))
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
def topography(x, y):
    """Weir topography: a gently sloping channel bed with an embankment
    across it and a notch cut out of both faces for a culvert.

    The embankment has a sloping upstream face (5 < x < 10.1), a flat
    crest (10 < x < 12.1) and a sloping downstream face (12 < x < 14.5).
    A wedge-shaped segment is left uncut on each face so the culvert can
    connect the upstream and downstream sides.

    Parameters
    ----------
    x, y : equal-length numpy float arrays of point coordinates
        (mesh coordinates from rectangular_cross are floats).

    Returns
    -------
    numpy array of bed elevations, one per input point.
    """
    # General slope of the topography.  The original used old_div, which
    # performs true division for float arrays, so plain division is
    # equivalent here (and the no-op "z[i] = z[i]" branches are dropped).
    z = -x / 1000
    N = len(x)
    for i in range(N):
        # Sloping embankment across the channel: upstream face.
        if 5.0 < x[i] < 10.1:
            # Leave the cut-out segment for the culvert face untouched.
            if not (1.0 + (x[i] - 5.0) / 5.0 < y[i] < 4.0 - (x[i] - 5.0) / 5.0):
                z[i] += 0.5 * (x[i] - 5.0)  # Sloping U/S face
        # Flat crest of the embankment.
        if 10.0 < x[i] < 12.1:
            z[i] += 2.5
        # Downstream face.
        if 12.0 < x[i] < 14.5:
            # Leave the cut-out segment for the culvert face untouched.
            if not (2.0 - (x[i] - 12.0) / 2.5 < y[i] < 3.0 + (x[i] - 12.0) / 2.5):
                z[i] += 2.5 - 1.0 * (x[i] - 12.0)  # Sloping D/S face
    return z
print('Setting Quantities....')
domain.set_quantity('elevation', topography)  # Use function for elevation
domain.set_quantity('friction', 0.01)         # Constant friction
domain.set_quantity('stage',
                    expression='elevation')   # Dry initial condition
#------------------------------------------------------------------------------
# Setup specialised forcing terms
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Setup CULVERT INLETS and OUTLETS in Current Topography
#------------------------------------------------------------------------------
print('DEFINING any Structures if Required')
# DEFINE CULVERT INLET AND OUTLETS
# NOTE(review): culvert_rating is constructed from a rating-curve file but is
# never appended to domain.forcing_terms below (only culvert_energy is) --
# confirm the rating-curve culvert is intentionally inactive in this run.
culvert_rating = Culvert_flow(domain,
                      culvert_description_filename='example_rating_curve.csv',
                      end_point0=[9.0, 2.5],
                      end_point1=[13.0, 2.5],
                      verbose=True)
# Energy/Boyd-routine culvert: single 1.2 m x 0.75 m barrel connecting the
# upstream (x=9) and downstream (x=13) faces of the embankment.
culvert_energy = Culvert_flow(domain,
                     label='Culvert No. 1',
                     description='This culvert is a test unit 1.2m Wide by 0.75m High',
                     end_point0=[9.0, 2.5],
                     end_point1=[13.0, 2.5],
                     width=1.20,height=0.75,
                     culvert_routine=boyd_generalised_culvert_model,
                     number_of_barrels=1,
                     update_interval=2,
                     log_file=True,
                     discharge_hydrograph=True,
                     verbose=True)
domain.forcing_terms.append(culvert_energy)
#------------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
print('Setting Boundary Conditions')
Bi = Dirichlet_boundary([0.0, 0.0, 0.0])  # Inflow based on Flow Depth and Approaching Momentum !!!
Br = Reflective_boundary(domain)          # Solid reflective wall
Bo = Dirichlet_boundary([-5, 0, 0])       # Outflow
# Sinusoidal stage boundaries: upstream oscillates about 1.25 m with a 10 s
# period, downstream about 0.75 m with a 20 s period.
# NOTE(review): Bi and Bo are defined but unused -- both ends are driven by
# the time-varying boundaries.
Btus = Time_boundary(domain, lambda t: [0.0+ 1.25*(1+num.sin(old_div(2*pi*(t-4),10))), 0.0, 0.0])
Btds = Time_boundary(domain, lambda t: [0.0+ 0.75*(1+num.sin(old_div(2*pi*(t-4),20))), 0.0, 0.0])
domain.set_boundary({'left': Btus, 'right': Btds, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
#for t in domain.evolve(yieldstep = 1, finaltime = 25):
#    print domain.timestepping_statistics()
#import sys; sys.exit()
# Profiling code: run the 25 s evolution under the profiler and report the 30
# most expensive calls by cumulative time.
import time
t0 = time.time()
s = 'for t in domain.evolve(yieldstep = 1, finaltime = 25): domain.write_time()'
import profile, pstats
FN = 'profile.dat'
profile.run(s, FN)
print('That took %.2f seconds' %(time.time()-t0))
S = pstats.Stats(FN)
#S.sort_stats('time').print_stats(20)
s = S.sort_stats('cumulative').print_stats(30)
print(s)
|
{"hexsha": "714eabd22daeb6b4f1a8f63f38be6f5a85ec8118", "size": 6336, "ext": "py", "lang": "Python", "max_stars_repo_path": "anuga/culvert_flows/tests/run_culvert_flat_water_lev.py", "max_stars_repo_name": "samcom12/anuga_core", "max_stars_repo_head_hexsha": "f4378114dbf02d666fe6423de45798add5c42806", "max_stars_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_stars_count": 136, "max_stars_repo_stars_event_min_datetime": "2015-05-07T05:47:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T03:07:40.000Z", "max_issues_repo_path": "anuga/culvert_flows/tests/run_culvert_flat_water_lev.py", "max_issues_repo_name": "samcom12/anuga_core", "max_issues_repo_head_hexsha": "f4378114dbf02d666fe6423de45798add5c42806", "max_issues_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_issues_count": 184, "max_issues_repo_issues_event_min_datetime": "2015-05-03T09:27:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-20T04:22:48.000Z", "max_forks_repo_path": "anuga/culvert_flows/tests/run_culvert_flat_water_lev.py", "max_forks_repo_name": "samcom12/anuga_core", "max_forks_repo_head_hexsha": "f4378114dbf02d666fe6423de45798add5c42806", "max_forks_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_forks_count": 70, "max_forks_repo_forks_event_min_datetime": "2015-03-18T07:35:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-01T07:07:29.000Z", "avg_line_length": 35.595505618, "max_line_length": 107, "alphanum_fraction": 0.5115214646, "include": true, "reason": "import numpy", "num_tokens": 1443}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import joblib
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
__author__ = "Fengming Liu, Longzhen Li, Shaomiao Yin"
__status__ = "Development"
def classifier_run(clf, name, x_train, x_test, y_train, y_test):
    """Fit *clf* on the training split and evaluate it on the test split.

    Returns a list ``[accuracy, confusion_matrix, fitted classifier]``.
    NOTE(review): *name* is unused here; kept for caller compatibility.
    """
    clf.fit(x_train, y_train)
    predictions = clf.predict(x_test)
    score = accuracy_score(y_test, predictions)
    matrix = confusion_matrix(y_test, predictions)
    return [score, matrix, clf]
#company_list = ["apple", "amazon", "facebook", "google", "microsoft", "netflix", "tesla"]
# Feature subsets to compare: recency/sentiment aggregates, news counts and
# one-hot company indicator columns, in various combinations.
features_list = [["relative_day", "past_3_days_senti_avg"],
                 ["relative_day", "past_7_days_senti_avg"],
                 ["relative_day", "1_day_sentiment_score"],
                 ["1_day_sentiment_score"],
                 ["past_3_days_senti_avg"],
                 ["past_7_days_senti_avg"],
                 ["1_day_news_count"],
                 ["1_day_overall_sentiment_score"],
                 ["1_day_sentiment_score","1_day_news_count"],
                 ["1_day_sentiment_score","1_day_news_count","past_3_days_senti_avg"],
                 ["1_day_sentiment_score","1_day_news_count","past_3_days_senti_avg","past_7_days_senti_avg"],
                 ["1_day_sentiment_score","company_apple","company_amazon", "company_facebook", "company_google", "company_microsoft", "company_netflix", "company_tesla"],
                 ["1_day_sentiment_score","1_day_news_count","company_apple","company_amazon", "company_facebook", "company_google", "company_microsoft", "company_netflix", "company_tesla"],
                 ["1_day_sentiment_score","1_day_news_count","past_3_days_senti_avg","past_7_days_senti_avg","company_apple","company_amazon", "company_facebook", "company_google", "company_microsoft", "company_netflix", "company_tesla"],
                ]
response_list = ["up_cat"]
# NOTE(review): plain open() without a context manager -- the file stays open
# if anything below raises; consider "with open(...) as result:".
result = open("./results/clf_results.csv", "w")
alg_dict = {"KNN": KNeighborsClassifier(),
            "DecisionTree": DecisionTreeClassifier(criterion='entropy'),
            "SVM": SVC(gamma='auto'),
            }
for response in response_list:
    for features in features_list:
        # write info
        result.write("features:,")
        for feat in features:
            result.write(feat + ',')
        result.write('\n')
        result.write("response:," + response + '\n')
        result.write(" ,")
        for alg_name, clf in alg_dict.items():
            result.write(alg_name + ',')
        result.write('\n')
        # do ML
        ###############################
        # NOTE(review): total_clf.csv is re-read on every iteration; the read
        # could be hoisted above the loops.
        total_df = pd.read_csv("./total_clf.csv")
        x_train, x_test, y_train, y_test = train_test_split(total_df[features].to_numpy(),
                                                            total_df[response],
                                                            test_size=0.3,
                                                            shuffle=True,
                                                            random_state=500)
        result.write(' ,')
        for alg_name, clf in alg_dict.items():
            print(features, response, alg_name)
            [accuracy, cm, clf] = classifier_run(clf, alg_name, x_train, x_test, y_train, y_test)
            # NOTE(review): the model file is keyed only by algorithm name, so
            # each feature set overwrites the previous model on disk.
            joblib.dump(clf, "./models/" + alg_name + "_model.joblib")
            print(cm)
            result.write(str(accuracy) + ',')
        result.write('\n')
        result.write('\n')
result.write('\n')
result.close()
|
{"hexsha": "4a1f8b8544c98cf17015e9a1dad8f06fe3bfabc1", "size": 3663, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/data_analysis/others/clf_models.py", "max_stars_repo_name": "mcpeixoto/Sentrade", "max_stars_repo_head_hexsha": "55f65508d6b565b99840c9ce5d757185f5027164", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-28T18:40:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-01T08:29:29.000Z", "max_issues_repo_path": "data/data_analysis/others/clf_models.py", "max_issues_repo_name": "ZiyouZhang/Sentrade", "max_issues_repo_head_hexsha": "c88d20a858de6d05649f99230ca2b44f4c76cd3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/data_analysis/others/clf_models.py", "max_forks_repo_name": "ZiyouZhang/Sentrade", "max_forks_repo_head_hexsha": "c88d20a858de6d05649f99230ca2b44f4c76cd3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-10T22:32:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T21:28:47.000Z", "avg_line_length": 45.7875, "max_line_length": 238, "alphanum_fraction": 0.598962599, "include": true, "reason": "import numpy", "num_tokens": 816}
|
function [propValue, srcObj] = get(obj, propName)
% Accessor for reading BstPanel attributes.
%
% INPUTS:
%    - obj      : BstPanel object (possibly an object array)
%    - propName : name of an attribute, or of a control in obj.sControls
% OUTPUTS:
%    - propValue: attribute value (cell array when obj is an object array),
%                 control value, or [] when nothing matches
%    - srcObj   : object the value was read from, or [] when nothing matches
% @=============================================================================
% This function is part of the Brainstorm software:
% https://neuroimage.usc.edu/brainstorm
%
% Copyright (c) University of Southern California & McGill University
% This software is distributed under the terms of the GNU General Public License
% as published by the Free Software Foundation. Further details on the GPLv3
% license can be found at http://www.gnu.org/copyleft/gpl.html.
%
% FOR RESEARCH PURPOSES ONLY. THE SOFTWARE IS PROVIDED "AS IS," AND THE
% UNIVERSITY OF SOUTHERN CALIFORNIA AND ITS COLLABORATORS DO NOT MAKE ANY
% WARRANTY, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO WARRANTIES OF
% MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, NOR DO THEY ASSUME ANY
% LIABILITY OR RESPONSIBILITY FOR THE USE OF THIS SOFTWARE.
%
% For more information type "brainstorm license" at command prompt.
% =============================================================================@
%
% Authors: Francois Tadel, 2008
% Check if the field propName exists in the object
if (ismember(propName, fields(obj)))
    % If it exists, try to return its value
    if (length(obj) == 1)
        propValue = obj.(propName);
        srcObj = obj;
    else
        % Object array: gather the attribute from every element
        propValue = {obj.(propName)};
        srcObj = obj;
    end
else
    %iPanel = find(cellfun(@(f)isfield(f, propName), obj.sControls), 1);
    % Fall back to the panel controls structure
    if isfield(obj.sControls, propName)
        propValue = obj.sControls.(propName);
        srcObj = obj;
    else
        % Not an attribute and not a control: return empty
        propValue = [];
        srcObj = [];
    end
end
end
|
{"author": "brainstorm-tools", "repo": "brainstorm3", "sha": "a892cfaabde1eaa2f9a3ac015c05b73f3739433a", "save_path": "github-repos/MATLAB/brainstorm-tools-brainstorm3", "path": "github-repos/MATLAB/brainstorm-tools-brainstorm3/brainstorm3-a892cfaabde1eaa2f9a3ac015c05b73f3739433a/toolbox/gui/@BstPanel/get.m"}
|
#!/usr/bin/env python
import sys
import argparse
sys.path.append('.')
from scripts.py_featextr_server.base_server import BaseQueryHandler, start_query_server
import numpy as np
from scripts.py_featextr_server.utils import load_embeddings, create_embed_map, robust_cosine_simil
# Exclusive==True means that only one get_scores
# function is executed at at time
class CosineSimilQueryHandler(BaseQueryHandler):
    """Query handler scoring documents by cosine similarity of averaged
    (optionally IDF-weighted) word embeddings.

    Separate query/document embedding files may be supplied; when no query
    embedding file is given, the document embeddings are shared by both sides.
    """
    def __init__(self, query_embed_file, doc_embed_file, exclusive, debug_print=False, use_idf=True):
        super().__init__(exclusive)
        self.debug_print = debug_print
        # When True, each word vector is additionally weighted by the word's
        # IDF (see create_doc_embed).
        self.use_idf = use_idf
        print('Loading answer embeddings from: ' + doc_embed_file)
        answ_words, self.answ_embed = load_embeddings(doc_embed_file)
        self.answ_embed_map = create_embed_map(answ_words)
        if query_embed_file is not None:
            print('Loading query embeddings from: ' + query_embed_file)
            query_words, self.query_embed = load_embeddings(query_embed_file)
            self.query_embed_map = create_embed_map(query_words)
        else:
            # No dedicated query embeddings: share the answer ones.
            self.query_embed = self.answ_embed
            self.query_embed_map = self.answ_embed_map
        print('Loading is done!')
    def text_entry_to_str(self, te):
        # Debug helper: 'doc_id=<id>' plus, when debug printing is enabled,
        # one 'word IDF qty' triple per word entry.
        arr = []
        if self.debug_print:
            for winfo in te.entries:
                arr.append('%s %g %d ' % (winfo.word, winfo.IDF, winfo.qty))
        return 'doc_id=' + te.id + ' ' + ' '.join(arr)
    def create_doc_embed(self, is_query, text_entry):
        """Sum the embeddings of in-vocabulary words of *text_entry*, each
        scaled by its frequency (and IDF when self.use_idf).  Returns an
        all-zero vector when no word is in the vocabulary."""
        if is_query:
            embeds = self.query_embed
            embed_map = self.query_embed_map
        else:
            embeds = self.answ_embed
            embed_map = self.answ_embed_map
        zerov = np.zeros_like(embeds[0])
        # res aliases zerov and the += below mutates it in place; zerov is a
        # fresh array on every call, so this is safe.
        res = zerov
        for winfo in text_entry.entries:
            vect_mult = winfo.qty
            if self.use_idf:
                vect_mult *= winfo.IDF
            word = winfo.word
            if word in embed_map:
                res += embeds[embed_map[word]] * vect_mult
        return res
    # This function overrides the parent class
    def compute_scores_from_parsed_override(self, query, docs):
        """Score each document in *docs* against *query*; returns a dict
        mapping document id to a one-element feature list [similarity]."""
        if self.debug_print:
            print('get_scores', query.id, self.text_entry_to_str(query))
        ret = {}
        query_embed = self.create_doc_embed(True, query)
        if self.debug_print:
            print(query_embed)
        for d in docs:
            if self.debug_print:
                print(self.text_entry_to_str(d))
            doc_embed = self.create_doc_embed(False, d)
            if self.debug_print:
                print(doc_embed)
            # Regular cosine deals poorly with all-zero vectors
            simil = robust_cosine_simil(doc_embed, query_embed)
            # simil = (1-cosine(doc_embed, query_embed))
            # Note that each element must be an array, b/c
            # we can generate more than one feature per document!
            ret[d.id] = [simil]
        return ret
if __name__ == '__main__':
    # Command-line entry point: parse server/embedding options and start the
    # multi-threaded feature-extraction server.
    parser = argparse.ArgumentParser(description='Serving word-embedding models.')
    parser.add_argument('--query_embed', metavar='query embeddings',
                        default=None, type=str,
                        help='Optional query embeddings file')
    parser.add_argument('--doc_embed', metavar='doc embeddings',
                        required=True, type=str,
                        help='document embeddings file')
    parser.add_argument('--debug_print', action='store_true',
                        help='Provide debug output')
    parser.add_argument('--port', metavar='server port',
                        required=True, type=int,
                        help='Server port')
    parser.add_argument('--host', metavar='server host',
                        default='127.0.0.1', type=str,
                        help='server host addr to bind the port')
    args = parser.parse_args()
    # exclusive=False: several get_scores calls may run concurrently.
    multi_threaded = True
    start_query_server(args.host, args.port, multi_threaded,
                       CosineSimilQueryHandler(exclusive=False,
                                               query_embed_file=args.query_embed,
                                               doc_embed_file=args.doc_embed,
                                               debug_print=args.debug_print))
|
{"hexsha": "be171beac128416167eee97c9a6608464421fe2b", "size": 4347, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/py_featextr_server/wordembed_cosine_server.py", "max_stars_repo_name": "prateeksingh0001/FlexNeuART", "max_stars_repo_head_hexsha": "ebc82ca4fe01436374c595db2429bc49fb9e1dd0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/py_featextr_server/wordembed_cosine_server.py", "max_issues_repo_name": "prateeksingh0001/FlexNeuART", "max_issues_repo_head_hexsha": "ebc82ca4fe01436374c595db2429bc49fb9e1dd0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/py_featextr_server/wordembed_cosine_server.py", "max_forks_repo_name": "prateeksingh0001/FlexNeuART", "max_forks_repo_head_hexsha": "ebc82ca4fe01436374c595db2429bc49fb9e1dd0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5294117647, "max_line_length": 101, "alphanum_fraction": 0.6036346906, "include": true, "reason": "import numpy", "num_tokens": 899}
|
[STATEMENT]
lemma wt_int:
assumes wtE: "wtE \<xi>" and wt: "wt T"
shows "intT (tpOf T) (int \<xi> T)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. intT (tpOf T) (int \<xi> T)
[PROOF STEP]
using wt
[PROOF STATE]
proof (prove)
using this:
wt T
goal (1 subgoal):
1. intT (tpOf T) (int \<xi> T)
[PROOF STEP]
apply(induct T)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. wt (Var x) \<Longrightarrow> intT (tpOf (Var x)) (int \<xi> (Var x))
2. \<And>f Tl. \<lbrakk>list_all (\<lambda>a. wt a \<longrightarrow> intT (tpOf a) (int \<xi> a)) Tl; wt (Fn f Tl)\<rbrakk> \<Longrightarrow> intT (tpOf (Fn f Tl)) (int \<xi> (Fn f Tl))
[PROOF STEP]
using wtE
[PROOF STATE]
proof (prove)
using this:
wtE \<xi>
goal (2 subgoals):
1. \<And>x. wt (Var x) \<Longrightarrow> intT (tpOf (Var x)) (int \<xi> (Var x))
2. \<And>f Tl. \<lbrakk>list_all (\<lambda>a. wt a \<longrightarrow> intT (tpOf a) (int \<xi> a)) Tl; wt (Fn f Tl)\<rbrakk> \<Longrightarrow> intT (tpOf (Fn f Tl)) (int \<xi> (Fn f Tl))
[PROOF STEP]
by (auto intro!: intF simp add: list_all2_map_map)
|
{"llama_tokens": 498, "file": "Sort_Encodings_M", "length": 4}
|
from datetime import datetime
import os
from typing import List
import numpy as np
import pandas as pd
import pytest
from drift_report.domain.statistical_report.statistical_feature_report import (
HeatMapData,
)
from drift_report.domain.statistical_report.statistical_report import StatisticalReport
import drift_report.proto.monitoring_manager_pb2 as proto
@pytest.fixture()
def training_data_path():
    # Directory of this test module; used to locate the resources/ folder.
    return os.path.dirname(os.path.realpath(__file__))
@pytest.fixture()
def training_data(training_data_path):
    # Census-style tabular data; used below as BOTH the training and the
    # "production" frame, so no drift is expected between them.
    return pd.read_csv(f"{training_data_path}/resources/training_data.csv")
@pytest.fixture()
def signature():
    # Model signature matching the columns of training_data.
    # NOTE(review): capital_gain/capital_loss are declared DT_STRING despite
    # their numeric-sounding names -- confirm this matches the CSV dtypes.
    return proto.ModelSignature(
        inputs=[
            proto.ModelField(name="education", dtype=proto.DataType.DT_STRING),
            proto.ModelField(name="marital_status", dtype=proto.DataType.DT_STRING),
            proto.ModelField(name="capital_gain", dtype=proto.DataType.DT_STRING),
            proto.ModelField(name="capital_loss", dtype=proto.DataType.DT_STRING),
            proto.ModelField(name="country", dtype=proto.DataType.DT_STRING),
            proto.ModelField(name="age", dtype=proto.DataType.DT_INT32),
            proto.ModelField(name="workclass", dtype=proto.DataType.DT_STRING),
            proto.ModelField(name="relationship", dtype=proto.DataType.DT_STRING),
            proto.ModelField(name="race", dtype=proto.DataType.DT_STRING),
            proto.ModelField(name="sex", dtype=proto.DataType.DT_STRING),
            proto.ModelField(name="hours_per_week", dtype=proto.DataType.DT_INT32),
        ],
        outputs=[proto.ModelField(name="income", dtype=proto.DataType.DT_INT32)],
    )
@pytest.fixture
def adult_report(training_data, signature):
    # Unprocessed report comparing the training data against itself
    # (production_data == training_data), so no drift should be detected.
    report = StatisticalReport(
        filename="test",
        file_timestamp=datetime.now(),
        model_name="model",
        model_version=1,
        signature=signature,
        training_data=training_data,
        production_data=training_data,
    )
    return report
def check_heatmap_integirty(h: HeatMapData):
    """Return True when every column of the heat-map intensity sums to 1 (+-0.01).

    The previous implementation compared only the *minimum* column sum to 1,
    so columns summing to more than 1.01 slipped through undetected; compare
    the full vector of column sums instead.
    (Function-name typo "integirty" kept so existing callers keep working.)
    """
    # All columns should sum up to 1 +- 0.01
    return np.allclose(h.intensity.sum(axis=0), 1, atol=0.01)
def test_report_processing(adult_report: StatisticalReport):
    # Smoke test: processing plus both serialisations must not raise.
    adult_report.process()
    print(adult_report.to_proto())
    print(adult_report.to_json())
def test_heatmap_intensities(adult_report: StatisticalReport):
    # Every bivariate heat map (training and production side) must have
    # column-normalised intensities.
    adult_report.process()
    for fr in adult_report.feature_reports:
        for bvr in fr.bivariate_reports:
            assert check_heatmap_integirty(bvr.production_heatmap)
            assert check_heatmap_integirty(bvr.training_heatmap)
|
{"hexsha": "7c80b43d8f72dab853080a55d1f840bdc7fb9f42", "size": 2630, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_report.py", "max_stars_repo_name": "rufusnufus/drift-report-plugin", "max_stars_repo_head_hexsha": "37019491a82e3478d6bfc718962a477266e1fa26", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_report.py", "max_issues_repo_name": "rufusnufus/drift-report-plugin", "max_issues_repo_head_hexsha": "37019491a82e3478d6bfc718962a477266e1fa26", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_report.py", "max_forks_repo_name": "rufusnufus/drift-report-plugin", "max_forks_repo_head_hexsha": "37019491a82e3478d6bfc718962a477266e1fa26", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6052631579, "max_line_length": 87, "alphanum_fraction": 0.7224334601, "include": true, "reason": "import numpy", "num_tokens": 558}
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import multiprocessing
from enum import Enum, auto
from itertools import count, product
from numbers import Number
from typing import (
Any,
Callable,
Iterable,
Iterator,
Optional,
Union,
List,
Tuple,
Dict,
TypeVar,
)
import numpy as np
from skimage.util import view_as_windows
from ._data_view_common_docs import data_view_common_docs
"""
This file holds simple utility functions for operations on numpy arrays.
"""
A = TypeVar("A")
Index = Union[int, List, Tuple, slice, np.ndarray]
Indexer = Union[Index, Tuple[Index, ...]]
def is_index(value) -> bool:
    """Return True when *value*'s exact type is a supported index type.

    Deliberately an exact type check (not isinstance), so subclasses such
    as bool do not qualify.
    """
    valid_types = (int, list, tuple, slice, np.ndarray)
    return type(value) in valid_types


def is_indexer(value) -> bool:
    """Return True for a single Index, or a tuple made only of Index values."""
    if type(value) is not tuple:
        return is_index(value)
    return all(is_index(member) for member in value)


def check_index(index: Index) -> "Index":
    """Return *index* unchanged, or raise IndexError when it is not a
    supported index type."""
    if is_index(index):
        return index
    raise IndexError(
        "Index '{}' is not one of the valid index types: "
        "[int, list, tuple, slice, np.ndarray]".format(index)
    )


def check_indexer(indexer: Indexer, ndim: int = 2) -> "Indexer":
    """Validate *indexer* and normalise it to a tuple of Index objects.

    Raises IndexError when the value is not a valid indexer, or when its
    length differs from *ndim* (unless *ndim* is None).
    """
    if not is_indexer(indexer):
        raise IndexError(
            "Indexer '{}' is not one of the valid indexer types: "
            "[Index, Tuple[Index, ...]]".format(indexer)
        )
    if type(indexer) is not tuple:
        # Wrap a bare index into a length-1 tuple.
        indexer, length = (indexer,), 1
    else:
        length = len(indexer)
    if ndim is not None and not length == ndim:
        raise IndexError(
            "Expected indexer of length '{}' but got indexer of length '{}'.".format(
                ndim, len(indexer)
            )
        )
    return indexer
def is_one_axis_array(x: np.ndarray) -> bool:
    """True when *x* has more than one element along at most one axis.

    A 1-D array always qualifies (even when empty), as does any array with a
    single element; otherwise exactly one dimension may be larger than 1.

    Examples: shapes (0,), (5,), (1,), (5,1), (1,5), (1,1,5), (1,1,1,1) -> True;
              shapes (5,2), (2,1,5) -> False.
    """
    shape = x.shape
    # 1-D arrays qualify regardless of length.
    if len(shape) == 1:
        return True
    # A single element qualifies regardless of dimensionality.
    if x.size == 1:
        return True
    # Otherwise exactly one axis may have extent > 1.
    wide_axes = sum(1 for extent in shape if extent > 1)
    return wide_axes == 1
def d1_to_col(array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single column; return >=2-D arrays as-is."""
    if array.ndim >= 2:
        return array
    return array.reshape((-1, 1))
def d1_to_row(array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single row; return >=2-D arrays as-is."""
    if array.ndim >= 2:
        return array
    return array.reshape((1, -1))
def scalar_to_array(value) -> np.ndarray:
    """Wrap a scalar into a 1-D array; pass non-scalars through unchanged."""
    return np.atleast_1d(value) if np.isscalar(value) else value
def scalar_to_slice_index(value: Index) -> "Index":
    """Convert an int index into the equivalent one-element slice.

    Non-int indices are validated and returned unchanged.
    """
    checked = check_index(value)
    if type(checked) is int:
        return slice(checked, checked + 1, 1)
    return checked


def scalars_to_slice_indexer(value: Indexer, ndim: int = 2) -> "Indexer":
    """Normalise *value* into a length-*ndim* tuple of indices, widening any
    scalar int entries into one-element slices."""
    if type(value) is tuple:
        indexer = tuple(scalar_to_slice_index(part) for part in check_indexer(value))
    else:
        indexer = scalar_to_slice_index(check_indexer(value))
    return check_indexer(indexer, ndim=ndim)
def raise_1d_to_2d(array: np.ndarray) -> np.ndarray:
    """Add a trailing singleton axis to a 1-D array; pass a 2-D array through.

    Asserts that the input is 1-D or 2-D.
    """
    assert array.ndim in (1, 2)
    return array[:, None] if array.ndim == 1 else array
def diagflat(v: np.ndarray, k: int = 0) -> np.ndarray:
    """Wraps "np.diagflat" for >= 2D inputs.

    For a 1-D input this is exactly np.diagflat.  For higher ranks, the last
    axis of *v* becomes the offset-*k* diagonal of a matrix, batched over all
    leading axes.
    """
    if v.ndim == 1:
        return np.diagflat(v, k=k)
    eye = np.eye(v.shape[-1], k=k)[(v.ndim - 1) * (None,)]
    return v[..., None] * eye
def diagonal(v: np.ndarray, k: int = 0) -> np.ndarray:
    """Wraps "np.diagonal" for >= 2D inputs: extract the offset-*k* diagonal
    of the last two axes."""
    return np.diagonal(v, offset=k, axis1=-2, axis2=-1)
def upcast_ndim(x: np.ndarray, ndim: int) -> np.ndarray:
    """Prepend singleton axes so that *x* has dimensionality *ndim*.

    Mirrors NumPy broadcasting, which implicitly treats a lower-rank operand
    as if it had length-1 dimensions on its left
    (https://numpy.org/doc/stable/user/basics.broadcasting.html).

    Args:
    ----
    x: np.ndarray, input array to upcast
    ndim: int, target dimensionality

    Returns:
    ----
    Reshaped view of *x*, or *x* itself when already at the target shape.
    """
    target = (ndim - x.ndim) * (1,) + x.shape
    return x if x.shape == target else x.reshape(target)


def upcast_tile(x: np.ndarray, shape: Tuple) -> np.ndarray:
    """Upcast *x* and tile its singleton axes so its shape equals *shape*.

    After upcasting, every axis of *x* must either match the target extent or
    have length 1; length-1 axes are repeated to the target size.

    Args:
    ----
    x: np.ndarray, input array to upcast and tile
    shape: Tuple, exact target shape

    Returns:
    ----
    Array of exactly the requested shape.
    """
    x = upcast_ndim(x, len(shape))
    if x.shape == shape:
        return x
    assert all(xs == s or xs == 1 for xs, s in zip(x.shape, shape))
    reps = [s if xs == 1 else 1 for xs, s in zip(x.shape, shape)]
    return np.tile(x, reps)
class SliceMaker(object):
    """Helper whose indexing syntax returns the index expression itself.

    The shared instance ``make_slice`` below lets callers write numpy-style
    slice notation wherever a slice object is needed::

        >>> array = np.ones((5))
        >>> new_array = array[make_slice[0:3]]
        >>> new_array
        array([1., 1., 1.])

    Use this for the `select_x` methods in `TimeSeries` classes.
    """

    def __getitem__(self, item):
        # Whatever appears inside the brackets (slice, int, tuple, ...)
        # is returned verbatim.
        return item


# Shared instance used to build slices via bracket notation.
make_slice = SliceMaker()
"""
Function that act as masks that operate on numpy arrays,
convenience functions to serve as partial functions
"""
def equal_mask(x: np.ndarray, target: float) -> np.ndarray:
    """Elementwise boolean mask: True where ``x == target``."""
    mask = x == target
    return mask


def less_mask(x: np.ndarray, target: float) -> np.ndarray:
    """Elementwise boolean mask: True where ``x < target``."""
    mask = x < target
    return mask


def greater_mask(x: np.ndarray, target: float) -> np.ndarray:
    """Elementwise boolean mask: True where ``x > target``."""
    mask = x > target
    return mask


def le_mask(x: np.ndarray, target: float) -> np.ndarray:
    """Elementwise boolean mask: True where ``x <= target``."""
    mask = x <= target
    return mask


def ge_mask(x: np.ndarray, target: float) -> np.ndarray:
    """Elementwise boolean mask: True where ``x >= target``."""
    mask = x >= target
    return mask
class IntervalTypes(Enum):
    """Endpoint-inclusion conventions for a numeric interval with bounds a, b."""
    # NOTE: auto() values depend on declaration order -- do not reorder.
    CLOSED_OPEN = auto()  # [a, b)
    OPEN_CLOSED = auto()  # (a, b]
    CLOSED_CLOSED = auto()  # [a, b]
    OPEN_OPEN = auto()  # (a, b)
def closed_open_range_mask(
    x: np.ndarray, end_points: Tuple[float, float]
) -> np.ndarray:
    """Mask for the half-open interval [a, b): True where ``a <= x < b``."""
    lo, hi = end_points
    return (x >= lo) & (x < hi)


def open_closed_range_mask(
    x: np.ndarray, end_points: Tuple[float, float]
) -> np.ndarray:
    """Mask for the half-open interval (a, b]: True where ``a < x <= b``."""
    lo, hi = end_points
    return (x > lo) & (x <= hi)


def closed_closed_range_mask(
    x: np.ndarray, end_points: Tuple[float, float]
) -> np.ndarray:
    """Mask for the closed interval [a, b]: True where ``a <= x <= b``."""
    lo, hi = end_points
    return (x >= lo) & (x <= hi)


def open_open_range_mask(x: np.ndarray, end_points: Tuple[float, float]) -> np.ndarray:
    """Mask for the open interval (a, b): True where ``a < x < b``."""
    lo, hi = end_points
    return (x > lo) & (x < hi)
@data_view_common_docs
def chunk(
    arr: A,
    size: int,
    step: int = 1,
    ragged: bool = False,
    slice_function: Optional[Callable[[A, int, int], A]] = None,
    len_function: Optional[Callable[[A], int]] = None,
) -> Iterator[A]:
    """Chunk an array into `size` segments every `step` elements.
    Parameters
    ----------
    arr: A
        Any object on which the provided slice_function and len_function
        operate on.
    {size}
    {step}
    {ragged}
    slice_function: Optional[Callable[[A, int, int], A]]
        The function that can be used to slice the input array `arr` into
        segments.  If no slice function is specified, `__slice__` is called.
    len_function: Optional[Callable[[A], int]]
        The function that can be used to get the length of the input array
        `arr`.  If no length function is specified, `__len__` is called.
    Examples
    --------
    >>> import numpy as np
    >>> list(chunk(np.arange(0, 10), 4, 2))
    [array([0, 1, 2, 3]),
     array([2, 3, 4, 5]),
     array([4, 5, 6, 7]),
     array([6, 7, 8, 9])]
    See Also
    --------
    skimage.view_as_windows: Rolling window view of the input n-dimensional array
    """
    # These default functions work for 1D numpy arrays
    # as well as lists and tuples.
    if slice_function is None:
        def slice_function(x, start, stop):
            return x[start:stop]
    if len_function is None:
        def len_function(x):
            return len(x)
    arr_len = len_function(arr)
    # With ragged=False a chunk is emitted only when its full `size` elements
    # fit inside the array; with ragged=True a shorter tail chunk is allowed.
    def compare_value(k):
        return k if ragged else k + size - 1
    for k in count(start=0, step=step):
        if compare_value(k) >= arr_len:
            break
        else:
            yield slice_function(arr, k, k + size)
# I stole these functions from https://stackoverflow.com/a/45555516
def _unpacking_apply_along_axis(
all_args: Tuple[Callable, int, np.ndarray, Tuple, Dict]
) -> np.ndarray:
"""
Like numpy.apply_along_axis(), but with arguments in a tuple
instead.
This function is useful with multiprocessing.Pool().map(): (1)
map() only handles functions that take a single argument, and (2)
this function can generally be imported from a module, as required
by map().
"""
(func1d, axis, arr, args, kwargs) = all_args
return np.apply_along_axis(func1d, axis, arr, *args, **kwargs)
def parallel_apply_along_axis(
    func1d: Callable,
    axis: int,
    arr: np.ndarray,
    *args: Tuple,
    pool: Optional["multiprocessing.Pool"] = None,
    **kwargs: Dict
) -> np.ndarray:
    """
    Like numpy.apply_along_axis(), but takes advantage of multiple
    cores.

    The array is split along axis 0 into one sub-array per CPU and each
    chunk is processed by a pool worker via _unpacking_apply_along_axis.
    When *pool* is supplied it is reused and left open; otherwise a fresh
    pool is created and closed before returning.
    """
    # Effective axis where apply_along_axis() will be applied by each
    # worker (any non-zero axis number would work, so as to allow the use
    # of `np.array_split()`, which is only done on axis 0):
    effective_axis = 1 if axis == 0 else axis
    if effective_axis != axis:
        arr = arr.swapaxes(axis, effective_axis)
    # Chunks for the mapping (only a few chunks):
    chunks = [
        (func1d, effective_axis, sub_arr, args, kwargs)
        for sub_arr in np.array_split(arr, multiprocessing.cpu_count())
    ]
    if pool is None:
        pool = multiprocessing.Pool()
        close_pool = True
    else:
        close_pool = False
    individual_results = pool.map(_unpacking_apply_along_axis, chunks)
    # Freeing the workers:
    if close_pool:
        pool.close()
        pool.join()
    # NOTE(review): when axis == 0 the axes were swapped above but the result
    # is concatenated without swapping back -- confirm callers expect this.
    return np.concatenate(individual_results)
def grouped(iterable: Iterable[Any], n: int) -> Iterable[Iterable[Any]]:
    """Yield successive n-tuples: s -> (s0..sn-1), (sn..s2n-1), ...

    Trailing elements that do not fill a complete group are dropped.
    """
    # n references to ONE iterator: zip pulls n consecutive items per tuple.
    shared_iterator = iter(iterable)
    return zip(*(n * [shared_iterator]))
def nan_like(target: Union[Number, np.ndarray]) -> Union[Number, np.ndarray]:
    """Return NaN for a scalar Number, or a NaN-filled float array with the
    same shape as an ndarray *target*.

    Raises ValueError for any other input type.
    """
    if isinstance(target, Number):
        return np.nan
    if isinstance(target, np.ndarray):
        # Always float64, regardless of target dtype (NaN needs a float type).
        filled = np.empty(target.shape)
        filled.fill(np.nan)
        return filled
    raise ValueError("Target is not supported")
def window_inplace(
    x: np.ndarray, window: Union[int, Tuple[int, int]], causal: bool = False
) -> np.ndarray:
    """Wrapper around "skimage.util.view_as_windows" to support arbitrary, causal or acausal window shifts.
    The window should either be an integer sample size or a tuple with inclusive left and right bounds relative to
    the current sample (considered as 0). The latter allows for arbitrarily time-shifted and asymmetric windows.
    Windowing with a for loop and copy is slow and memory-hungry. For an arbitrary array with window W, the size
    of the array in memory increases by a factor of W. For large arrays, this can easily fill up RAM. Instead of
    copying, one can instead take a strided view into the array that simply repeats the indices to simulate the
    windowed array in place without reallocating memory.
    Args:
    ----
        x: np.ndarray, input array to window (windowed along axis 0)
        window: int or tuple pair of ints, window size or inclusive window bounds relative to current sample
        causal: bool, when using an integer window whether to distribute the samples causally or not
    Returns:
    ----
        Returns windowed input array
    """
    # Window in-place (without copying) using memory-efficient stride tricks
    if type(window) is int:
        # Convert integer window length to inclusive (left, right) bounds
        # relative to the current sample: centered (right-heavy for even
        # lengths) when acausal, entirely trailing when causal.
        window = (
            (-((window - 1) // 2), (window - 1) // 2 + (window - 1) % 2)
            if not causal
            else (-window + 1, 0)
        )
    assert window[0] <= window[1]
    # Zero-pad axis 0 so that every original sample gets a full window:
    # -min(window[0], 0) samples in front, max(window[1]-1, 0)+1 behind.
    x = np.pad(
        x,
        ((-min(window[0], 0), max(window[1] - 1, 0) + 1),) + (x.ndim - 1) * ((0, 0),),
        mode="constant",
    )
    # Strided (zero-copy) view of all windows of the requested length; the
    # outer slice trims the windows introduced purely by the padding so the
    # result realigns with the original samples.
    w = view_as_windows(x, (window[1] - window[0] + 1,) + x.shape[1:])[
        None if window[0] < 0 else window[0] : None if window[1] > 0 else window[1] - 1
    ]
    if x.ndim > 1:
        # view_as_windows keeps singleton dims for the non-windowed axes;
        # drop them, then move the window axis to the end:
        # (samples, window, features...) -> (samples, features..., window).
        w = w.squeeze(axis=tuple(range(1, x.ndim)))
        w = w.transpose((0,) + tuple(range(2, w.ndim)) + (1,))
    return w
def mix_array(
    x: np.ndarray,
    nrepeat: Optional[int] = None,
    axis: Tuple[int, int] = (0, -1),
    mixing: Optional[np.ndarray] = None,
) -> np.ndarray:
    """Multiplies ND array by an independent 2D mixing matrix.
    Consider the simplest block-diagonal (mixing) matrix, the identity matrix, for example:
    [[1, 0, 0, 0, 0, 0],
     [0, 1, 0, 0, 0, 0],
     [0, 0, 1, 0, 0, 0],
     [0, 0, 0, 1, 0, 0],
     [0, 0, 0, 0, 1, 0],
     [0, 0, 0, 0, 0, 1]]
    This constitutes no effective mixing or scaling.
    An arbitrary block-diagonal matrix contains sub-matrices (blocks) >= 1 x 1 placed along the diagonal as so:
    [[A, 0, 0]]
    [[0, B, 0]]
    [[0, 0, c]]
    In practice, the blocks can be of varying (but always rectangular) shapes.
    [[6, 4, 0, 0, 0, 0],
     [2, 1, 0, 0, 0, 0],
     [0, 0, 1, 1, 4, 0],
     [0, 0, 3, 8, 5, 0],
     [0, 0, 6, 7, 1, 0],
     [0, 0, 0, 0, 0, 1]]
    However, unlike "scipy.linalg.block_diag," the mixing matrix need not be a block-diagonal matrix, and can
    instead mix the array with arbitrary mixing patterns.
    Args:
    ----
        x: np.ndarray, input array to mix
        nrepeat: int, number of times to effectively repeat array when using the default identity mixing matrix
        axis: tuple, pair of axes to multiply mixing matrix with, with the latter being repeated 2nd dimension of mixing
            matrix # of times
        mixing: np.ndarray, mixing matrix to multiply input array with
    Returns:
    ----
        Returns mixed input array
    """
    # Parse axes
    assert x.ndim >= 2
    assert len(axis) == 2
    # Normalize negative axis indices (e.g. -1) to non-negative positions.
    axis = [ax % x.ndim for ax in axis]
    if axis[0] == axis[1]:
        # Degenerate case: both axes coincide — insert an extra trailing
        # singleton after that axis so the two roles stay distinguishable.
        x = x[(axis[1] + 1) * (slice(None, None, None),) + (None,)]
    if nrepeat is None:
        nrepeat = x.shape[axis[0]]
    # nrepeat must equal the axis length, or one of the two must be 1
    # (broadcast-style repetition).
    assert nrepeat == x.shape[axis[0]] or x.shape[axis[0]] == 1 or nrepeat == 1
    # Apply mixing matrix
    # Insert a singleton axis right after axis[1]; the mixing matrix's 2nd
    # dimension will broadcast into it.
    x = x[(axis[1] + 1) * (slice(None, None, None),) + (None,)]
    if mixing is None:
        # Default: identity, i.e. no effective mixing (pure repetition).
        mixing = np.eye(nrepeat)
    else:
        assert mixing.ndim == 2
    # Reshape the 2D mixing matrix with singleton axes so that its first
    # dimension lines up with axis[0] of x and its second with the newly
    # inserted axis after axis[1], whatever the relative order of the axes.
    mixing = mixing[
        (min(axis) + int(axis[1] < axis[0])) * (None,)
        + (slice(None, None, None),)
        + (abs(axis[1] - axis[0]) - int(axis[1] < axis[0])) * (None,)
        + (slice(None, None, None),)
    ]
    # Broadcasted multiply performs the mixing.
    x = x * mixing
    # Fold the (axis[1], mixing-output) pair back into a single axis.
    x = x.reshape(x.shape[: axis[1]] + (-1,) + x.shape[axis[1] + 2 :])
    return x
def upcast_apply(
    func: Callable[[Any], Any],
    *args: Tuple,
    narr: Optional[int] = None,
    sub_ndim: int = 2,
    **kwargs: Dict
) -> Optional[Any]:
    """
    Allows normally *D-only (typically linear algebra) functions to support ND inputs. Many, but not all, NumPy and
    SciPy functions support this natively for functions that normally operate on 2D inputs.
    Args:
    ----
        func: callable, function to call on *D slices of upcasted input arrays
        *args: variable arguments, arrays to provide function followed by any positional arguments
        narr: int, number of positional arguments to consider to be arrays (otherwise inferred)
        sub_ndim: int, dimensionality of array slices to provide function
    **kwargs: variable named arguments, named keyword arguments to provide function
    Returns:
    ----
        Result(s) of applying ``func`` over the leading (broadcast) axes of
        the array arguments; a single ndarray for single-output ``func``,
        else a list with one ndarray per output.
    """
    # Parse arrays: the leading run of np.ndarray positional args are the
    # arrays; everything after the first non-array is forwarded to `func`.
    if narr is None:
        arr = ()
        remainder: Tuple = ()
        for i, arg in enumerate(args):
            if isinstance(arg, np.ndarray):
                arr += (arg,)
            else:
                remainder = args[i:]
                break
        # BUG FIX: when every positional argument was an array, the original
        # never reassigned `args`, so the arrays were passed to `func` twice.
        args = remainder
    else:
        arr, args = args[:narr], args[narr:]
    ndim = max(a.ndim for a in arr)
    assert ndim >= sub_ndim
    # Cast inputs as needed then apply function
    if ndim == 2:
        out = func(*(arr + args), **kwargs)
    else:
        arr = tuple(upcast_ndim(a, ndim) for a in arr)
        # BUG FIX: the original condition had misplaced parentheses
        # (`for ss in s or s[0] == 1 or ss == 1`), which made the `or`
        # clauses part of the iterable and rejected broadcastable size-1
        # axes. Each leading axis must match the first array's size or be 1.
        assert all(
            all(s[0] == ss or s[0] == 1 or ss == 1 for ss in s)
            for s in zip(*[a.shape[:-sub_ndim] for a in arr])
        )
        # Broadcast extent of each leading axis.
        samples = tuple(
            max(sample) for sample in zip(*[a.shape[:-sub_ndim] for a in arr])
        )
        # Apply `func` to every combination of leading indices; `index % s`
        # wraps size-1 axes, implementing the broadcast.
        out = [
            func(
                *[
                    a[tuple(index % s for index, s in zip(indices, a.shape))]
                    for a in arr
                ],
                **kwargs
            )
            for indices in product(*[range(sample) for sample in samples])
        ]
        if not isinstance(out[0], tuple):
            out = np.array(out)
        else:
            # `func` returned multiple outputs: regroup the flat result list
            # into one array per output position.
            for i, o in enumerate(zip(*out)):
                if not o[0].shape:
                    # Scalar outputs: reshape back to the sample grid.
                    out[i] = np.reshape(o, samples)
                elif len(o[0]):
                    out[i] = np.reshape(o, samples + o[0].shape)
                else:
                    # Empty (zero-size) outputs: synthesize an empty array of
                    # the right shape via a zero-length slice.
                    shape, slc = zip(
                        *[
                            (s, slice(None, None, None))
                            if s
                            else (1, slice(0, 0, None))
                            for s in o[0].shape
                        ]
                    )
                    slc = len(samples) * (slice(None, None, None),) + slc
                    out[i] = np.empty(samples + shape)[slc]
    return out
|
{"hexsha": "a32ecd98c061241b3d58a8773dd28285c247a8c4", "size": 19583, "ext": "py", "lang": "Python", "max_stars_repo_path": "signal_processing/utils/data_view/array_utils.py", "max_stars_repo_name": "dtemir/labgraph", "max_stars_repo_head_hexsha": "34e12f203ba1254291a6b3b08458ede75f1d4383", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-02T15:14:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T15:14:54.000Z", "max_issues_repo_path": "signal_processing/utils/data_view/array_utils.py", "max_issues_repo_name": "bennaaym/labgraph", "max_issues_repo_head_hexsha": "f29eb520335e883bb5244a6e4400cf34a281ec8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "signal_processing/utils/data_view/array_utils.py", "max_forks_repo_name": "bennaaym/labgraph", "max_forks_repo_head_hexsha": "f29eb520335e883bb5244a6e4400cf34a281ec8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3158682635, "max_line_length": 120, "alphanum_fraction": 0.597967625, "include": true, "reason": "import numpy", "num_tokens": 5332}
|
# Example exercising CImGuiFrontEnd's context-wrapper helpers (TreeNode,
# WithColumns, @SameLine, WithStyleColors) inside a single demo window.
G = CImGui
using Printf
DoGui() do
    # @cstatic keeps these values alive across frames (presumably the
    # CImGui.jl equivalent of C's `static` locals — confirm against the
    # CImGui.jl docs); `f` and `clear_color` are declared but unused here.
    @cstatic f=Cfloat(0.0) counter=Cint(0) one=false two=false clear_color=copy(CImGuiFrontEnd.default_clear_color) begin
        G.Begin("Hello, world!") # create a window called "Hello, world!" and append into it.
        # Nested collapsible sections via the TreeNode do-block wrapper.
        TreeNode("First section") do
            G.Text("This is some useful text.") # display some text
            TreeNode("Secret") do
                G.Text("Not really...")
            end
            TreeNode("Even more secret") do
                G.Text("Nothing to see here")
            end
        end
        # Two-column layout with headers "A" and "B"; one checkbox each.
        WithColumns(2, ["A", "B"]) do
            @c G.Checkbox("Column 1 Box", &one) # edit bools storing our window open/close state
            G.NextColumn()
            @c G.Checkbox("Column 2 Box", &two) # edit bools storing our window open/close state
        end
        # Lay out all contained widgets on one horizontal line.
        @SameLine begin
            G.Text("testing")
            G.Button("Button") && (counter += 1)
            G.Text("counter = $counter")
            @c G.Checkbox("Something", &two)
            G.Text("$one $two")
        end
        # Temporarily override the text color for the FPS readout.
        WithStyleColors(G.ImGuiCol_Text => G.HSV(0.0, 0.9, 0.8)) do
            G.Text(@sprintf("Application average %.3f ms/frame (%.1f FPS)", 1000 / G.GetIO().Framerate, G.GetIO().Framerate))
        end
        G.End()
    end
end
|
{"hexsha": "5b11b1255111dfed2a030341a8234ee8cad17ff2", "size": 1317, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/context_wrappers.jl", "max_stars_repo_name": "pengwyn/CImGuiFrontEnd.jl", "max_stars_repo_head_hexsha": "8bb5f0c840c07f5a5cbdf2b1588dab61b6d77704", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/context_wrappers.jl", "max_issues_repo_name": "pengwyn/CImGuiFrontEnd.jl", "max_issues_repo_head_hexsha": "8bb5f0c840c07f5a5cbdf2b1588dab61b6d77704", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/context_wrappers.jl", "max_forks_repo_name": "pengwyn/CImGuiFrontEnd.jl", "max_forks_repo_head_hexsha": "8bb5f0c840c07f5a5cbdf2b1588dab61b6d77704", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9318181818, "max_line_length": 125, "alphanum_fraction": 0.5429005315, "num_tokens": 353}
|
# Copyright 2021 The CLVR Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script for executing Gurobi on selected datasets on the DRO problem with Wasserstein metric
# based ambiguity sets. Command line usage:
# julia run_solver.jl <dataset> <method>
# method: -1=automatic, 0=primal simplex, 1=dual simplex, 2=barrier, 3=concurrent, 4=deterministic concurrent, 5=deterministic concurrent simplex.
using Gurobi
using JuMP
using LinearAlgebra
using SparseArrays
# Keep BLAS single-threaded so Gurobi's own "Threads" setting governs CPU use.
BLAS.set_num_threads(1)
include("../src/problems/standardLP.jl")
include("../src/algorithms/utils/exitcriterion.jl")
include("../src/algorithms/utils/results.jl")
include("../src/algorithms/utils/helper.jl")
include("../src/algorithms/clvr_lazy.jl")
include("../src/problems/dro/utils/libsvm_parser.jl")
include("../src/problems/dro/wasserstein.jl")
# Known datasets mapped to (feature dimension, number of samples).
DATASET_INFO = Dict([
    ("a1a", (123, 1605)),
    ("a9a", (123, 32561)),
    ("gisette", (5000, 6000)),
    ("news20", (1355191, 19996)),
    ("rcv1", (47236, 20242)),
])
# ARGS: <dataset name> <Gurobi Method code> (see header for method codes)
dataset = ARGS[1]
gurobimethod = parse(Int, ARGS[2])
# Dataset parameters
if !haskey(DATASET_INFO, dataset)
    throw(ArgumentError("Invalid dataset name supplied."))
end
dim_dataset, num_dataset = DATASET_INFO[dataset]
filepath = "./data/$(dataset).txt"
# Problem instance parameters
# κ: Wasserstein ball radius-related parameter; ρ: penalty/regularization
# weight (NOTE(review): exact roles defined in wasserstein.jl — confirm there).
κ = 0.1
ρ = 10.
# Problem instance instantiation: parse libsvm data, reformulate the DRO
# problem into a normalized standard-form LP (min c'x s.t. A_T' x = b, x >= 0).
yX_T = read_libsvm_into_yXT_sparse(filepath, dim_dataset, num_dataset)
A_T, b, c = droreformuation_wmetric_hinge_standardformnormalized(yX_T, κ, ρ)
# NOTE(review): `problem` is constructed but not used by the Gurobi run below.
problem = StandardLinearProgram(A_T, b, c)
# Gurobi
println("Setting up LP")
starttime = time()
vector_model = Model(Gurobi.Optimizer)
set_optimizer_attribute(vector_model, "Threads", 1)
set_optimizer_attribute(vector_model, "Method", gurobimethod)
@variable(vector_model, x[1:size(A_T)[1]] >= 0)
@constraint(vector_model, equality, A_T' * x .== b)
@objective(vector_model, Min, c' * x)
endtime = time()
println("=====> Setting up: $(endtime - starttime)")
println("Solving LP")
starttime = time()
optimize!(vector_model)
endtime = time()
println("=====> Solve time: $(endtime - starttime)")
# Report objective, solver termination reason and primal feasibility status.
println(objective_value(vector_model))
println(termination_status(vector_model))
println(primal_status(vector_model))
{"hexsha": "98a30a0e5aeb25d4e40240809c2222eb38930cb6", "size": 2708, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/run_solver.jl", "max_stars_repo_name": "ericlincc/Efficient-GLP", "max_stars_repo_head_hexsha": "9c9fce117ac4ff645cc82ed4ed84c75a52b83dea", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/run_solver.jl", "max_issues_repo_name": "ericlincc/Efficient-GLP", "max_issues_repo_head_hexsha": "9c9fce117ac4ff645cc82ed4ed84c75a52b83dea", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/run_solver.jl", "max_forks_repo_name": "ericlincc/Efficient-GLP", "max_forks_repo_head_hexsha": "9c9fce117ac4ff645cc82ed4ed84c75a52b83dea", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4269662921, "max_line_length": 146, "alphanum_fraction": 0.7418759232, "num_tokens": 720}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Dominik Höchemer <dominik.hoechemer@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
import argparse as ap
from datetime import datetime
import os
import pickle
import sys
import warnings
import numpy as np
import sklearn.metrics as metrics
import torch
from src.data.dataset import get_combined_dataset
from src.data.dataset_sampler import DatasetSamplerMultiDataset
from src.data.postprocessing import biternion2deg
from src.data.postprocessing import normalize_orientation_output
from src.data.preprocessing import OrientationAugmentation
from src.data.preprocessing import get_preprocessing
from src.evaluation_utils import get_statistics_binary
from src.evaluation_utils import pr_measures
from src.evaluation_utils import roc_measures
from src.io_utils import create_directory_if_not_exists
from src.logger import CSVLogger
from src.losses import get_new_loss_weights_dwa
from src.losses import VonmisesLossBiternion
from src.lr_decay import LRPolyDecay
from src.models import get_model_by_string
from src.parameters import add_hyperparameters_to_argparser
def _parse_args():
    """Parse and return the command-line arguments for training."""
    desc = 'Train neural network for multi-task person perception'
    arg_parser = ap.ArgumentParser(
        description=desc,
        formatter_class=ap.RawTextHelpFormatter,
    )

    # dataset -----------------------------------------------------------------
    arg_parser.add_argument(
        '-db', '--dataset_basepath',
        type=str,
        default='./datasets',
        help='Path to downloaded dataset',
    )
    arg_parser.add_argument(
        '-ds', '--datasets',
        type=str,
        default='multitask+orientation',
        choices=['multitask+orientation',
                 'multitask',
                 'orientation'],
        help='Datasets to use seperated by +',
    )
    arg_parser.add_argument(
        '-rd', '--result_dir',
        type=str,
        default='./results',
        help='Where to store the results',
    )

    # hyper parameters --------------------------------------------------------
    arg_parser = add_hyperparameters_to_argparser(arg_parser)

    return arg_parser.parse_args()
def main():
    """Run the full multi-task (detection/orientation/pose) training loop.

    Parses CLI arguments, builds the combined train/valid datasets and
    loaders, trains the model for ``--n_epochs`` epochs and, after every
    epoch, saves the model weights, dumps raw network outputs to disk and
    logs per-task losses and validation metrics to a CSV file.
    """
    args = _parse_args()

    batch_size = args.batch_size
    dataset_names = args.datasets.split('+')
    tasks = args.tasks.split('+')
    tasks.sort()    # sort so we always have the same order

    print('Loading Dataset from ' + args.dataset_basepath)
    print('Using datasets: ' + ' '.join(dataset_names))
    print('Tasks: ' + ' '.join(tasks))

    # Warn (but do not abort) about task/dataset combinations that lack
    # the corresponding ground truth.
    if 'orientation' in tasks and 'orientation' not in dataset_names:
        warnings.warn("No ground-truth data for orientation in datasets")

    if 'detection' in tasks and 'multitask' not in dataset_names:
        warnings.warn("No non-person data available in datasets")

    if 'pose' in tasks and 'multitask' not in dataset_names:
        warnings.warn("Only standing persons in datasets")

    # create training ID and folder, make sure random id is unique
    # (timestamp includes microseconds, so collisions are unlikely)
    training_starttime = datetime.now().strftime("%d_%m_%Y-%H_%M_%S-%f")
    train_dir = os.path.join(args.result_dir,
                             f'{args.training_name}__{training_starttime}')
    if os.path.exists(train_dir):
        raise IOError(f'Output directory: {train_dir} already exists.')
    create_directory_if_not_exists(train_dir)

    # per-epoch model checkpoints go here
    model_dir = os.path.join(train_dir, 'models')
    create_directory_if_not_exists(model_dir)

    # raw per-epoch labels/scores/losses pickles go here
    network_outputs_dir = os.path.join(train_dir, 'network_outputs')
    create_directory_if_not_exists(network_outputs_dir)

    # get preprocessing (model-specific input transform)
    data_transform, _, _ = get_preprocessing(args.model)

    # augmentation (currently only horizontal-flip-style orientation
    # augmentation or none)
    if args.augmentation == 'flip':
        augmentation = OrientationAugmentation()
    else:
        augmentation = None

    # train data
    dataset_list_train = get_combined_dataset(dataset_names,
                                              set_name='train',
                                              transform=data_transform,
                                              basepath=args.dataset_basepath,
                                              augmentation=augmentation)
    dataset_train = torch.utils.data.ConcatDataset(dataset_list_train)

    if args.dataset_combination == 'concat':
        # simple concatenation: batches sample uniformly over all datasets
        detection_train_loader = torch.utils.data.DataLoader(
            dataset_train, batch_size=batch_size, shuffle=True,
            num_workers=args.num_workers, pin_memory=False, drop_last=True
        )
    elif args.dataset_combination == '50_50':
        # create uniform dataset sampler (equal share per dataset in a batch)
        sampler = DatasetSamplerMultiDataset(dataset_list_train,
                                             batch_size=batch_size,
                                             shuffle=True)
        detection_train_loader = torch.utils.data.DataLoader(
            dataset_train,
            batch_size=batch_size,
            sampler=sampler,
            num_workers=args.num_workers,
            pin_memory=False,
            drop_last=True
        )
    else:
        raise ValueError(f"Unknown dataset combination method "
                         f"{args.dataset_combination}")

    # validation data (no augmentation, no shuffling, larger batches)
    dataset_list_valid = get_combined_dataset(dataset_names,
                                              set_name='valid',
                                              transform=data_transform,
                                              basepath=args.dataset_basepath)
    dataset_valid = torch.utils.data.ConcatDataset(dataset_list_valid)
    detection_valid_loader = torch.utils.data.DataLoader(
        dataset_valid,
        batch_size=2*batch_size, shuffle=False, num_workers=args.num_workers,
        pin_memory=False, drop_last=False)

    # Store the data loaders
    n_batches_train = len(dataset_train) // batch_size
    dataset_loaders = {
        'train': detection_train_loader,
        'valid': detection_valid_loader
    }

    # load network
    # use CUDA if available
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model = get_model_by_string(args.model, device)
    # used only for the detection metrics below (scores -> probabilities)
    softmax = torch.nn.Softmax(dim=1)

    # optimizer
    params_lr = [{'params': model.parameters(), 'lr': args.learning_rate}]
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(params_lr, momentum=args.momentum)
        if args.model.startswith('mobilenetv2'):
            warnings.warn("\n\nMobileNetV2 should be trained with Adam\n\n")
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(params_lr)
    else:
        raise NotImplementedError(f"Optimizer {args.optimizer} not yet "
                                  f"implemented")

    # losses and loss weights
    # reduction='none' keeps per-sample losses so that task masks can be
    # applied before averaging
    criterions = {
        'detection': torch.nn.CrossEntropyLoss(reduction='none'),
        'orientation': VonmisesLossBiternion(kappa=args.kappa),
        'pose': torch.nn.CrossEntropyLoss(reduction='none', ignore_index=-100)
    }
    weights = {'detection': args.weight_detection,
               'orientation': args.weight_orientation,
               'pose': args.weight_pose}
    # per-task train-loss history; consumed by the DWA weight update below
    weights_loss_history = {'detection': [],
                            'orientation': [],
                            'pose': []}

    # lr decay (polynomial schedule over the total number of iterations)
    lr_decay = LRPolyDecay(args.learning_rate,
                           power=args.learning_rate_decay,
                           max_iter=args.n_epochs*n_batches_train,
                           lr_min=1e-6)

    # logging
    csvlogger = CSVLogger(os.path.join(train_dir, 'training.csv'))

    # dump the parameters which were given to the script
    with open(os.path.join(train_dir, 'argument_list.txt'), 'w') as f:
        f.write(' '.join([f'--{k} {v}' for k, v in vars(args).items()]) + '\n')

    # train loop
    for epoch in range(args.n_epochs):    # loop over the dataset
        running_loss_train = 0.0

        # create dicts for labels and scores, reset every epoch
        labels = {'train': {}, 'valid': {}}
        scores = {'train': {}, 'valid': {}}
        losses = {'train': {}, 'valid': {}}
        metainfos = {'train': {}, 'valid': {}}
        losses_by_task = {'train': {}, 'valid': {}}
        losses_overall = {'train': 0.0, 'valid': 0.0}
        for phase in ['train', 'valid']:
            for task in tasks:
                labels[phase][task] = []
                scores[phase][task] = []
                losses[phase][task] = []
            for info in ['dataset', 'category_name', 'tape_name', 'person_id',
                         'image_id', 'instance_id']:
                metainfos[phase][info] = []

        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()    # set model to train (dropout enabled etc.)
            else:
                model.eval()    # set model to eval (dropout disabled etc.)
                print('Validation...')

            # iterate through dataset
            for batch_idx, batch_data in enumerate(dataset_loaders[phase]):
                input_images = batch_data['image'].to(device, non_blocking=True)

                # build the per-task target tensors for this batch
                targets = {}
                if 'detection' in tasks:
                    targets['detection'] = \
                        batch_data['is_person'].to(device, non_blocking=True).long()
                if 'orientation' in tasks:
                    # angle in degrees -> biternion (cos, sin) representation
                    orientations_deg = np.array(batch_data['orientation'])
                    orientations_bit = \
                        np.array([np.cos(np.deg2rad(orientations_deg)),
                                  np.sin(np.deg2rad(orientations_deg))],
                                 dtype=np.float32)
                    orientations_bit = \
                        torch.from_numpy(orientations_bit.transpose((1, 0)))
                    targets['orientation'] = \
                        orientations_bit.contiguous().to(device,
                                                         non_blocking=True)
                if 'pose' in tasks:
                    targets['pose'] = batch_data['pose'].to(device,
                                                            non_blocking=True)

                # reset gradients
                optimizer.zero_grad()

                # track gradients only in train phase
                with torch.set_grad_enabled(phase == 'train'):
                    # single forward pass; output channels are split by task:
                    # [0:2] detection logits, [2:4] biternion orientation,
                    # [4:7] pose logits
                    output = model(input_images)
                    predictions = {
                        'detection': output[:, 0:2],
                        'orientation': normalize_orientation_output(output[:, 2:4]),
                        'pose': output[:, 4:7]
                    }

                    loss = 0
                    for task in tasks:
                        if task == 'orientation':
                            # mask out only the orientation samples
                            # NOTE(review): the mask is wrapped in a list —
                            # relies on advanced indexing below; confirm this
                            # is intentional rather than a stray bracket
                            mask = [torch.from_numpy(np.array(batch_data['dataset']) == 'orientation')]
                        elif task == 'pose':
                            # mask out patches without pose
                            mask = targets[task] != -100
                        else:
                            # this will return all elements
                            mask = None
                        loss_ = criterions[task](predictions[task],
                                                 targets[task])
                        # loss for backpropagation (task-weighted, masked)
                        loss += weights[task] * loss_[mask].mean()
                        # (unmasked) loss for stats and running mean later
                        losses[phase][task].append(loss_.detach())

                    # run backpropagation and parameter update
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                        lr_decay.update_optimizer(optimizer)

                # statistics
                # store all batch labels, scores and meta information
                for task in tasks:
                    labels[phase][task].append(targets[task].clone())
                    scores[phase][task].append(predictions[task].detach())
                for info in ['dataset', 'category_name', 'tape_name',
                             'person_id', 'image_id', 'instance_id']:
                    metainfos[phase][info].append(batch_data[info])

                if phase == 'train':
                    running_loss_train += loss.item()
                    if batch_idx % 10 == 9:
                        # print every 10 mini-batches
                        running_loss_train_mean = \
                            running_loss_train / (batch_idx + 1)
                        print(f'[{(epoch+1):d}, {(batch_idx+1): 5d}] '
                              f'train_loss: {running_loss_train_mean:.6f}')

            # phase (train or valid) completed
            # concatenate the per-batch lists into single tensors/arrays
            for task in tasks:
                labels[phase][task] = torch.cat(labels[phase][task])
                scores[phase][task] = torch.cat(scores[phase][task])
                losses[phase][task] = torch.cat(losses[phase][task])
            for info in ['dataset', 'category_name', 'tape_name', 'person_id',
                         'image_id', 'instance_id']:
                metainfos[phase][info] = np.concatenate(metainfos[phase][info])

            # calculate additional losses
            for task in tasks:    # calculate losses by task
                if task == 'orientation':
                    # only samples with orientation ground truth count
                    mask = metainfos[phase]['category_name'] == 'person-standing-deeporientation'
                    losses_by_task[phase][task] = losses[phase][task][mask].mean().item()
                elif task == 'pose':
                    # mask out person-Without-Pose
                    mask = labels[phase][task] != -100
                    losses_by_task[phase][task] = losses[phase][task][mask].mean().item()
                else:
                    losses_by_task[phase][task] = losses[phase][task].mean().item()
                losses_overall[phase] += weights[task] * losses_by_task[phase][task]

                if phase == 'train':
                    weights_loss_history[task].append(losses_by_task[phase][task])

            # move everything to cpu and convert to python structures
            for task in tasks:
                labels[phase][task] = labels[phase][task].cpu().numpy().tolist()
                scores[phase][task] = scores[phase][task].cpu().numpy().tolist()
                losses[phase][task] = losses[phase][task].cpu().numpy().tolist()

        # end of epoch reached
        # save weights
        torch.save(model.state_dict(),
                   os.path.join(model_dir, f'epoch_{epoch}.pt'))

        # dump all results
        with open(os.path.join(network_outputs_dir,
                               f'epoch_{epoch}.pkl'), 'wb') as f:
            pickle.dump({'labels': labels,
                         'scores': scores,
                         'losses': losses,
                         'metainfos': metainfos},
                        f, pickle.HIGHEST_PROTOCOL)

        # create logs for csvlogger
        logs = {'train_loss': losses_overall['train'],
                'valid_loss': losses_overall['valid'],
                'weight_detection': weights['detection'],
                'weight_orientation': weights['orientation'],
                'weight_pose': weights['pose']}
        for i, lr in enumerate(lr_decay.get_current_lr()):
            logs[f'lr_{i}'] = lr

        for phase in ['train', 'valid']:
            for task, l in losses_by_task[phase].items():
                logs[f'{phase}_{task}_loss'] = l

        # calculate statistics for validation data
        # detection
        if 'detection' in tasks:
            gt = np.array(labels['valid']['detection'])
            pred = softmax(torch.tensor(scores['valid']['detection'])).numpy()
            statistics_classification = get_statistics_binary(gt, pred)

            roc_classification = roc_measures(statistics_classification)
            ber = roc_classification['best_balanced_error_rate']
            logs['detection_valid_balanced_accuracy'] = 1 - ber[0]
            logs['detection_valid_balanced_accuracy_thresh'] = ber[1]

            pr_classification = pr_measures(statistics_classification)
            f1 = pr_classification['best_f1_score']
            logs['detection_valid_f1'] = f1[0]
            logs['detection_valid_f1_thresh'] = f1[1]

        # orientation
        if 'orientation' in tasks:
            # NOTE(review): `phase` here is left over from the loop above and
            # is always 'valid' at this point — works, but fragile; consider
            # using the literal 'valid'
            mask = metainfos[phase]['dataset'] == 'orientation'
            gt = np.array(labels['valid']['orientation'])[mask]
            pred = np.array(scores['valid']['orientation'])[mask]
            angle_errors = biternion2deg(pred) - biternion2deg(gt)
            # https://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles
            angle_errors = (angle_errors + 180.0) % 360.0 - 180.0
            # NOTE(review): despite the name, this is the mean ABSOLUTE error
            # (matching the 'orientation_valid_mae' log key), not an MSE
            angle_mse = np.mean(np.abs(angle_errors))
            logs['orientation_valid_mae'] = angle_mse

        # pose
        if 'pose' in tasks:
            gt = np.array(labels['valid']['pose'])
            pred = np.argmax(np.array(scores['valid']['pose']), axis=-1)
            mask = gt != -100
            conf = metrics.confusion_matrix(gt[mask], pred[mask],
                                            labels=[0, 1, 2])
            class_accuracies = conf.diagonal() / conf.sum(axis=-1)
            bal_accuracy = class_accuracies.mean()
            logs['pose_valid_balanced_accuracy'] = bal_accuracy

        # append epoch for csv logging
        logs['epoch'] = epoch
        csvlogger.write_logs(logs)

        # early stopping once the train loss is effectively zero
        if losses_overall['train'] <= 2*1e-6:
            with open(os.path.join(train_dir, 'early_stopping.csv'), 'w') as f:
                f.write('epoch,running_loss_train\n')
                f.write(f"{epoch},{losses_overall['train']:.10f}")
            break

        # update loss weights (dynamic weight averaging)
        if args.weight_mode == 'dwa':
            weights = get_new_loss_weights_dwa(
                cur_weights=weights,
                loss_history=weights_loss_history,
                tasks=tasks,
                momentum=args.weight_mode_dwa_momentum,
                t=args.weight_mode_dwa_temperature
            )

    # all done
    print('Finished Training')
# Script entry point.
if __name__ == '__main__':
    main()
|
{"hexsha": "899171e5e7bd83c05a254e59345e179586948a4c", "size": 18636, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "TUI-NICR/multi-task-person-perception", "max_stars_repo_head_hexsha": "81666eb42be9522fd726448e82e8bbf04138ffa3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-07-28T06:26:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T09:31:34.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "TUI-NICR/multi-task-person-perception", "max_issues_repo_head_hexsha": "81666eb42be9522fd726448e82e8bbf04138ffa3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "TUI-NICR/multi-task-person-perception", "max_forks_repo_head_hexsha": "81666eb42be9522fd726448e82e8bbf04138ffa3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.972972973, "max_line_length": 103, "alphanum_fraction": 0.5492058382, "include": true, "reason": "import numpy", "num_tokens": 3711}
|
[STATEMENT]
lemma generic_poly_closed:
"generic_poly R n \<in> carrier (coord_ring R (Suc (Suc n)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>])
[PROOF STEP]
apply(induction n)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. generic_poly R 0 \<in> carrier (R [\<X>\<^bsub>Suc (Suc 0)\<^esub>])
2. \<And>n. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>]) \<Longrightarrow> generic_poly R (Suc n) \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
using pvar_closed[of 1 "Suc (Suc n)"]
[PROOF STATE]
proof (prove)
using this:
1 < Suc (Suc n) \<Longrightarrow> pvar R 1 \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>])
goal (2 subgoals):
1. generic_poly R 0 \<in> carrier (R [\<X>\<^bsub>Suc (Suc 0)\<^esub>])
2. \<And>n. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>]) \<Longrightarrow> generic_poly R (Suc n) \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
apply (metis One_nat_def generic_poly.simps(1) lessI pvar_closed)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>]) \<Longrightarrow> generic_poly R (Suc n) \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>n. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>]) \<Longrightarrow> generic_poly R (Suc n) \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
fix n
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>n. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>]) \<Longrightarrow> generic_poly R (Suc n) \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
assume IH: "generic_poly R n \<in> carrier (coord_ring R (Suc (Suc n)))"
[PROOF STATE]
proof (state)
this:
generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>])
goal (1 subgoal):
1. \<And>n. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>]) \<Longrightarrow> generic_poly R (Suc n) \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
have "generic_poly R n \<in> carrier (coord_ring R (Suc (Suc (Suc n))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
using IH poly_ring_car_mono'[of "Suc (Suc n)"]
[PROOF STATE]
proof (prove)
using this:
generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>])
carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>]) \<subseteq> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>]) \<subseteq> carrier (R [\<X>\<^bsub>Suc (Suc n) + ?m\<^esub>])
goal (1 subgoal):
1. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
goal (1 subgoal):
1. \<And>n. generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>]) \<Longrightarrow> generic_poly R (Suc n) \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
show " generic_poly R (Suc n) \<in> carrier (coord_ring R (Suc (Suc (Suc n))))"
[PROOF STATE]
proof (prove)
using this:
generic_poly R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
goal (1 subgoal):
1. generic_poly R (Suc n) \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
[PROOF STEP]
unfolding coord_ring_def
[PROOF STATE]
proof (prove)
using this:
generic_poly R n \<in> carrier (Pring R {..<Suc (Suc (Suc n))})
goal (1 subgoal):
1. generic_poly R (Suc n) \<in> carrier (Pring R {..<Suc (Suc (Suc n))})
[PROOF STEP]
using generic_poly.simps[of R] generic_poly_lt_closed[of n]
[PROOF STATE]
proof (prove)
using this:
generic_poly R n \<in> carrier (Pring R {..<Suc (Suc (Suc n))})
generic_poly R 0 = pvar R 1
generic_poly R (Suc ?n) = generic_poly R ?n \<oplus>\<^bsub>R [\<X>\<^bsub>?n + 3\<^esub>]\<^esub> generic_poly_lt R (Suc ?n)
generic_poly_lt R n \<in> carrier (R [\<X>\<^bsub>Suc (Suc n)\<^esub>])
goal (1 subgoal):
1. generic_poly R (Suc n) \<in> carrier (Pring R {..<Suc (Suc (Suc n))})
[PROOF STEP]
by (metis MP.add.m_closed R.Pring_add_eq coord_ring_def generic_poly_lt_closed)
[PROOF STATE]
proof (state)
this:
generic_poly R (Suc n) \<in> carrier (R [\<X>\<^bsub>Suc (Suc (Suc n))\<^esub>])
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2149, "file": "Padic_Field_Ring_Powers", "length": 15}
|
#== # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Description
#
# Compute the satellite position.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ==#
export satellite_position_i
"""
satellite_position_i(a::Number, e::Number, i::Number, RAAN::Number, w::Number, f::Number)
Compute the satellite position in the Earth-Centered Inertial (ECI) reference
frame given the orbital elements `a`, `e`, `i`, `RAAN`, `w`, and `f`.
Notice that the ECI frame used will be the same as the frame of the orbital
elements.
# Args
* `a`: Semi-major axis.
* `e`: Eccentricity.
* `i`: Inclination [rad].
* `RAAN`: Right ascension of the ascending node [rad].
* `w`: Argument of perigee [rad].
* `f`: True anomaly [rad].
# Returns
* The satellite position vector represented in the ECI reference frame.
* The unit vector perpendicular to the satellite position vector that lies on
the orbit plane represented in the ECI reference frame.
# Remarks
The satellite position vector will have the same unit of the semi-major axis.
"""
function satellite_position_i(a::Number, e::Number, i::Number, RAAN::Number,
                              w::Number, f::Number)
    # Orbit equation: distance from the focus at true anomaly `f`.
    r = a*(1 - e^2)/(1 + e*cos(f))

    # Work in the frame S in which:
    #   - X points towards the satellite;
    #   - Z is normal to the orbit plane (right-hand direction);
    #   - Y completes a right-hand triad.
    # The position is purely radial and the in-plane transverse versor is Y.
    pos_s  = SVector{3}(1, 0, 0)*r
    perp_s = SVector{3}(0, 1, 0)

    # DCM that rotates S to the inertial frame: transpose of the 3-1-3
    # rotation built from RAAN, i, and the argument of latitude w + f.
    D_i_s = angle_to_dcm(RAAN, i, w + f, :ZXZ)'

    # Position and transverse versor expressed in the inertial frame.
    return D_i_s*pos_s, D_i_s*perp_s
end
|
{"hexsha": "714b32880ab815d6fa175ae2a5ffd52bd2547b56", "size": 2169, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/transformations/position.jl", "max_stars_repo_name": "disberd/SatelliteToolbox.jl", "max_stars_repo_head_hexsha": "441470938af978e9d5653a9c4b36ccc107023960", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 157, "max_stars_repo_stars_event_min_datetime": "2018-06-19T21:11:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T19:24:41.000Z", "max_issues_repo_path": "src/transformations/position.jl", "max_issues_repo_name": "disberd/SatelliteToolbox.jl", "max_issues_repo_head_hexsha": "441470938af978e9d5653a9c4b36ccc107023960", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 71, "max_issues_repo_issues_event_min_datetime": "2018-06-18T20:27:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T21:33:20.000Z", "max_forks_repo_path": "src/transformations/position.jl", "max_forks_repo_name": "disberd/SatelliteToolbox.jl", "max_forks_repo_head_hexsha": "441470938af978e9d5653a9c4b36ccc107023960", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2018-10-02T02:42:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T20:36:51.000Z", "avg_line_length": 30.5492957746, "max_line_length": 93, "alphanum_fraction": 0.6279391425, "num_tokens": 610}
|
import re
import numpy as np
from numba import jit
from collections import deque
ALPHABET = """abcdefghijklmnopqrstuvwxyz1234567890,.()[]"' -\n"""


class Tokenizer(object):
    """Character-level tokenizer over a fixed alphabet.

    Characters outside the alphabet map to a single UNK token whose integer
    id is ``len(self.alphabet)``; the one-hot dimension is therefore
    ``len(self.alphabet) + 1``.
    """

    def __init__(self, alphabet=ALPHABET, unk="~"):
        assert unk not in alphabet, "please keep UNK character not part of alphabet"
        self.alphabet = sorted(set(alphabet))
        # dict mapping each known character to its integer token
        self._to_int = dict((ch, idx) for idx, ch in enumerate(self.alphabet))
        self._unk_i = len(self.alphabet)  # token id reserved for unknown chars
        self.unk = unk
        self.V = len(self.alphabet) + 1  # dimension of the 1-hot vectors

    def to_ints(self, text):
        """Returns a list of integers"""
        lookup = self._to_int
        unk_i = self._unk_i
        return [lookup.get(ch, unk_i) for ch in text]

    def to_1hot(self, text):
        """Return an array of shape `(len(text), len(self.alphabet)+1)`."""
        tokens = self.to_ints(text)
        arr = np.zeros((len(text), self.V), dtype="float32")
        # One row per character: set the token's column to 1 via fancy indexing.
        arr[np.arange(len(tokens)), tokens] = 1
        return arr

    def from_ints(self, L):
        """Reconstruct the text given list of integer tokens"""
        known = len(self.alphabet)
        return "".join(self.alphabet[i] if i < known else self.unk for i in L)

    def from_1hot(self, arr):
        """Reconstruct the text given an array"""
        return self.from_ints(row.argmax() for row in arr)
@jit(nopython=True)
def normalized(arr):
    """Return `arr` rescaled so that its elements sum to 1."""
    return arr / arr.sum()
def next_line_with_rotation(f):
    """Read the next line of *f*, wrapping back to the start at EOF."""
    line = f.readline()
    if not line:
        # Hit end of file: rewind and read the first line again.
        f.seek(0)
        line = f.readline()
    return line
def data_generator(corpus_path, window_sizes, post_func):
    """Yield ``post_func(window)`` over a sliding character window of the corpus.

    The window holds ``1 + 2*max(window_sizes)`` characters.  The corpus is
    read line-by-line, lower-cased, and re-read from the start forever, so
    the generator never terminates.  Note the first yield happens after the
    window has advanced one character past its initial fill.
    """
    max_window = max(window_sizes)
    win_len = 1 + 2 * max_window
    with open(corpus_path, "r", encoding="utf-8") as f:
        # Pre-fill the character buffer generously before cutting a window.
        buffer = deque()
        while len(buffer) < win_len * 10 + max_window:
            buffer.extend(next_line_with_rotation(f).lower())
        window = deque(buffer.popleft() for _ in range(win_len))
        assert len(window) == win_len
        while True:
            # Top up the buffer before sliding the window forward.
            while len(buffer) < win_len * 10:
                buffer.extend(next_line_with_rotation(f).lower())
            window.popleft()
            window.append(buffer.popleft())
            yield post_func(window)
|
{"hexsha": "d798f8daa81fd53347fd1cf2676b779a6f23db89", "size": 2538, "ext": "py", "lang": "Python", "max_stars_repo_path": "char2vec/utils.py", "max_stars_repo_name": "sonlamho/Char2Vec", "max_stars_repo_head_hexsha": "88141ecf33cd48ad84c14f8999cb3988d51d205b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-02-11T15:49:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-19T16:47:54.000Z", "max_issues_repo_path": "char2vec/utils.py", "max_issues_repo_name": "sonlamho/Char2Vec", "max_issues_repo_head_hexsha": "88141ecf33cd48ad84c14f8999cb3988d51d205b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:13:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:30:59.000Z", "max_forks_repo_path": "char2vec/utils.py", "max_forks_repo_name": "sonlamho/Char2Vec", "max_forks_repo_head_hexsha": "88141ecf33cd48ad84c14f8999cb3988d51d205b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-02-11T15:54:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-26T17:44:19.000Z", "avg_line_length": 32.1265822785, "max_line_length": 84, "alphanum_fraction": 0.5961386919, "include": true, "reason": "import numpy,from numba", "num_tokens": 623}
|
"""
act.retrievals.cbh
------------------
Module that calculates cloud base heights in various ways.
"""
import numpy as np
import xarray as xr
from scipy import ndimage
def generic_sobel_cbh(obj, variable=None, height_dim=None,
                      var_thresh=None, fill_na=None,
                      return_thresh=False):
    """
    Function for calculating cloud base height from lidar/radar data
    using a basic sobel filter and thresholding. Note, this was not
    initially based on any published work, but a lit review indicates
    that there have been similar methods employed to detect boundary
    layer heights.

    Parameters
    ----------
    obj : ACT Object
        ACT object where data are stored.
    variable : string
        Variable on which to process.
    height_dim : string
        Height variable to use for CBH values.
    var_thresh : float
        Thresholding for variable if needed.
    fill_na : float
        What to fill nans with in DataArray if any.
    return_thresh : boolean
        If True, write the thresholded/filled data back into
        ``obj[variable]`` before the edge detection runs.

    Returns
    -------
    new_obj : ACT Object
        ACT Object with cbh values included as variable.
        (``None`` is returned when ``variable`` is not given.)

    Examples
    --------
    In testing on the ARM KAZR and MPL data, the following methods
    tended to work best for thresholding/corrections/etc.

    .. code-block:: python

        kazr = act.retrievals.cbh.generic_sobel_cbh(kazr,variable='reflectivity_copol',
                                                    height_dim='range', var_thresh=-10.)
        mpl = act.corrections.mpl.correct_mpl(mpl)
        mpl.range_bins.values = mpl.height.values[0,:]*1000.
        mpl.range_bins.attrs['units'] = 'm'
        mpl['signal_return_co_pol'].values[:,0:10] = 0.
        mpl = act.retrievals.cbh.generic_sobel_cbh(mpl,variable='signal_return_co_pol',
                                                   height_dim='range_bins',var_thresh=10.,
                                                   fill_na=0.)
        ceil = act.retrievals.cbh.generic_sobel_cbh(ceil,variable='backscatter',
                                                    height_dim='range', var_thresh=1000.,
                                                    fill_na=0)

    """
    # Nothing to process without a variable name; note this returns None,
    # not the unmodified object.
    if variable is None:
        return
    if fill_na is None:
        fill_na = var_thresh

    # Pull data into Standalone DataArray
    data = obj[variable]

    # Apply thresholds if set
    if var_thresh is not None:
        data = data.where(data.values > var_thresh)

    # Fill with fill_na values
    data = data.fillna(fill_na)

    # If return_thresh is True, replace variable data with
    # thresholded data
    if return_thresh is True:
        obj[variable].values = data.values

    # Apply Sobel filter to data and smooth the results
    data = data.values
    edge = ndimage.sobel(data)
    edge = ndimage.uniform_filter(edge, size=3, mode='nearest')

    # Create Data Array
    edge_obj = xr.DataArray(edge, dims=obj[variable].dims)

    # Filter some of the resulting edge data to get defined edges
    edge_obj = edge_obj.where(edge_obj > 5.)
    edge_obj = edge_obj.fillna(fill_na)

    # Do a diff along the height dimension to define edge
    # NOTE(review): xarray's diff normally takes a dimension *name*; passing
    # the integer 1 here presumably resolved to the height dimension in the
    # xarray version this was written against -- confirm.
    diff = edge_obj.diff(dim=1)

    # Get height variable to use for cbh
    height = obj[height_dim].values

    # Run through times and find the height
    cbh = []
    for i in range(np.shape(diff)[0]):
        # Indices of height bins where the smoothed edge jumps by more than 5.
        index = np.where(diff[i, :] > 5.)[0]
        # Height may be 2-D (a profile per time) or a shared 1-D profile.
        if len(np.shape(height)) > 1:
            ht = height[i, :]
        else:
            ht = height

        if len(index) > 0:
            cbh.append(ht[index[0] + 1])
        else:
            cbh.append(np.nan)

    # Create DataArray to add to Object
    da = xr.DataArray(cbh, dims=['time'], coords=[obj['time'].values])
    obj['cbh_sobel'] = da
    obj['cbh_sobel'].attrs['long_name'] = ' '.join(['CBH calculated from',
                                                    variable, 'using sobel filter'])
    obj['cbh_sobel'].attrs['units'] = obj[height_dim].attrs['units']

    return obj
|
{"hexsha": "fbd3444711c9f466bc70aaefeec0e0cadf8cf7c9", "size": 3953, "ext": "py", "lang": "Python", "max_stars_repo_path": "act/retrievals/cbh.py", "max_stars_repo_name": "rcjackson/ACT", "max_stars_repo_head_hexsha": "c57fb55094b142bbbef63e7069d4024049996139", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-13T16:10:37.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-13T16:10:37.000Z", "max_issues_repo_path": "act/retrievals/cbh.py", "max_issues_repo_name": "cgodine/ACT", "max_issues_repo_head_hexsha": "af9f0edb76e6f16e2764d5441a4bf4d7fb3a9f39", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "act/retrievals/cbh.py", "max_forks_repo_name": "cgodine/ACT", "max_forks_repo_head_hexsha": "af9f0edb76e6f16e2764d5441a4bf4d7fb3a9f39", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.624, "max_line_length": 88, "alphanum_fraction": 0.5990387048, "include": true, "reason": "import numpy,from scipy", "num_tokens": 924}
|
# -*- coding: utf-8 -*-
"""
Meteorological, 2D{1,1,2,1,1} dataset
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
# %%
# The following dataset is obtained from `NOAA/NCEP Global Forecast System (GFS)
# Atmospheric Model
# <https://coastwatch.pfeg.noaa.gov/erddap/griddap/NCEP_Global_Best.graph?ugrd10m[(2017-09-17T12:00:00Z)][(-4.5):(52.0)][(275.0):(331.5)]&.draw=surface&.vars=longitude%7Clatitude%7Cugrd10m&.colorBar=%7C%7C%7C%7C%7C&.bgColor=0xffccccff>`_
# and subsequently converted to the CSD model file-format.
# The dataset consists of two spatial dimensions describing the geographical
# coordinates of the earth surface and five dependent variables with
# 1) surface temperature, 2) air temperature at 2 m, 3) relative humidity,
# 4) air pressure at sea level as the four `scalar` quantity_type dependent
# variables, and 5) wind velocity as the two-component `vector`, quantity_type
# dependent variable.
#
# Let's import the `csdmpy` module and load this dataset.
import csdmpy as cp
filename = "https://osu.box.com/shared/static/6uhrtdxfisl4a14x9pndyze2mv414zyg.csdf"
# NOTE: `cp.load` fetches the dataset over the network from the URL above.
multi_dataset = cp.load(filename)
# sphinx_gallery_thumbnail_number = 5
# %%
# The tuple of dimension and dependent variable objects from
# ``multi_dataset`` instance are
x = multi_dataset.dimensions
y = multi_dataset.dependent_variables
# %%
# The dataset contains two dimension objects representing the `longitude` and
# `latitude` of the earth's surface. The labels along the respective dimensions are
print(x[0].label)
# %%
print(x[1].label)
# %%
# There are a total of five dependent variables stored in this dataset. The first
# dependent variable is the surface air temperature. The data structure of this
# dependent variable is
print(y[0].data_structure)
# %%
# If you have followed all previous examples, the above data structure should
# be self-explanatory.
# %%
# We will use the following snippet to plot the dependent variables of scalar
# `quantity_type`.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_scalar(yx):
    """Image-plot a scalar dependent variable *yx* over the two spatial dims."""
    fig, ax = plt.subplots(1, 1, figsize=(6, 3))

    # Image extents from the first/last coordinate along each dimension.
    extent = [
        x[0].coordinates[0].value,
        x[0].coordinates[-1].value,
        x[1].coordinates[0].value,
        x[1].coordinates[-1].value,
    ]

    # Draw the image.
    image = ax.imshow(yx.components[0], origin="lower", extent=extent, cmap="coolwarm")

    # Colorbar in a dedicated axis to the right of the image.
    cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
    bar = fig.colorbar(image, cax)
    bar.ax.set_ylabel(yx.axis_label[0])

    # Axes labels, figure title, and grid lines.
    ax.set_xlabel(x[0].axis_label)
    ax.set_ylabel(x[1].axis_label)
    ax.set_title(yx.name)
    ax.grid(color="k", linestyle="--", linewidth=0.5)

    plt.tight_layout()
    plt.show()
# %%
# Now to plot the data from the dependent variable.
plot_scalar(y[0])
# %%
# Similarly, other dependent variables with their respective plots are
print(y[1].name)
# %%
plot_scalar(y[1])
# %%
print(y[3].name)
# %%
plot_scalar(y[3])
# %%
print(y[4].name)
# %%
plot_scalar(y[4])
# %%
# Notice, we skipped the dependent variable at index two. The reason is that
# this particular dependent variable is a vector dataset (handled with a
# quiver plot further below),
print(y[2].quantity_type)
# %%
print(y[2].name)
# %%
# which represents the wind velocity, and requires a vector visualization
# routine. To visualize the vector data, we use the matplotlib quiver plot.
def plot_vector(yx):
    """Quiver-plot a two-component vector dependent variable *yx*."""
    fig, ax = plt.subplots(1, 1, figsize=(6, 3))

    # Arrow color encodes the vector magnitude.
    magnitude = np.sqrt(yx.components[0] ** 2 + yx.components[1] ** 2)
    arrows = ax.quiver(
        x[0].coordinates,
        x[1].coordinates,
        yx.components[0],
        yx.components[1],
        magnitude,
        pivot="middle",
        cmap="inferno",
    )

    # Colorbar in a dedicated axis to the right of the plot.
    cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
    bar = fig.colorbar(arrows, cax)
    bar.ax.set_ylabel(yx.name + " / " + str(yx.unit))

    # Limit the axes to the coordinate range of each dimension.
    ax.set_xlim([x[0].coordinates[0].value, x[0].coordinates[-1].value])
    ax.set_ylim([x[1].coordinates[0].value, x[1].coordinates[-1].value])

    # Axes labels, figure title, and grid lines.
    ax.set_xlabel(x[0].axis_label)
    ax.set_ylabel(x[1].axis_label)
    ax.set_title(yx.name)
    ax.grid(color="gray", linestyle="--", linewidth=0.5)

    plt.tight_layout()
    plt.show()
# %%
# Finally, render the wind-velocity vector field (dependent variable index 2).
plot_vector(y[2])
|
{"hexsha": "0bb74914da58fead57cf9c02b03cd53f38c90e0b", "size": 4660, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/correlated_examples/plot_1_meteorology.py", "max_stars_repo_name": "DeepanshS/csdmpy", "max_stars_repo_head_hexsha": "ae8d20dd09f217bb462af67a3145bb6fcb025def", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-01-04T20:46:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-26T21:09:25.000Z", "max_issues_repo_path": "examples/correlated_examples/plot_1_meteorology.py", "max_issues_repo_name": "deepanshs/csdmpy", "max_issues_repo_head_hexsha": "bd4e138b10694491113b10177a89305697f1752c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-06-09T06:28:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T18:12:33.000Z", "max_forks_repo_path": "examples/correlated_examples/plot_1_meteorology.py", "max_forks_repo_name": "deepanshs/csdmpy", "max_forks_repo_head_hexsha": "bd4e138b10694491113b10177a89305697f1752c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-03T17:04:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-03T17:04:16.000Z", "avg_line_length": 29.125, "max_line_length": 238, "alphanum_fraction": 0.6592274678, "include": true, "reason": "import numpy", "num_tokens": 1284}
|
import comet_ml
import pickle
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
from os.path import dirname, realpath
import sys
import git
sys.path.append(dirname(dirname(realpath(__file__))))
import torch
import torch.distributed as dist
import sandstone.datasets.factory as dataset_factory
import sandstone.models.factory as model_factory
import sandstone.augmentations.factory as augmentation_factory
import sandstone.utils.parsing as parsing
from sandstone.utils.generic import get_train_dataset_loader, get_eval_dataset_loader, get_nearest_neighbor_in_encoded_data
import warnings
from sandstone.utils.dataset_stats import get_dataset_stats
import pytorch_lightning as pl
import sandstone.learn.lightning.factory as lightning
from pytorch_lightning import LightningModule
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import _logger as log
import tqdm
import numpy as np
import json
#Constants
DATE_FORMAT_STR = "%Y-%m-%d:%H-%M-%S"
@torch.no_grad()
def main(args):
    """Build both challenge submissions and persist the model plus its args.

    Records the current git commit on ``args``, loads the train/test
    augmentation schemes, instantiates the lightning model, runs the two
    submission builders, then pickles ``args`` and saves the model under
    ``args.submission_dir``.
    """
    # Record the exact commit this run was produced from.
    repo = git.Repo(search_parent_directories=True)
    commit = repo.head.object
    args.commit = commit.hexsha
    log.info("Sandstone main running from commit: \n\n{}\n{}author: {}, date: {}".format(
        commit.hexsha, commit.message, commit.author, commit.committed_date))
    args.lightning_name = 'adversarial_attack'
    log.info("\nLoading data-augmentation scheme...")
    augmentations = augmentation_factory.get_augmentations(
        args.image_augmentations, args.tensor_augmentations, args)
    test_augmentations = augmentation_factory.get_augmentations(
        args.test_image_augmentations, args.test_tensor_augmentations, args)
    # Load dataset and add dataset specific information to args
    log.info("\nLoading data...")
    model = lightning.get_lightning_model(args)
    log.info("\nParameters:")
    for attr, value in sorted(args.__dict__.items()):
        if attr not in ['optimizer_state']:
            log.info("\t{}={}".format(attr.upper(), value))
    print("Model")
    print(model.attack_encoder)
    model = model.cuda()
    parallel_data_challenge(args, augmentations, test_augmentations, model)
    real_world_challenge(args, augmentations, test_augmentations, model)
    # Persist args and the full model for future evaluation; close the args
    # file deterministically instead of leaking the handle.
    args.model_path = os.path.join(args.submission_dir, 'model.p')
    with open(os.path.join(args.submission_dir, 'args.p'), 'wb') as args_f:
        pickle.dump(vars(args), args_f)
    torch.save(model, args.model_path)
def parallel_data_challenge(args, augmentations, test_augmentations, model):
    """Build the parallel-data challenge submission.

    Encodes the Stanford CXR test split with ``model`` and, for every image,
    looks up the nearest neighbor in the pre-encoded private dataset.  The
    resulting ``{real path -> predicted encoded path}`` mapping is written as
    JSON to ``args.submission_dir``.  The step is skipped when the output
    file already exists.
    """
    if os.path.exists(os.path.join(args.submission_dir, 'parallel_data_predictions.json')):
        print("Parallel data submission already created. Skipping creation..")
        return
    print("### Prepare parallel data Challenge submission")
    dataset_name = 'stanford_cxr_edema'
    _, _, test_data = dataset_factory.get_dataset_by_name(dataset_name, args, augmentations, test_augmentations)
    loader = get_eval_dataset_loader(args, test_data, args.batch_size, False)
    path_to_pred_npy_path = {}
    for batch in tqdm.tqdm(loader):
        x = batch['x'].cuda()
        # Mean-pool the encoder's last feature map over dim 1 to one vector
        # per image.  (The unused label tensor from the original loop was
        # dropped.)
        z = model.encode_input(x)[-1].mean(dim=1).cpu().numpy()
        for j in range(len(z)):
            path = batch['path'][j]
            predicted_path = get_nearest_neighbor_in_encoded_data(z[j], os.path.join(args.encoded_data_dir, 'private_encoded_dataset'), reduce_mean=True)
            path_to_pred_npy_path[path] = predicted_path
    ## Save real paths, args, and model for future eval
    if not os.path.exists(args.submission_dir):
        os.mkdir(args.submission_dir)
    # Close the output file deterministically instead of leaking the handle.
    with open(os.path.join(args.submission_dir, 'parallel_data_predictions.json'), 'w') as out_f:
        json.dump(path_to_pred_npy_path, out_f)
def real_world_challenge(args, augmentations, test_augmentations, model):
    """Build the real-world (no-parallel data) challenge submission.

    Encodes the Stanford CXR target split with ``model``, saves each encoding
    as a numbered ``.npy`` file, and writes the ``{npy path -> original
    path}`` mapping as JSON to ``args.submission_dir``.  The step is skipped
    when the output file already exists.
    """
    if os.path.exists(os.path.join(args.submission_dir, 'out_of_domain_npy_path_to_orig_path_dict.json')):
        print("Real world (no-parallel data) submission already created. Skipping creation..")
        return
    print("### Prepare real world (no-parallel data) Challenge submission")
    dataset_name = 'stanford_cxr_edema'
    target_data, _, _ = dataset_factory.get_dataset_by_name(dataset_name, args, augmentations, test_augmentations)
    loader = get_eval_dataset_loader(args, target_data, args.batch_size, True)
    # Create the output directories once, before the encoding loop (the
    # original re-checked them on every batch).
    if not os.path.exists(args.submission_dir):
        os.mkdir(args.submission_dir)
    encoded_dir = os.path.join(args.submission_dir, 'challenge_2_target_dataset')
    if not os.path.exists(encoded_dir):
        os.mkdir(encoded_dir)
    paths = {}
    idx = 0
    for batch in tqdm.tqdm(loader):
        x = batch['x'].cuda()
        # Mean-pool the encoder's last feature map over dim 1 to one vector
        # per image.  (The unused label tensor from the original loop was
        # dropped.)
        z = model.encode_input(x)[-1].mean(dim=1).cpu().numpy()
        for j in range(len(z)):
            path = batch['path'][j]
            npy_path = os.path.join(encoded_dir, '{}.npy'.format(idx))
            idx += 1
            np.save(npy_path, z[j])
            paths[npy_path] = path
    ## Save real paths, args, and model for future eval
    # Close the output file deterministically instead of leaking the handle.
    with open(os.path.join(args.submission_dir, 'out_of_domain_npy_path_to_orig_path_dict.json'), 'w') as out_f:
        json.dump(paths, out_f)
if __name__ == '__main__':
    # NOTE(review): overwriting __spec__ looks like the common workaround for
    # multiprocessing/DataLoader workers re-importing the entry module when
    # run under certain launchers -- confirm it is still needed.
    __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
    args = parsing.parse_args()
    main(args)
|
{"hexsha": "9c4a9277f0cb85d640b8129b375966265f4b281d", "size": 5556, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/create_submission.py", "max_stars_repo_name": "yala/NeuraCrypt", "max_stars_repo_head_hexsha": "6c9862d1076095d76779af03a3a9ffd2cfec748a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-06-12T12:34:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T07:12:28.000Z", "max_issues_repo_path": "scripts/create_submission.py", "max_issues_repo_name": "yala/NeuraCrypt", "max_issues_repo_head_hexsha": "6c9862d1076095d76779af03a3a9ffd2cfec748a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-03T18:21:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-30T15:40:51.000Z", "max_forks_repo_path": "scripts/create_submission.py", "max_forks_repo_name": "yala/NeuraCrypt", "max_forks_repo_head_hexsha": "6c9862d1076095d76779af03a3a9ffd2cfec748a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8529411765, "max_line_length": 153, "alphanum_fraction": 0.7192224622, "include": true, "reason": "import numpy", "num_tokens": 1296}
|
import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
from configuration import Configuration
import torch
import torch.nn.functional as F
import torch.optim as optim
class DDPGAgent():
    """A DDPG agent (actor + critic with target copies) that interacts with
    and learns from the environment inside a multi-agent ensemble."""

    def __init__(self, state_size, action_size, index, seed):
        """Initialize the Agent.

        Params:
            state_size: dimension of the state
            action_size: dimension of the action
            index: this agent's position in the ensemble; used in learn() to
                select its own reward/done columns and its own action gradient
            seed: random seed
        """
        self.config = Configuration()
        # NOTE(review): epsilon is stored but never read inside this class --
        # presumably consumed by an external exploration/noise schedule; confirm.
        self.epsilon = self.config.epsilon
        self.index = index

        # Set up the Actor networks (local is trained; target is a slowly
        # tracked copy used for stable TD targets)
        self.actor_local = Actor(state_size, action_size, seed, fc1_units=self.config.actor_fc1, fc2_units=self.config.actor_fc2).to(self.config.device)
        self.actor_target = Actor(state_size, action_size, seed, fc1_units=self.config.actor_fc1, fc2_units=self.config.actor_fc2).to(self.config.device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=self.config.lr_actor)

        # Set up the Critic networks
        self.critic_local = Critic(state_size, action_size, seed, fc1_units=self.config.critic_fc1, fc2_units=self.config.critic_fc2).to(self.config.device)
        self.critic_target = Critic(state_size, action_size, seed, fc1_units=self.config.critic_fc1, fc2_units=self.config.critic_fc2).to(self.config.device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=self.config.lr_critic, weight_decay=self.config.weight_decay)

        # Copy over the weights so local and target networks start identical
        self.hard_copy(self.actor_local, self.actor_target)
        self.hard_copy(self.critic_local, self.critic_target)

    def act(self, state):
        """Returns actions for given state as per current policy."""
        state = torch.from_numpy(state).float().to(self.config.device)
        # Put model in evaluating mode for deterministic inference
        self.actor_local.eval()
        with torch.no_grad():
            action = self.actor_local(state).cpu().data.numpy()
        # Put model back in training mode
        self.actor_local.train()
        return action

    def learn(self, index, experiences, gamma, all_next_actions, all_actions):
        """Update policy and value using given batch of experiences.

        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))

        Params:
            index: this agent's column in the joint rewards/dones tensors
            experiences: tuple of (s, a, r, s', done) tuples
            gamma: discount factor
            all_next_actions: each agent's target-actor action for s'
            all_actions: each agent's local-actor action for s
        """
        states, actions, rewards, next_states, dones = experiences

        # ---- Critic update ----
        # Reset the gradients
        self.critic_optimizer.zero_grad()
        index = torch.tensor([index]).to(self.config.device)
        actions_next = torch.cat(all_next_actions, dim=1).to(self.config.device)
        # Next-state Q comes from the *target* critic and is kept out of the
        # autograd graph.
        with torch.no_grad():
            q_next = self.critic_target(torch.cat((next_states, actions_next), dim=1))
        q_expected = self.critic_local(torch.cat((states, actions), dim=1))
        # TD target: select this agent's own reward/done column.
        q_t = rewards.index_select(1, index) + (gamma * q_next * (1 - dones.index_select(1, index)))
        F.mse_loss(q_expected, q_t.detach()).backward()
        self.critic_optimizer.step()

        # ---- Actor update ----
        self.actor_optimizer.zero_grad()
        # Only this agent's action keeps its gradient; all other agents'
        # actions are detached so the loss optimizes this policy alone.
        actions_predicted = [actions if i == self.index else actions.detach() for i, actions in enumerate(all_actions)]
        actions_predicted = torch.cat(actions_predicted, dim=1).to(self.config.device)
        actor_loss = -self.critic_local(torch.cat((states, actions_predicted), dim=1)).mean()
        actor_loss.backward()
        self.actor_optimizer.step()

        # Update target networks
        self.soft_update(self.critic_local, self.critic_target, self.config.tau)
        self.soft_update(self.actor_local, self.actor_target, self.config.tau)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.

        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params:
            local_model: model that weights will be copied from
            target_model: model that weights will be copied to
            tau: soft update parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)

    def hard_copy(self, local_model, target_model):
        """Overwrite target parameters with an exact copy of the local ones."""
        for target_param, param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(param.data)

    def save(self):
        """Checkpoint both local networks; file names encode the layer sizes
        and this agent's index."""
        torch.save(self.actor_local.state_dict(),
                   str(self.config.actor_fc1)+'_'+str(self.config.actor_fc2) + '_' + str(self.index) + '_actor.pth')
        torch.save(self.critic_local.state_dict(),
                   str(self.config.critic_fc1)+'_'+str(self.config.critic_fc2) + '_' + str(self.index) + '_critic.pth')

    def load(self, actor_file, critic_file):
        """Restore local networks from disk and mirror them into the targets."""
        self.actor_local.load_state_dict(torch.load(actor_file))
        self.critic_local.load_state_dict(torch.load(critic_file))
        self.hard_copy(self.actor_local, self.actor_target)
        self.hard_copy(self.critic_local, self.critic_target)
|
{"hexsha": "843de20a45a544f507a1183ca51af079f7e67f09", "size": 5384, "ext": "py", "lang": "Python", "max_stars_repo_path": "ddpg.py", "max_stars_repo_name": "riley-mld/UnityTennis_DRLN", "max_stars_repo_head_hexsha": "a0a78b76870b46d2c75105591140357871b4fab3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ddpg.py", "max_issues_repo_name": "riley-mld/UnityTennis_DRLN", "max_issues_repo_head_hexsha": "a0a78b76870b46d2c75105591140357871b4fab3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ddpg.py", "max_forks_repo_name": "riley-mld/UnityTennis_DRLN", "max_forks_repo_head_hexsha": "a0a78b76870b46d2c75105591140357871b4fab3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4958677686, "max_line_length": 157, "alphanum_fraction": 0.662332838, "include": true, "reason": "import numpy", "num_tokens": 1167}
|
#include <boost/test/unit_test.hpp>
#include "../Coverage/SymbolNameUtils.h"
#include "TestUtils.h"
#include "GLib/compat.h"
#include "GLib/Cpp/HtmlGenerator.h"
#include <fstream>
namespace GLib::Cpp
{
std::ostream & operator<<(std::ostream & s, const Fragment & f)
{
return s << "State: " << f.first << ", Value: \'" << f.second << '\'';
}
}
using namespace GLib::Cpp;
// Drain the fragment iterator: we only care that enumeration completes
// without throwing, not about the fragment values themselves.
void Parse(const Holder & code)
{
	for (const auto & fragment : code)
	{
		static_cast<void>(fragment);
		// std::cout << fragment.first << " : " << fragment.second << std::endl;
	}
}
BOOST_AUTO_TEST_SUITE(CppIteratorTests)
// Each case below lexes a snippet via Holder and compares the produced
// (State, text) fragment sequence against the expected collection.
BOOST_AUTO_TEST_CASE(Empty)
{
// Empty input produces no fragments at all.
Holder code {R"()"};
std::vector<Fragment> expected {};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(Code0)
{
Holder code {"void"};
std::vector<Fragment> expected {{State::Code, "void"}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(Code1)
{
Holder code {R"(void foo)"};
std::vector<Fragment> expected {{State::Code, "void"}, {State::WhiteSpace, " "}, {State::Code, "foo"}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CodeNoWs)
{
// Second Holder argument false: whitespace is folded into the code fragments.
Holder code {R"(void foo)", false};
std::vector<Fragment> expected {{State::Code, "void foo"}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CommentBlock)
{
Holder code {R"(/***/)"};
std::vector<Fragment> expected {{State::CommentBlock, {"/***/"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CommentWhiteSpace)
{
// NOTE(review): expected WhiteSpace " " implies a trailing space after the
// comment in the literal — verify it was not lost in formatting.
Holder code {R"(/**/
;)"};
std::vector<Fragment> expected {{State::CommentBlock, {"/**/"}}, {State::WhiteSpace, {" "}}, {State::Code, {"\n;"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CommentLineContinue)
{
// A backslash at end of a // comment continues the comment onto the next line.
Holder code {R"(// hello\
continue
/* block */ /* another block */
)"};
std::vector<Fragment> expected {
{State::CommentLine, {"// hello\\\ncontinue\n"}},
{State::CommentBlock, {"/* block */"}},
{State::WhiteSpace, {" "}},
{State::CommentBlock, {"/* another block */"}},
{State::WhiteSpace, {"\n"}},
};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CommentLineNotContinue)
{
// A backslash not at end-of-line does not continue the comment.
Holder code {"// hello \\ not continuation"};
std::vector<Fragment> expected {{State::CommentLine, {"// hello \\ not continuation"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CommentLineNotContinueEnd)
{
Holder code {"// hello not continuation \\/"};
std::vector<Fragment> expected {{State::CommentLine, {"// hello not continuation \\/"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CommentStar)
{
Holder code {"/* * */"};
std::vector<Fragment> expected {{State::CommentBlock, {"/* * */"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(NotCommentStart)
{
// A lone '/' (division) must not start a comment fragment.
Holder code {"int foo=bar/baz;"};
std::vector<Fragment> expected {{State::Code, {"int"}}, {State::WhiteSpace, {" "}}, {State::Code, {"foo=bar"}}, {State::Code, {"/baz;"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CommentFromStateCode)
{
Holder code {"bar//comment\n;"};
std::vector<Fragment> expected {{State::Code, {"bar"}}, {State::CommentLine, {"//comment\n"}}, {State::Code, {";"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
// String and raw-string lexing: quoted literals, escape/continuation handling,
// R"prefix(...)prefix" delimiters, and the error cases for illegal prefixes.
BOOST_AUTO_TEST_CASE(String)
{
Holder code {R"(auto fred = "this is a string";)"};
std::vector<Fragment> expected {
{State::Code, {"auto"}},
{State::WhiteSpace, {" "}},
{State::Code, {"fred"}},
{State::WhiteSpace, {" "}},
{State::Code, {"="}},
{State::WhiteSpace, {" "}},
{State::String, {"\"this is a string\""}},
{State::Code, {";"}},
};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(StringFromStateCode)
{
Holder code {R"(;"hello";)"};
std::vector<Fragment> expected {{State::Code, {";"}}, {State::String, {R"("hello")"}}, {State::Code, {";"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(StringContinue)
{
// A backslash-newline inside a string keeps the string fragment open.
Holder code {R"--("abc\
def")--"};
std::vector<Fragment> expected {{State::String, {R"--("abc\
def")--"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(StringNotContinue)
{
Holder code {R"--("\\abc\\")--"};
std::vector<Fragment> expected {{State::String, {R"--("\\abc\\")--"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(StringWithQuotes)
{
Holder code {R"--("\"abc\"")--"};
std::vector<Fragment> expected {{State::String, {R"--("\"abc\"")--"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(RawString)
{
// The leading 'R' stays a Code fragment; the RawString fragment starts at '"'.
Holder code {R"--(auto fred = R"(this is a raw string)";)--"};
std::vector<Fragment> expected {
{State::Code, {"auto"}}, {State::WhiteSpace, {" "}}, {State::Code, {"fred"}}, {State::WhiteSpace, {" "}},
{State::Code, {"="}}, {State::WhiteSpace, {" "}}, {State::Code, {"R"}}, {State::RawString, {"\"(this is a raw string)\""}},
{State::Code, {";"}},
};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(RawStringPrefix)
{
Holder code {R"--(auto fred = R"==(this is a raw string)==";)--"};
std::vector<Fragment> expected {
{State::Code, {"auto"}}, {State::WhiteSpace, {" "}}, {State::Code, {"fred"}}, {State::WhiteSpace, {" "}},
{State::Code, {"="}}, {State::WhiteSpace, {" "}}, {State::Code, {"R"}}, {State::RawString, {R"--("==(this is a raw string)==")--"}},
{State::Code, {";"}},
};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(RawStringIgnored)
{
// A ")--" that is not followed by the closing quote does not end the raw string.
Holder code {R"(R"--(hello)--)--")"};
std::vector<Fragment> expected {{State::Code, "R"}, {State::RawString, "\"--(hello)--)--\""}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(RawStringPrefixTooLong)
{
// Raw-string delimiters are limited to 16 characters; the 17th must throw.
std::string_view code = R"(R"12345678901234567(content)12345678901234567")";
GLIB_CHECK_RUNTIME_EXCEPTION(Parse(code), "Illegal character: '7' (0x37) at line: 1, state: RawStringPrefix");
}
BOOST_AUTO_TEST_CASE(RawStringPrefixErrorSpace)
{
std::string_view code = R"(R" (content) ")";
GLIB_CHECK_RUNTIME_EXCEPTION(Parse(code), "Illegal character: ' ' (0x20) at line: 1, state: RawStringPrefix");
}
BOOST_AUTO_TEST_CASE(RawStringPrefixErrorCloseParenthesis)
{
std::string_view code = R"--(R")(content)(")--";
GLIB_CHECK_RUNTIME_EXCEPTION(Parse(code), "Illegal character: ')' (0x29) at line: 1, state: RawStringPrefix");
}
BOOST_AUTO_TEST_CASE(RawStringPrefixBackslash)
{
std::string_view code = R"--(R"\(content)\")--";
GLIB_CHECK_RUNTIME_EXCEPTION(Parse(code), "Illegal character: '\\' (0x5c) at line: 1, state: RawStringPrefix");
}
BOOST_AUTO_TEST_CASE(RawStringNewLine)
{
// Embedded newlines stay inside the single RawString fragment.
Holder code {
R"--(R"(1
2
3)")--"};
std::vector<Fragment> expected {
{State::Code, {"R"}},
{State::RawString, {R"--("(1
2
3)")--"}},
};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(Main)
{
// End-to-end lex of a small hello-world program mixing directives, comments,
// strings and raw strings.
// NOTE(review): the expected "\n\n" and "\n\t" whitespace fragments imply a
// blank line and tab indentation in the literal — verify the literal was not
// reformatted (blank lines/tabs lost) at some point.
Holder code {
R"--(#include <iostream>
int main() // main
{
std::cout << "HelloWorld!" << std::endl;
std::cout << R"(HelloWorld!)" << std::endl;
return 0;
}
)--"};
std::vector<Fragment> expected {
{State::Directive, {"#include <iostream>"}},
{State::WhiteSpace, {"\n\n"}},
{State::Code, {"int"}},
{State::WhiteSpace, {" "}},
{State::Code, {"main()"}},
{State::WhiteSpace, {" "}},
{State::CommentLine, {"// main\n"}},
{State::Code, {"{"}},
{State::WhiteSpace, {"\n\t"}},
{State::Code, {"std::cout"}},
{State::WhiteSpace, {" "}},
{State::Code, {"<<"}},
{State::WhiteSpace, {" "}},
{State::String, {R"("HelloWorld!")"}},
{State::WhiteSpace, {" "}},
{State::Code, {"<<"}},
{State::WhiteSpace, {" "}},
{State::Code, {"std::endl;"}},
{State::WhiteSpace, {"\n\t"}},
{State::Code, {"std::cout"}},
{State::WhiteSpace, {" "}},
{State::Code, {"<<"}},
{State::WhiteSpace, {" "}},
{State::Code, {"R"}},
{State::RawString, {"\"(HelloWorld!)\""}},
{State::WhiteSpace, {" "}},
{State::Code, {"<<"}},
{State::WhiteSpace, {" "}},
{State::Code, {"std::endl;"}},
{State::WhiteSpace, {"\n\t"}},
{State::Code, {"return"}},
{State::WhiteSpace, {" "}},
{State::Code, {"0;"}},
{State::WhiteSpace, {"\n"}},
{State::Code, {"}"}},
{State::WhiteSpace, {"\n"}},
};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(SystemInclude)
{
// The '/' inside an angle-bracket include splits the Directive into two fragments.
Holder code {R"--(#include <experimental/filesystem>)--"};
std::vector<Fragment> expected {{State::Directive, {"#include <experimental"}}, {State::Directive, {"/filesystem>"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CharacterLiteral)
{
// Character literals containing quote and backslash escapes.
Holder code {R"(auto char1='"';
auto char2='\"';
auto char3='\\';
)"};
std::vector<Fragment> expected {
{State::Code, {"auto"}}, {State::WhiteSpace, {" "}}, {State::Code, {"char1="}}, {State::CharacterLiteral, {R"('"')"}},
{State::Code, {";"}}, {State::WhiteSpace, {"\n"}},
{State::Code, {"auto"}}, {State::WhiteSpace, {" "}}, {State::Code, {"char2="}}, {State::CharacterLiteral, {R"('\"')"}},
{State::Code, {";"}}, {State::WhiteSpace, {"\n"}},
{State::Code, {"auto"}}, {State::WhiteSpace, {" "}}, {State::Code, {"char3="}}, {State::CharacterLiteral, {R"('\\')"}},
{State::Code, {";"}}, {State::WhiteSpace, {"\n"}},
};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CharacterLiteralFromStateNone)
{
Holder code {R"('\x00';)"};
std::vector<Fragment> expected {{State::CharacterLiteral, {R"('\x00')"}}, {State::Code, {";"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(CharacterLiteralFromStateWhitespace)
{
Holder code {R"( '\x00';)"};
std::vector<Fragment> expected {{State::WhiteSpace, {" "}}, {State::CharacterLiteral, {R"('\x00')"}}, {State::Code, {";"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(NotCharacterLiteral)
{
// A digit-separator apostrophe in a numeric literal is not a character literal.
Holder code {R"(0xFFFF'FFFFU;)"};
std::vector<Fragment> expected {{State::Code, {"0xFFFF'FFFFU;"}}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(Guard)
{
// Include-guard pattern: directives interleaved with line and block comments.
// NOTE(review): the expected "\n\n" implies a blank line between #define and
// #endif in the literal — verify it was not lost in formatting.
Holder code {R"(/* comment */
#ifndef file_included // another comment
#define file_included
#endif /* not file_included */
)"};
std::vector<Fragment> expected {
{State::CommentBlock, "/* comment */"},
{State::WhiteSpace, "\n"},
{State::Directive, "#ifndef file_included "},
{State::CommentLine, "// another comment\n"},
{State::Directive, "#define file_included"},
{State::WhiteSpace, "\n\n"},
{State::Directive, "#endif "},
{State::CommentBlock, "/* not file_included */"},
{State::WhiteSpace, "\n"},
};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(DirectiveContinue)
{
// Backslash-newline continues a preprocessor directive onto the next line.
Holder code {R"(#include \
"foo")"};
std::vector<Fragment> expected {{State::Directive, "#include \\\n\"foo\""}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(TerminationError)
{
// An unclosed string at end of input must be reported with its start line.
std::string_view code = R"("stringNotClosed)";
GLIB_CHECK_RUNTIME_EXCEPTION(Parse(code), "Termination error, State: String, StartLine: 1");
}
BOOST_AUTO_TEST_CASE(DirectiveNotContinue)
{
// A backslash followed by more text (here a comment) does not continue the directive.
auto code = Holder {R"(# define foo \ //)"};
std::vector<Fragment> expected {{State::Directive, "# define foo \\ "}, {State::CommentLine, "//"}};
BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), code.begin(), code.end());
}
BOOST_AUTO_TEST_CASE(Html)
{
// Plain code passes through the HTML generator unwrapped.
std::string_view code = ";";
std::ostringstream stm;
Htmlify(code, stm);
auto expected = ";";
BOOST_TEST(expected == stm.str());
}
BOOST_AUTO_TEST_CASE(Html2)
{
// A directive is wrapped in a class="d" span and its space rendered as a
// visible middle dot (U+00B7, UTF-8 \xC2\xB7).
// Fix: the inner quotes around foo.h were unescaped, which terminated the
// string literal early and left `foo.h` outside any string (compile error);
// they must be written as \" inside the expected literal.
std::string_view code = "#include \"foo.h\"";
std::ostringstream stm;
Htmlify(code, stm);
auto expected = "<span class=\"d\">#include\xC2\xB7\"foo.h\"</span>";
BOOST_TEST(expected == stm.str());
}
BOOST_AUTO_TEST_CASE(Html3)
{
// Each line of a multi-line block comment gets its own class="c" span.
std::string_view code = R"(/*
1
2
3
*/)";
std::ostringstream stm;
Htmlify(code, stm);
auto expected = R"(<span class="c">/*</span>
<span class="c">1</span>
<span class="c">2</span>
<span class="c">3</span>
<span class="c">*/</span>)";
BOOST_TEST(expected == stm.str());
}
BOOST_AUTO_TEST_CASE(KeywordAndCommonType)
{
// Keywords (class="k"), whitespace dots (class="w") and common types
// (class="t") are highlighted separately.
std::string_view code = "auto v=std::vector{};";
std::ostringstream stm;
Htmlify(code, stm);
auto expected = "<span class=\"k\">auto</span>"
"<span class=\"w\">\xC2\xB7</span>"
"v="
"std::<span class=\"t\">vector</span>"
"{};";
TestUtils::Compare(stm.str(), expected);
}
BOOST_AUTO_TEST_CASE(SymbolNameCleanup)
{
// RemoveTemplateDefinitions collapses every template argument list to <T>,
// including nested lists.
std::string value = "NoCleanUp";
RemoveTemplateDefinitions(value);
BOOST_TEST("NoCleanUp" == value);
value = "Foo<T1,T2>::Bar<T3>";
RemoveTemplateDefinitions(value);
BOOST_TEST("Foo<T>::Bar<T>" == value);
value = "Foo<Bar, Baz>::Qux<Quux, Quuz>";
RemoveTemplateDefinitions(value);
BOOST_TEST("Foo<T>::Qux<T>" == value);
value = "Foo<Bar, Baz<Qux<Quux, Quuz>>>::Corge";
RemoveTemplateDefinitions(value);
BOOST_TEST("Foo<T>::Corge" == value);
}
BOOST_AUTO_TEST_CASE(SymbolNamePreOps)
{
// operator symbols containing '<'/'>' before the template list must not be
// mistaken for template brackets.
std::string value = "Foo<Bar>::operator->";
RemoveTemplateDefinitions(value);
BOOST_TEST("Foo<T>::operator->" == value);
value = "operator> Foo<Bar>";
RemoveTemplateDefinitions(value);
BOOST_TEST("operator> Foo<T>" == value);
value = "operator>> Foo<Bar>";
RemoveTemplateDefinitions(value);
BOOST_TEST("operator>> Foo<T>" == value);
value = "operator< Foo<Bar>";
RemoveTemplateDefinitions(value);
BOOST_TEST("operator< Foo<T>" == value);
value = "operator<< Foo<Bar>";
RemoveTemplateDefinitions(value);
BOOST_TEST("operator<< Foo<T>" == value);
}
BOOST_AUTO_TEST_CASE(SymbolNamePostOps)
{
// Same as above, with the operator after the templated name.
std::string value = "Foo<Bar> operator>";
RemoveTemplateDefinitions(value);
BOOST_TEST("Foo<T> operator>" == value);
value = "Foo<Bar> operator>>";
RemoveTemplateDefinitions(value);
BOOST_TEST("Foo<T> operator>>" == value);
value = "Foo<Bar> operator<";
RemoveTemplateDefinitions(value);
BOOST_TEST("Foo<T> operator<" == value);
value = "Foo<Bar> operator<<";
RemoveTemplateDefinitions(value);
BOOST_TEST("Foo<T> operator<<" == value);
}
BOOST_AUTO_TEST_CASE(SymbolNameError)
{
// Unbalanced brackets must raise rather than produce a mangled symbol.
std::string value = ">foo<";
GLIB_CHECK_RUNTIME_EXCEPTION({ RemoveTemplateDefinitions(value); }, "Unable to parse symbol: >foo<");
}
BOOST_AUTO_TEST_CASE(UnterminatedBug)
{
std::string_view code = R"(//\)"; // test: compilers accept this without error, the generator must still report it
std::ostringstream stm;
GLIB_CHECK_RUNTIME_EXCEPTION({ Htmlify(code, stm); }, "Termination error, State: CommentLine, StartLine: 1");
}
// The "//**/" prefix keeps BULK_TEST defined-out; deleting the two leading
// slashes enables the (slow, machine-specific) bulk scan below.
//**/ #define BULK_TEST
#ifdef BULK_TEST
namespace fs = GLib::Compat::filesystem;
// Parse one file, appending any parse failure (file + message) to s.
void ScanFile(const fs::path & p, std::ostream & s)
{
std::ifstream t(p);
if (!t)
{
std::cout << "read failed : " + p.u8string() << '\n';
return;
}
try
{
std::stringstream ss;
ss << t.rdbuf();
Parse(Holder {ss.str()});
}
catch (const std::runtime_error & e)
{
s << p.u8string() << " : " << e.what() << '\n';
}
}
BOOST_AUTO_TEST_CASE(BulkTest)
{
// Smoke-test the lexer over entire system/library include trees; the test
// passes only when no file produced a parse error.
// NOTE(review): paths are machine-specific — adjust per host.
auto paths = {
#if defined(__linux__) && defined(__GNUG__)
"/usr/include"
#elif defined(_WIN32) && defined(_MSC_VER)
R"--(C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.24.28314\include)--",
R"--(C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.24.28314\crt)--",
R"--(C:\Users\Glen\source\ExternalDependencies\boost_1_69_0_test\boost)--"
#else
#error unknown
#endif
};
std::ostringstream s;
for (auto p : paths)
{
size_t count {};
for (const auto & de : fs::recursive_directory_iterator(p))
{
if (is_regular_file(de.path()) && de.path().extension() != ".asm")
{
ScanFile(de.path(), s);
++count;
}
}
std::cout << "BulkTest: " << p << ", Count: " << count << '\n';
}
BOOST_TEST("" == s.str());
}
#endif
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "1f34b70a7c0a086b7e4b4a5d2167010c7e1f1682", "size": 16924, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Tests/CppIteratorTests.cpp", "max_stars_repo_name": "glen-summers/GLibCopy", "max_stars_repo_head_hexsha": "3a0a7d0fc17f1ea98b723120d8394257047d4386", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2018-11-14T18:37:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T11:33:59.000Z", "max_issues_repo_path": "Tests/CppIteratorTests.cpp", "max_issues_repo_name": "glen-summers/GLibCopy", "max_issues_repo_head_hexsha": "3a0a7d0fc17f1ea98b723120d8394257047d4386", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tests/CppIteratorTests.cpp", "max_forks_repo_name": "glen-summers/GLibCopy", "max_forks_repo_head_hexsha": "3a0a7d0fc17f1ea98b723120d8394257047d4386", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2795031056, "max_line_length": 139, "alphanum_fraction": 0.6454738832, "num_tokens": 4719}
|
# docker attach kairi_nvidia
# conda activate train
import random
import numpy as np
import logging
import time
from requests.api import get
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
from models import DecoderWithBertEmbedding
import logging
from datasets import ContextCaptionDataset
from utils import save_checkpoint, adjust_learning_rate, accuracy, count_parameters, clip_gradient, AverageMeter
from nltk.translate.bleu_score import corpus_bleu
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
cudnn.benchmark = True  # set to true only if inputs to model are fixed size; otherwise lot of computational overhead
PATH = '/mnt/nas2/seungil/'  # folder with data files saved by create_input_files.py
##########################
""" vilbert module """
from types import SimpleNamespace
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.modeling_bert import BertModel
##########################
# Module-level logger configuration; every log line carries a timestamp,
# level and logger name.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def get_args():
    """Return the static training configuration as a SimpleNamespace.

    All hyper-parameters are hard-coded here; callers read them as
    attributes (args.batch_size, args.device, ...).
    """
    cfg = dict(
        from_pretrained=PATH + "pretrained_model.bin",
        bert_model="bert-base-uncased",
        config_file="config/bert_base_6layer_6conect.json",
        train='train',
        val='val',
        do_lower_case=True,
        predict_feature=False,
        seed=42,
        workers=16,
        baseline=False,
        dynamic_attention=False,
        task_specific_tokens=True,
        batch_size=128,
        save_name='FA,GA,AA0~AA7',  # base name shared by data files
        start_epoch=0,
        epochs=120,  # number of epochs to train for (if early stopping is not triggered)
        epochs_since_improvement=0,  # epochs since the last validation-BLEU improvement
        encoder_lr=1e-4,  # learning rate for encoder if fine-tuning
        decoder_lr=4e-3,  # learning rate for decoder
        grad_clip=5.,  # clip gradients at this absolute value
        alpha_c=1.,  # weight of the 'doubly stochastic attention' regularizer
        best_bleu4=0.,  # best BLEU-4 score so far
        print_freq=100,  # print training/validation stats every this many batches
        fine_tune_encoder=False,  # fine-tune encoder?
        device=torch.device("cuda:4" if torch.cuda.is_available() else "cpu"),
        checkpoint=None,  # e.g. "BEST_checkpoint_FA_dataset.pth.tar"
    )
    return SimpleNamespace(**cfg)
def main():
    """
    Build the ViLBERT encoder and BERT-embedding decoder, then run the
    train/validate loop with BLEU-4-based early-stopping bookkeeping.
    (Encoder setup adapted from ViLBERT.)
    """
    args = get_args()
    # Select encoder implementation: plain BERT baseline vs full ViLBERT.
    if args.baseline:
        print("when baseline is True")
        from pytorch_transformers.modeling_bert import BertConfig
        from vilbert.basebert import BaseBertForVLTasks
    else:
        from vilbert.vilbert import BertConfig
        from vilbert.vilbert import VILBertForVLTasks
    config = BertConfig.from_json_file(args.config_file)
    timeStamp = args.from_pretrained.split('/')[-1] + '-' + args.save_name
    config = BertConfig.from_json_file(args.config_file)  # NOTE(review): duplicate of the call two lines above — redundant
    default_gpu=True
    if args.predict_feature:
        config.v_target_size = 2048
        config.predict_feature = True
    else:
        config.v_target_size = 1601
        config.predict_feature = False
    if args.task_specific_tokens:
        config.task_specific_tokens = True
    if args.dynamic_attention:
        config.dynamic_attention = True
    config.visualization = True
    num_labels = 3129
    if args.baseline:
        encoder = BaseBertForVLTasks.from_pretrained(
            args.from_pretrained, config=config, num_labels=num_labels, default_gpu=default_gpu
        )
    else:
        encoder = VILBertForVLTasks.from_pretrained(
            args.from_pretrained, config=config, num_labels=num_labels, default_gpu=default_gpu
        )
    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # Load pre-trained model tokenizer (vocabulary)
    tokenizer = BertTokenizer.from_pretrained(
        args.bert_model, do_lower_case=args.do_lower_case
    )
    """
    Training and validation.
    """
    global best_bleu4, epochs_since_improvement, checkpoint, start_epoch, fine_tune_encoder
    start_epoch = args.start_epoch
    epochs = args.epochs
    epochs_since_improvement = args.epochs_since_improvement
    best_bleu4 = args.best_bleu4
    checkpoint = args.checkpoint
    if checkpoint is None:
        # Fresh run: build the decoder around a frozen pre-trained BERT.
        BertForDecoder = BertModel.from_pretrained(args.bert_model).to(args.device)
        BertForDecoder.eval()
        decoder = DecoderWithBertEmbedding(vocab_size=30522,use_glove=False, use_bert=True, tokenizer=tokenizer, BertModel=BertForDecoder)
        decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),
                                             lr=args.decoder_lr)
        # encoder.fine_tune(fine_tune_encoder)
        encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),
                                             lr=args.encoder_lr) if args.fine_tune_encoder else None
    else:
        # Resume: models and optimizers come whole from the checkpoint file.
        logger.info(f"Loaded from checkpoint: {checkpoint}")
        checkpoint = torch.load(PATH + 'checkpoints/' + checkpoint, map_location=str(args.device))
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        best_bleu4 = checkpoint['bleu-4']
        decoder = checkpoint['decoder']
        decoder_optimizer = checkpoint['decoder_optimizer']
        encoder = checkpoint['encoder']
        encoder_optimizer = checkpoint['encoder_optimizer']
        if args.fine_tune_encoder is True and encoder_optimizer is None:
            # encoder.fine_tune(fine_tune_encoder)
            encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),
                                                 lr=args.encoder_lr)
    n_gpu = torch.cuda.device_count()
    # if n_gpu>0:
    #     torch.cuda.manual_seed_all(args.seed)
    logger.info(
        "device: {} n_gpu: {}".format(
            args.device, n_gpu
        )
    )
    # NOTE(review): DataParallel device ids are hard-coded to GPUs 4-7 —
    # confirm those devices exist on the target host.
    if n_gpu>1:
        encoder = nn.DataParallel(encoder,device_ids = [4,5,6,7])
        decoder = nn.DataParallel(decoder,device_ids = [4,5,6,7])
    # Move to GPU, if available
    decoder = decoder.to(args.device)
    encoder = encoder.to(args.device)
    encoder.eval()  # encoder is used frozen (feature extractor) unless fine-tuning
    criterion = nn.CrossEntropyLoss().to(args.device)
    # Custom dataloaders
    train_loader = torch.utils.data.DataLoader(
        ContextCaptionDataset(
            "TASK19",
            dataroot=PATH,
            annotations_jsonpath= PATH + 'jsonlines/' + args.train + '.jsonline',
            split='train',
            features_h5path1 = PATH + 'lmdbs/' + args.train,
            features_h5path2 = '',
            tokenizer=tokenizer,
            bert_model=args.bert_model,
        ),
        batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        ContextCaptionDataset(
            "TASK19",
            dataroot=PATH,
            annotations_jsonpath= PATH + 'jsonlines/' + args.val + '.jsonline',
            split='val',
            features_h5path1 = PATH + 'lmdbs/' + args.val,
            features_h5path2 = '',
            tokenizer=tokenizer,
            bert_model=args.bert_model,
        ),
        batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
    logger.info("***** Running training *****")
    logger.info("  Batch size = %d", args.batch_size)
    logger.info("  Num steps = %d", epochs)
    logger.info("*****Total Number of Parameters*****")
    logger.info("  Encoder # = %d", count_parameters(encoder))
    logger.info("  Decoder # = %d", count_parameters(decoder))
    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rates after every 8 epochs without improvement.
        if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
            adjust_learning_rate(decoder_optimizer, 0.8)
            if args.fine_tune_encoder:
                adjust_learning_rate(encoder_optimizer, 0.8)
        # One epoch's training
        train(train_loader=train_loader,
              encoder=encoder,
              decoder=decoder,
              criterion=criterion,
              encoder_optimizer=encoder_optimizer,
              decoder_optimizer=decoder_optimizer,
              epoch=epoch)
        # One epoch's validation
        recent_bleu4 = validate(val_loader=val_loader,
                                encoder=encoder,
                                decoder=decoder,
                                criterion=criterion,
                                tokenizer=tokenizer)
        # Check if there was an improvement
        is_best = recent_bleu4 > best_bleu4
        best_bleu4 = max(recent_bleu4, best_bleu4)
        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
        else:
            epochs_since_improvement = 0
        # Save checkpoint
        save_checkpoint(args.save_name, epoch, epochs_since_improvement, encoder, decoder, encoder_optimizer,
                        decoder_optimizer, recent_bleu4, is_best)
def train(train_loader, encoder, decoder, criterion, encoder_optimizer, decoder_optimizer, epoch):
    """
    Performs one epoch's training.

    :param train_loader: DataLoader for training data
    :param encoder: encoder model (ViLBERT; kept in eval mode by the caller)
    :param decoder: decoder model
    :param criterion: loss layer
    :param encoder_optimizer: optimizer to update encoder's weights (if fine-tuning)
    :param decoder_optimizer: optimizer to update decoder's weights
    :param epoch: epoch number (for logging only)
    """
    args = get_args()
    decoder.train()  # train mode (dropout and batchnorm is used)
    # encoder.train()
    batch_time = AverageMeter()  # forward prop. + back prop. time
    data_time = AverageMeter()  # data loading time
    losses = AverageMeter()  # loss (per word decoded)
    top5accs = AverageMeter()  # top5 accuracy
    start = time.time()
    # Batches
    for i, batch in enumerate(train_loader):
        features, spatials, image_mask, context, caption, input_mask, segment_ids, co_attention_mask, image_id, caplens = (
            batch
        )
        # Per-sample task id tensor (task 19), same batch dimension as context.
        task_tokens = context.new().resize_(context.size(0), 1).fill_(19)
        data_time.update(time.time() - start)
        # Move to GPU, if available
        context = context.to(args.device)
        features = features.to(args.device)
        spatials = spatials.to(args.device)
        segment_ids = segment_ids.to(args.device)
        input_mask = input_mask.to(args.device)
        image_mask = image_mask.to(args.device)
        co_attention_mask = co_attention_mask.to(args.device)
        task_tokens = task_tokens.to(args.device)
        # Forward prop. — only the pooled joint representation is used.
        _, _, _, _, _, _, _, _, _, _, pooled_output = encoder(
            context,  # input txt
            features,  # input imgs
            spatials,  # img loc
            segment_ids,  # token type id
            input_mask,  # text attention mask
            image_mask,  # img attention mask
            co_attention_mask,  # co attention mask
            task_tokens,  # default = None
        )
        pooled_output = pooled_output.to(args.device)
        caption = caption.to(args.device)
        scores, caps_sorted, decode_lengths, alphas = decoder(pooled_output, caption, caplens)
        # Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
        targets = caps_sorted[:, 1:]
        # Remove timesteps that we didn't decode at, or are pads;
        # pack_padded_sequence is an easy trick to do this.
        # NOTE(review): unpacks the 4-field PackedSequence and keeps only .data —
        # confirm the installed torch version returns 4 fields.
        scores, _, _, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
        targets, _, _, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
        # Calculate loss
        loss = criterion(scores, targets).to(args.device)
        # Add doubly stochastic attention regularization
        loss += args.alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
        # Back prop.
        decoder_optimizer.zero_grad()
        if encoder_optimizer is not None:
            encoder_optimizer.zero_grad()
        loss.backward()
        # Clip gradients
        if args.grad_clip is not None:
            clip_gradient(decoder_optimizer, args.grad_clip)
            if encoder_optimizer is not None:
                clip_gradient(encoder_optimizer, args.grad_clip)
        # Update weights
        decoder_optimizer.step()
        if encoder_optimizer is not None:
            encoder_optimizer.step()
        # Keep track of metrics
        top5 = accuracy(scores, targets, 5)
        losses.update(loss.item(), sum(decode_lengths))
        top5accs.update(top5, sum(decode_lengths))
        batch_time.update(time.time() - start)
        start = time.time()
        # Print status
        if i % args.print_freq == 0:
            print(f"Epoch: [{epoch}][{i}/{len(train_loader)}]")
            print(f"Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})")
            print(f"Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})")
            print(f"Loss {float(losses.val):.4f} ({float(losses.avg):.4f})")
            print(f"Top-5 Accuracy {float(top5accs.val):.3f} ({float(top5accs.avg):.3f})")
def validate(val_loader, encoder, decoder, criterion, tokenizer):
    """
    Performs one epoch's validation.

    :param val_loader: DataLoader for validation data.
    :param encoder: encoder model
    :param decoder: decoder model
    :param criterion: loss layer
    :param tokenizer: BERT tokenizer used to map target/predicted ids back to tokens
    :return: BLEU-4 score
    """
    args = get_args()
    decoder.eval()  # eval mode (no dropout or batchnorm)
    if encoder is not None:
        encoder.eval()
    batch_time = AverageMeter()
    losses = AverageMeter()
    top5accs = AverageMeter()
    start = time.time()
    references = list()  # references (true captions) for calculating BLEU-4 score
    hypothesis = list()  # hypothesis (predictions)
    # explicitly disable gradient calculation to avoid CUDA memory error
    # solves the issue #57
    with torch.no_grad():
        # Batches
        for i, batch in enumerate(val_loader):
            features, spatials, image_mask, context, caption, input_mask, segment_ids, co_attention_mask, image_id, caplens = (
                batch
            )
            # Per-sample task id tensor (task 19).
            task_tokens = context.new().resize_(context.size(0), 1).fill_(19)
            # Move to GPU, if available
            context = context.to(args.device)
            features = features.to(args.device)
            spatials = spatials.to(args.device)
            segment_ids = segment_ids.to(args.device)
            input_mask = input_mask.to(args.device)
            image_mask = image_mask.to(args.device)
            co_attention_mask = co_attention_mask.to(args.device)
            task_tokens = task_tokens.to(args.device)
            # Forward prop.
            _, _, _, _, _, _, _, _, _, _, pooled_output = encoder(
                context,
                features,
                spatials,
                segment_ids,
                input_mask,
                image_mask,
                co_attention_mask,
                task_tokens,
            )
            pooled_output = pooled_output.to(args.device)
            caption = caption.to(args.device)
            scores, caps_sorted, decode_lengths, alphas = decoder(pooled_output, caption, caplens)
            # Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
            targets = caps_sorted[:, 1:]
            # Remove timesteps that we didn't decode at, or are pads;
            # pack_padded_sequence is an easy trick to do this.
            scores_packed, _, _, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
            targets_packed, _, _, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
            loss = criterion(scores_packed, targets_packed)
            # Add doubly stochastic attention regularization
            loss += args.alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
            # Keep track of metrics
            losses.update(loss.item(), sum(decode_lengths))
            top5 = accuracy(scores_packed, targets_packed, 5)
            top5accs.update(top5, sum(decode_lengths))
            batch_time.update(time.time() - start)
            start = time.time()
            if i % args.print_freq == 0:
                print('Validation: [{0}/{1}]\t'
                      'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss_val:.4f} ({loss_avg:.4f})\t'
                      'Top-5 Accuracy {top5_val:.3f} ({top5_avg:.3f})\t'.format(i, len(val_loader), batch_time=batch_time, loss_val = float(losses.val), loss_avg = float(losses.avg), top5_val = float(top5accs.val), top5_avg=float(top5accs.avg)))
            # References
            for j in range(targets.shape[0]):
                img_caps = targets[j].tolist()  # validation dataset only has 1 unique caption per img
                img_caps = tokenizer.convert_ids_to_tokens(img_caps)
                clean_cap = [w for w in img_caps if w not in ["[PAD]","[CLS]","[SEP]"]]  # remove pad, start, and end tokens
                references.append(clean_cap)
            # Hypotheses: greedy argmax over the vocabulary dimension.
            _, preds = torch.max(scores, dim=2)
            preds = preds.tolist()
            preds_token = []
            for l in preds :
                preds_token.append(tokenizer.convert_ids_to_tokens(l))
            for j, p in enumerate(preds_token):
                # Truncate to the actually decoded length for this sample.
                pred = p[:decode_lengths[j]]
                pred = [w for w in pred if w not in ["[PAD]", "[CLS]","[SEP]"]]
                hypothesis.append(pred)  # remove pads, start, and end
            assert len(references) == len(hypothesis)
        # Calculate BLEU-4 scores
        bleu4 = corpus_bleu(references, hypothesis)
        print(
            '\n * LOSS - {loss_avg:.3f}, TOP-5 ACCURACY - {top5_avg:.3f}, BLEU-4 - {bleu}\n'.format(
                loss_avg=float(losses.avg),
                top5_avg=float(top5accs.avg),
                bleu=bleu4))
        # Log every (reference, hypothesis) pair for manual inspection.
        for r,h in zip(references,hypothesis):
            logger.info(' '.join(r)+ '\n' + ' '.join(h) + '\n')
        logger.info("*****Validation Done*****")
        return bleu4
# Entry point: run training/validation when executed as a script.
if __name__ == '__main__':
    main()
|
{"hexsha": "5fe59f94d4e06a8d88db218964723fcac49fe187", "size": 20865, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "seobeomjin/vilbert-multi-task", "max_stars_repo_head_hexsha": "20deed798f3ed4c4fcc736742e382e3528d61914", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-13T04:40:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-13T04:40:16.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "seobeomjin/vilbert-multi-task", "max_issues_repo_head_hexsha": "20deed798f3ed4c4fcc736742e382e3528d61914", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "seobeomjin/vilbert-multi-task", "max_forks_repo_head_hexsha": "20deed798f3ed4c4fcc736742e382e3528d61914", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-07T01:42:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T01:42:26.000Z", "avg_line_length": 40.9117647059, "max_line_length": 306, "alphanum_fraction": 0.5998562185, "include": true, "reason": "import numpy", "num_tokens": 4638}
|
make object! [
    doc: "Returns the total hits (display requests) for the snips in the space."
    handle: func [/local total name] [
        total: 0
        foreach name space-dir [
            ;; BUG FIX: the loop variable is `name`, but the original read
            ;; `space-meta-get snip "displays"`, referencing an unrelated
            ;; word `snip`, so the loop variable was never used.
            total: total + to-integer space-meta-get name "displays"
        ]
        to-string total
    ]
]
|
{"hexsha": "070d5e499ca43cb57a6dcc057dcce227e5bfc5d7", "size": 308, "ext": "r", "lang": "R", "max_stars_repo_path": "code/apps/stats/total-views.r", "max_stars_repo_name": "earl/vanilla", "max_stars_repo_head_hexsha": "76e3b6196820810690aa113362dc1c4f34381b85", "max_stars_repo_licenses": ["AFL-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-05-05T18:30:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-08T08:40:00.000Z", "max_issues_repo_path": "code/apps/stats/total-views.r", "max_issues_repo_name": "earl/vanilla", "max_issues_repo_head_hexsha": "76e3b6196820810690aa113362dc1c4f34381b85", "max_issues_repo_licenses": ["AFL-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/apps/stats/total-views.r", "max_forks_repo_name": "earl/vanilla", "max_forks_repo_head_hexsha": "76e3b6196820810690aa113362dc1c4f34381b85", "max_forks_repo_licenses": ["AFL-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-07-30T16:22:00.000Z", "max_forks_repo_forks_event_max_datetime": "2017-07-30T16:22:00.000Z", "avg_line_length": 28.0, "max_line_length": 80, "alphanum_fraction": 0.5616883117, "num_tokens": 74}
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""create mindrecord data for LAMBADA task"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import logging
import numpy as np
from mindspore.mindrecord import FileWriter
from src.utils.tokenization import Tokenizer
def create_instance(tokenizer, text, max_length=None):
    """A single sample instance for LAMBADA task."""
    # Normalize the separator so the context/target split is stable.
    normalized = text.replace(" \t ", "\t ")
    parts = normalized.strip().split("\t")

    # Token count of the context portion alone.
    context_length = len(tokenizer.encode(parts[0]))

    full_text = (parts[0] + parts[1]).strip()
    assert full_text != ""
    print(" | whole sentence: ", full_text)

    token_ids = tokenizer.encode(full_text)
    input_length = len(token_ids)

    output = tokenizer.prepare_for_model(ids=token_ids,
                                         pair_ids=None,
                                         add_special_tokens=True,
                                         max_length=max_length,
                                         padding=True,
                                         truncate_direction="RIGHT",
                                         return_overflowing_tokens=False,
                                         return_attention_mask=True)
    # input_length = <bos> + text_length, not include <eos>
    output["length"] = [context_length + 1] + [input_length + 1]

    # Dump every produced field for manual inspection.
    for key, value in output.items():
        print(key)
        print(value)
    print("==================================")
    return output
def write_instance_to_file(writer, instance):
    """write the instance to file"""
    token_ids = instance["input_ids"]
    attention = instance["attention_mask"]
    assert len(token_ids) == len(attention)
    lengths = instance["length"]  # list

    # Assemble the MindRecord row; key order matches the declared schema.
    record = collections.OrderedDict()
    record["input_ids"] = np.asarray(token_ids)
    record["input_mask"] = np.asarray(attention)
    record["input_length"] = np.asarray(lengths)

    writer.write_raw_data([record])
    return record
def main():
    """Convert a raw LAMBADA text file into a MindRecord dataset.

    Reads one sample per line from --input_file, tokenizes it with the GPT-2
    tokenizer and writes the encoded features to --output_file (split into
    --num_splits partitions).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", type=str, required=True, help='Input raw text file. ')
    parser.add_argument("--output_file", type=str, required=True, help='Output MindRecord file. ')
    parser.add_argument("--num_splits", type=int, default=1,
                        help='The MindRecord file will be split into the number of partition. ')
    parser.add_argument("--max_length", type=int, required=True, help='Maximum sequence length. ')
    parser.add_argument("--vocab_file", type=str, required=True, default='', help='url of gpt2-vocab.json ')
    parser.add_argument("--merge_file", type=str, required=True, default='', help='url of gpt2-merges.txt ')
    args = parser.parse_args()
    tokenizer = Tokenizer(vocab_file=args.vocab_file, merge_file=args.merge_file)
    input_file = args.input_file
    logging.info("***** Reading from input files *****")
    logging.info("Input File: %s", input_file)
    output_file = args.output_file
    logging.info("***** Writing to output files *****")
    logging.info("Output File: %s", output_file)
    writer = FileWriter(output_file, args.num_splits)
    # Schema: variable-length int64 vectors for ids, mask and lengths.
    data_schema = {"input_ids": {"type": "int64", "shape": [-1]},
                   "input_mask": {"type": "int64", "shape": [-1]},
                   "input_length": {"type": "int64", "shape": [-1]},
                   }
    writer.add_schema(data_schema, "lambada-schema")
    total_written = 0
    total_read = 0
    logging.info("***** Reading from %s *****", input_file)
    with open(input_file, "r") as f:
        # Idiomatic line iteration replaces the original while/readline loop.
        for line in f:
            total_read += 1
            if total_read % 500 == 0:
                logging.info("%d ...", total_read)
            output = create_instance(tokenizer, line, args.max_length)
            features = write_instance_to_file(writer, instance=output)
            total_written += 1
            if total_written <= 20:
                # Log the first few examples for manual inspection.
                logging.info("***** Example *****")
                logging.info("input tokens: %s", tokenizer.decode(output["input_ids"][:-1]))
                logging.info("label tokens: %s", tokenizer.decode(output["input_ids"][1:]))
                for feature_name in features.keys():
                    feature = features[feature_name]
                    logging.info("%s: %s", feature_name, feature)
    writer.commit()
    logging.info("Wrote %d total instances", total_written)
if __name__ == "__main__":
    main()
|
{"hexsha": "0ed368bcffad56e2c0bee2ca5722192e4377c538", "size": 5315, "ext": "py", "lang": "Python", "max_stars_repo_path": "research/nlp/gpt2/create_lambada_data.py", "max_stars_repo_name": "leelige/mindspore", "max_stars_repo_head_hexsha": "5199e05ba3888963473f2b07da3f7bca5b9ef6dc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 77, "max_stars_repo_stars_event_min_datetime": "2021-10-15T08:32:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T13:09:11.000Z", "max_issues_repo_path": "research/nlp/gpt2/create_lambada_data.py", "max_issues_repo_name": "leelige/mindspore", "max_issues_repo_head_hexsha": "5199e05ba3888963473f2b07da3f7bca5b9ef6dc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-10-30T14:44:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T06:57:57.000Z", "max_forks_repo_path": "research/nlp/gpt2/create_lambada_data.py", "max_forks_repo_name": "leelige/mindspore", "max_forks_repo_head_hexsha": "5199e05ba3888963473f2b07da3f7bca5b9ef6dc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2021-10-15T08:32:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T18:45:20.000Z", "avg_line_length": 37.695035461, "max_line_length": 108, "alphanum_fraction": 0.609219191, "include": true, "reason": "import numpy", "num_tokens": 1117}
|
from detectron2.utils.visualizer import ColorMode
from detectron2 import model_zoo
from detectron2.modeling import build_model
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data.datasets import register_coco_instances
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator
from detectron2.config import get_cfg
from detectron2.structures import BoxMode
# import some common libraries
import numpy as np
import pandas as pd
import cv2
import random
import torch, torchvision
import json
import os
class visualizeDetection():
    """Visualize Faster R-CNN anatomy detections on MIMIC-CXR images
    using Detectron2.
    """

    def __init__(self) -> None:
        # Hard-coded dataset locations and label sets for the MIMIC-CXR VG export.
        self.filename = '/home/agun/mimic/dataset/VG/data/32a8f331-711282df-420eca1a-f5e8531e-02bc5db2.jpg'
        self.outputdir = "/home/agun/mimic/dataset/VG/"
        self.image_root = "/home/agun/mimic/dataset/VG/data/"
        self.diseaselist = ['lung opacity', 'pleural effusion', 'atelectasis', 'enlarged cardiac silhouette',
                            'pulmonary edema/hazy opacity', 'pneumothorax', 'consolidation', 'fluid overload/heart failure', 'pneumonia']
        self.organs = ["right lung", "right apical zone", "right upper lung zone", "right mid lung zone",
                       "right lower lung zone", "right hilar structures", "right costophrenic angle", "left lung", "left apical zone",
                       "left upper lung zone", "left mid lung zone", "left lower lung zone", "left hilar structures",
                       "left costophrenic angle", "mediastinum", "upper mediastinum", "cardiac silhouette", "trachea"]

    def get_board_dicts(self, imgdir):
        """Load a COCO-style json and adapt it for Detectron2.

        Prefixes file names with the image root, forces XYWH_ABS box mode
        and integer category ids.
        """
        json_file = imgdir  # Fetch the json file
        with open(json_file) as f:
            dataset_dicts = json.load(f)
        for i in dataset_dicts:
            filename = i["file_name"]
            i["file_name"] = self.image_root + filename
            for j in i["annotations"]:
                j["bbox_mode"] = BoxMode.XYWH_ABS  # Setting the required Box Mode
                j["category_id"] = int(j["category_id"])
        return dataset_dicts

    def registerDataset(self):
        """Register train/test/valid splits with Detectron2's catalogs.

        BUG FIX: the original method was declared without `self` (so calling
        `viz.registerDataset()` raised TypeError) and its lambda captured the
        loop variable `filename` by reference, so every split would have
        loaded the last json file.
        """
        for d in ["train", "test", "valid"]:
            filename = os.path.join(self.outputdir, "xray_coco_{}.json".format(d))
            # Bind the json path as a default argument so each registered
            # loader keeps its own file (avoids the late-binding bug).
            DatasetCatalog.register("mimic_cxr_{}".format(d),
                                    lambda path=filename: self.get_board_dicts(path))
            MetadataCatalog.get("mimic_cxr_{}".format(d)).set(thing_classes=self.organs)
        board_metadata = MetadataCatalog.get("mimic_cxr_train")
        print(board_metadata)

    def setup(self):
        """Build the Detectron2 config for inference with trained weights."""
        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))  # Get the basic model configuration from the model zoo
        # Passing the Train and Validation sets
        cfg.DATASETS.TRAIN = ("mimic_cxr_train",)
        cfg.DATASETS.TEST = ("mimic_cxr_test",)
        # Number of data loading threads
        cfg.DATALOADER.NUM_WORKERS = 4
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")  # Let training initialize from model zoo
        # Number of images per batch across all machines.
        cfg.SOLVER.IMS_PER_BATCH = 16
        cfg.SOLVER.BASE_LR = 1e-4  # pick a good LearningRate
        cfg.SOLVER.MAX_ITER = 50000  # No. of iterations
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
        # BUG FIX: `thing_classes` was undefined here; the classes are the
        # anatomical regions this detector is trained on.
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(self.organs)
        cfg.TEST.EVAL_PERIOD = 10000  # No. of iterations after which the Validation Set is evaluated.
        # Use the final weights generated after successful training for inference
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set the testing threshold for this model
        # Pass the validation dataset
        cfg.DATASETS.TEST = ("mimic_cxr_test", )
        return cfg

    def visualizeGT(self):
        # NOTE(review): this method references names that are never defined
        # in this class (`im`, `board_metadata`, `d`, `count`, `outputs`,
        # `v`, `cv2_imshow`) and will raise NameError if called; it looks
        # like a notebook snippet pasted in. Left unchanged pending rewrite.
        visualizer = Visualizer(im[:, :, ::-1], metadata=board_metadata)
        vis = visualizer.draw_dataset_dict(d)
        cv2.imwrite("train_{}.png".format(str(count)), vis.get_image()[:, :, ::-1])
        print(outputs["instances"])
        cv2.imwrite("train_{}.png".format(str(count)), vis.get_image()[:, :, ::-1])
        cv2_imshow(v.get_image()[:, :, ::-1])

    def visualizeDetection(self):
        """Run the detector on self.filename, draw boxes for selected
        anatomies and save per-anatomy crops under ./imgs/.

        NOTE(review): relies on the module-level global `predictor` created
        in __main__ — it must exist before this method is called.
        """
        count = 0
        im = cv2.imread(self.filename)
        outputs = predictor(im)
        print(outputs)
        # float matrix of Nx4. Each row is (x1, y1, x2, y2).
        boxes = outputs["instances"].pred_boxes.tensor.to('cpu').numpy()
        objects = outputs["instances"].pred_classes.to('cpu').numpy()
        print(boxes)
        lst = ["right costophrenic angle", "cardiac silhouette"]
        # red, green, blue,
        clrs = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [238, 130, 238], [255, 165, 0]]
        for box, obj in zip(boxes, objects):
            # BUG FIX: `thing_classes` was undefined; class ids index into
            # the anatomy list used when registering the dataset.
            anatomy = self.organs[obj]
            if anatomy in lst:
                image = cv2.imread(self.filename)
                start_point = (int(box[0]), int(box[1]))
                end_point = (int(box[2]), int(box[3]))
                color = clrs[lst.index(anatomy)]
                image = cv2.rectangle(image, start_point, end_point, color, 10)
                im = cv2.rectangle(im, start_point, end_point, color, 10)
                crop = image[int(box[1]):int(box[3]), int(box[0]):int(box[2])]
                cv2.imwrite("./imgs/{}.png".format(str(anatomy)), crop)
        cv2.imwrite("./imgs/viz2.png", im)
if __name__ == '__main__':
    # Build the predictor from the saved config/weights, then visualize
    # detections on the hard-coded example image.
    viz = visualizeDetection()
    cfg = viz.setup()
    # NOTE(review): `predictor` is left as a module-level global that
    # visualizeDetection() reads; it must be created before that call.
    predictor = DefaultPredictor(cfg)
    viz.registerDataset()
    # NOTE(review): `dataset_dicts` is computed but never used afterwards —
    # presumably left over from a ground-truth visualization step.
    dataset_dicts = viz.get_board_dicts("../dataset/VG/xray_coco_test.json")
    viz.visualizeDetection()
|
{"hexsha": "5184b5329292a4b6e351bcb46018f57590b5ea23", "size": 6025, "ext": "py", "lang": "Python", "max_stars_repo_path": "detection/visualize.py", "max_stars_repo_name": "Nkechinyere-Agu/AnaXnet-Original", "max_stars_repo_head_hexsha": "7719bd9f90d0f0ca4506d41def8ebc1e21f8f2c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "detection/visualize.py", "max_issues_repo_name": "Nkechinyere-Agu/AnaXnet-Original", "max_issues_repo_head_hexsha": "7719bd9f90d0f0ca4506d41def8ebc1e21f8f2c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "detection/visualize.py", "max_forks_repo_name": "Nkechinyere-Agu/AnaXnet-Original", "max_forks_repo_head_hexsha": "7719bd9f90d0f0ca4506d41def8ebc1e21f8f2c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.8174603175, "max_line_length": 156, "alphanum_fraction": 0.6506224066, "include": true, "reason": "import numpy", "num_tokens": 1543}
|
// Copyright (c) 2019 Bitcoin Association
// Distributed under the Open BSV software license, see the accompanying file LICENSE.
#include "test/test_bitcoin.h"
#include "checkqueuepool.h"
#include "taskcancellation.h"
#include <boost/test/unit_test.hpp>
#include <boost/thread/thread.hpp>
#include <array>
#include <atomic>
#include <future>
#include <mutex>
#include <thread>
namespace
{
    /**
     * Validator whose call operator spins (sleeping in 100ms slices) until
     * the external atomic flag it observes becomes false; simulates a long
     * running validation that finishes only once unblocked from outside.
     */
    struct CBlockingValidator
    {
        CBlockingValidator() = default;
        CBlockingValidator(std::atomic<bool>& blocking)
            : mBlocking{&blocking}
        {/**/}

        std::optional<bool> operator()(const task::CCancellationToken&)
        {
            using namespace std::chrono_literals;
            while(mBlocking->load())
            {
                std::this_thread::sleep_for(100ms);
            }
            return true;
        }

        void swap(CBlockingValidator& other)
        {
            // Exchange the observed-flag pointers between the two validators.
            std::atomic<bool>* previous = mBlocking;
            mBlocking = other.mBlocking;
            other.mBlocking = previous;
        }

        std::atomic<bool>* mBlocking;
    };

    /** Validator that always succeeds immediately. */
    struct CDummyValidator
    {
        std::optional<bool> operator()(const task::CCancellationToken&)
        {
            return true;
        }
        void swap(CDummyValidator&) {/**/}
    };

    /**
     * Validator that busy-waits until its cancellation token fires and then
     * yields no result (empty optional).
     */
    struct CCancellingValidator
    {
        std::optional<bool> operator()(const task::CCancellationToken& token)
        {
            while(!token.IsCanceled());
            return {};
        }
        void swap(CCancellingValidator&) {/**/}
    };
}
BOOST_FIXTURE_TEST_SUITE(checkqueue_tests, BasicTestingSetup)
// Construct and tear down a check queue on a separate thread and verify the
// teardown finishes within 5 seconds, i.e. worker threads really exit once
// interrupted.
BOOST_AUTO_TEST_CASE(check_queue_termination)
{
    std::atomic<bool> running = false;
    auto future =
        std::async(
            std::launch::async,
            [&running]
            {
                running = true;
                boost::thread_group threadGroup;
                CCheckQueue<CDummyValidator> check{4, threadGroup, 1, ""};
                // worker threads expect to be terminated by the interrupt signal
                threadGroup.interrupt_all();
                threadGroup.join_all();
            });
    using namespace std::chrono_literals;
    // Spin until the async task has actually started before timing the wait.
    while(!running.load())
    {
        std::this_thread::sleep_for(100ms);
    }
    // 5s is a generous upper bound for queue construction plus teardown.
    BOOST_CHECK(future.wait_for(5s) == std::future_status::ready);
}
// Start 20 blocking checks, request worker-thread termination while they are
// still running, then unblock them: every check must still be validated.
BOOST_AUTO_TEST_CASE(removal_of_threads_during_processing)
{
    boost::thread_group threadGroup;
    CCheckQueue<CBlockingValidator> check{4, threadGroup, 1, ""};
    constexpr size_t checksNumber = 20;
    // One blocking flag per validator; all start blocked (true).
    std::array<std::atomic<bool>, checksNumber> blocking;
    std::vector<CBlockingValidator> checks;
    for(size_t i=0; i<checksNumber; ++i)
    {
        blocking[i] = true;
        checks.emplace_back(blocking[i]);
    }
    auto source = task::CCancellationSource::Make();
    check.StartCheckingSession(source->GetToken());
    check.Add(checks);
    // Issue the termination request while validators are still blocked.
    threadGroup.interrupt_all();
    // Unblock all validators so the session can run to completion.
    for(auto& b : blocking)
    {
        b = false;
    }
    threadGroup.join_all();
    // we expect that everything will be validated even though thread
    // termination request was issued during execution
    auto result = check.Wait();
    BOOST_CHECK(result.has_value() && result.value());
}
// Cancel the session before asking for the result: Wait() must come back
// with an empty optional rather than a validation verdict.
BOOST_AUTO_TEST_CASE(premature_validation_cancellation)
{
    boost::thread_group threadGroup;
    CCheckQueue<CCancellingValidator> check{4, threadGroup, 1, ""};
    std::vector<CCancellingValidator> checks(20);
    auto source = task::CCancellationSource::Make();
    check.StartCheckingSession(source->GetToken());
    check.Add(checks);
    // Cancel while the validators are still spinning on the token.
    source->Cancel();
    // we expect that validation will be terminated without result as we quit
    // before we tried to get to result
    auto result = check.Wait();
    threadGroup.interrupt_all();
    threadGroup.join_all();
    BOOST_CHECK(!result.has_value());
}
// A pool of check queues must shut down cleanly when its worker threads are
// interrupted right after construction.
BOOST_AUTO_TEST_CASE(check_queue_pool_termination)
{
    boost::thread_group workers;
    checkqueue::CCheckQueuePool<CDummyValidator, int> pool{
        4, workers, 1, 4};
    // worker threads expect to be terminated by the interrupt signal
    workers.interrupt_all();
    workers.join_all();
}
// Exhaust the pool with four checkers, then request a fifth: the pool should
// implicitly cancel the lowest-priority ("worst") checker so its queue can be
// reused, while the remaining checkers still produce valid results.
BOOST_AUTO_TEST_CASE(premature_implicit_cancellation_and_reusing_the_worst_checker)
{
    boost::thread_group threadGroup;
    checkqueue::CCheckQueuePool<CDummyValidator, int> scriptCheckQueuePool{
        4, threadGroup, 1, 4};
    auto source = task::CCancellationSource::Make();
    // Token through which the pool signals cancellation of the worst checker.
    std::optional<task::CCancellationToken> worstCancellationToken;
    auto checkerWorst =
        scriptCheckQueuePool.GetChecker(
            1,
            source->GetToken(),
            &worstCancellationToken);
    auto checker2 = scriptCheckQueuePool.GetChecker(2, source->GetToken());
    auto checker3 = scriptCheckQueuePool.GetChecker(3, source->GetToken());
    auto checker4 = scriptCheckQueuePool.GetChecker(4, source->GetToken());
    // we need a lock since we access checkerWorst from two threads and checker
    // is not thread safe
    std::mutex worstWaitSyncLock;
    // queue is returned to the pool only after checker goes out of scope or
    // Wait() is called on it so we need to run it on a different thread
    auto future =
        std::async(
            std::launch::async,
            [
                &worstWaitSyncLock,
                &checkerWorst,
                token = std::move(worstCancellationToken.value())]
            {
                // wait until pool requests the cancellation
                while(!token.IsCanceled());
                std::lock_guard lock{worstWaitSyncLock};
                BOOST_CHECK(!checkerWorst.Wait().has_value());
            });
    // since we do not have any idle checkers left in the pool checkerWorst
    // should be terminated by the pool without blocking
    auto checkerBest = scriptCheckQueuePool.GetChecker(5, source->GetToken());
    {
        std::lock_guard lock{worstWaitSyncLock};
        BOOST_CHECK(!checkerWorst.Wait().has_value());
    }
    // The surviving checkers must all report successful validation.
    BOOST_CHECK(checker2.Wait().value());
    BOOST_CHECK(checker3.Wait().value());
    BOOST_CHECK(checker4.Wait().value());
    BOOST_CHECK(checkerBest.Wait().value());
    threadGroup.interrupt_all();
    threadGroup.join_all();
}
// Wait() without an active checking session is a programming error and must
// throw; once a session is started the same call succeeds.
BOOST_AUTO_TEST_CASE(checkqueue_invalid_use__call_wait_before_session)
{
    CCheckQueue<CDummyValidator> queue{128};
    BOOST_CHECK_THROW(queue.Wait(), std::runtime_error);
    queue.StartCheckingSession(
        task::CCancellationSource::Make()->GetToken());
    queue.Wait();
}
// Add() before a session is started must throw; after StartCheckingSession
// the same Add() is accepted.
BOOST_AUTO_TEST_CASE(checkqueue_invalid_use__call_add_before_session)
{
    CCheckQueue<CDummyValidator> queue{128};
    std::vector validators{CDummyValidator{}};
    BOOST_CHECK_THROW(queue.Add(validators), std::runtime_error);
    queue.StartCheckingSession(
        task::CCancellationSource::Make()->GetToken());
    queue.Add(validators);
    queue.Wait();
}
// Once Wait() has closed the session, a further Add() must throw.
BOOST_AUTO_TEST_CASE(checkqueue_invalid_use__call_add_after_wait)
{
    CCheckQueue<CDummyValidator> queue{128};
    std::vector validators{CDummyValidator{}};
    queue.StartCheckingSession(
        task::CCancellationSource::Make()->GetToken());
    queue.Add(validators);
    // Refill the container before the illegal post-Wait Add() attempt.
    validators = {CDummyValidator{}};
    queue.Wait();
    BOOST_CHECK_THROW(queue.Add(validators), std::runtime_error);
}
// Starting a second session while the first is still open must throw; after
// Wait() closes the first session a new one may be started.
BOOST_AUTO_TEST_CASE(checkqueue_invalid_use__call_second_session_before_wait)
{
    CCheckQueue<CDummyValidator> queue{128};
    queue.StartCheckingSession(
        task::CCancellationSource::Make()->GetToken());
    BOOST_CHECK_THROW(
        queue.StartCheckingSession(
            task::CCancellationSource::Make()->GetToken()),
        std::runtime_error);
    queue.Wait();
    queue.StartCheckingSession(
        task::CCancellationSource::Make()->GetToken());
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "4812e003faddf6a70b68017bfa3982952ea563a3", "size": 8183, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/test/checkqueue_tests.cpp", "max_stars_repo_name": "bxlkm1/yulecoin", "max_stars_repo_head_hexsha": "3605faf2ff2e3c7bd381414613fc5c0234ad2936", "max_stars_repo_licenses": ["OML"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2019-08-02T02:49:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T15:51:48.000Z", "max_issues_repo_path": "src/test/checkqueue_tests.cpp", "max_issues_repo_name": "bxlkm1/yulecoin", "max_issues_repo_head_hexsha": "3605faf2ff2e3c7bd381414613fc5c0234ad2936", "max_issues_repo_licenses": ["OML"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test/checkqueue_tests.cpp", "max_forks_repo_name": "bxlkm1/yulecoin", "max_forks_repo_head_hexsha": "3605faf2ff2e3c7bd381414613fc5c0234ad2936", "max_forks_repo_licenses": ["OML"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-08-02T02:50:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-28T03:21:38.000Z", "avg_line_length": 29.225, "max_line_length": 86, "alphanum_fraction": 0.6655260907, "num_tokens": 1806}
|
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import numpy as np
# ---------------------------------------------------------------------------
# Module-level model and constant setup.
# BUG FIX: the original first line was "%matplotlib inline" — IPython magic,
# not valid Python syntax in a plain .py file; it made the script unrunnable
# and has been removed.
# ---------------------------------------------------------------------------

# Pre-trained Keras expression classifier (CK dataset).
model_pred = tf.keras.models.load_model('CKmodel.h5')
model_pred.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

# Haar cascade for frontal face detection.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Expression labels in the model's output-index order.
exp = ['Angry','Happy','Sad','Surprise']

# Mean BGR values used by the Caffe age/gender models for input normalisation.
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)

age_list = ['(0 - 2)', '(4 - 6)', '(8 - 12)', '(15 - 20)', '(25 - 32)', '(38 - 43)', '(48 - 53)', '(60 - 100)']
gender_list = ['Male', 'Female']
def load_caffe_models():
    """Load the pre-trained Caffe age and gender classification networks."""
    age = cv2.dnn.readNetFromCaffe('deploy_age.prototxt', 'age_net.caffemodel')
    gender = cv2.dnn.readNetFromCaffe('deploy_gender.prototxt', 'gender_net.caffemodel')
    return age, gender


age_net, gender_net = load_caffe_models()
def detect_face(img):
    """Detect faces in a BGR frame and annotate each with the predicted
    expression, age and gender.

    Relies on the module-level `face_cascade`, `model_pred`, `age_net`,
    `gender_net`, label lists and `MODEL_MEAN_VALUES`. Returns an annotated
    copy of `img`; the input frame itself is not modified.
    """
    face_img = img.copy()
    face_rects = face_cascade.detectMultiScale(face_img)
    for (x, y, w, h) in face_rects:
        # BUG FIX: crop from the *original* image so rectangles drawn for
        # this or earlier faces do not bleed into the classifier input (the
        # old code cropped from the already-annotated copy).
        crop_img = img[y:y+h, x:x+w]
        cv2.rectangle(face_img, (x, y), (x+w, y+h), (255, 255, 255), 2)
        # Expression prediction on a 48x48 crop.
        crop = cv2.resize(crop_img, (48, 48))
        crop = crop.reshape(1, 48, 48, 3).astype('float64')
        pred = model_pred.predict(crop)
        # Random text colour per face.
        n1 = int(np.random.randint(0, 256, 1))
        n2 = int(np.random.randint(0, 256, 1))
        n3 = int(np.random.randint(0, 256, 1))
        text = exp[pred.argmax()]
        cv2.putText(face_img, text, (x, y-30), fontFace=cv2.FONT_ITALIC,
                    fontScale=2, color=[n1, n2, n3], thickness=5)
        # Age/gender prediction on a 227x227 crop (Caffe model input size).
        blob = cv2.dnn.blobFromImage(cv2.resize(crop_img, (227, 227)), 1, (227, 227),
                                     MODEL_MEAN_VALUES, swapRB=False)
        age_net.setInput(blob)
        age_preds = age_net.forward()
        age = age_list[age_preds[0].argmax()]
        text = 'Age : ' + age
        cv2.putText(face_img, text, (x-20, y+h+50), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=[0, 220, 0], thickness=3)
        gender_net.setInput(blob)
        gender_preds = gender_net.forward()
        gender = gender_list[gender_preds[0].argmax()]
        text = 'Gender : ' + gender
        cv2.putText(face_img, text, (x+w+2, y+int(h/2)+20), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=[0, 0, 255], thickness=3)
    # BUG FIX: the original `flag` + if/else was dead code — both branches
    # returned the same value. A single return is equivalent.
    return face_img
# ---------------------------------------------------------------------------
# Webcam capture loop.
# BUG FIX: the original queried `cap.get(...)` before `cap` was created,
# which raised NameError. The capture device must be opened first.
# ---------------------------------------------------------------------------
cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
writer = cv2.VideoWriter('video_out.mp4', cv2.VideoWriter_fourcc(*'XVID'), 15, (width, height))

while True:
    ret, frame = cap.read()
    frame = detect_face(frame)
    writer.write(frame)
    cv2.imshow('Video Face Detection', frame)
    # Quit when the user presses 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
writer.release()
cv2.destroyAllWindows()
|
{"hexsha": "93264d095e9c6004150c1e29a9f1d790900ddb94", "size": 3174, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_working.py", "max_stars_repo_name": "abhinavg8/Real-Time--Face-Analysis", "max_stars_repo_head_hexsha": "7b18a1308347743ea12d29fc7c3039ad7b172f32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-06-26T17:18:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-13T19:21:54.000Z", "max_issues_repo_path": "model_working.py", "max_issues_repo_name": "abhinavg8/Face-Emotion-Analysis", "max_issues_repo_head_hexsha": "7b18a1308347743ea12d29fc7c3039ad7b172f32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_working.py", "max_forks_repo_name": "abhinavg8/Face-Emotion-Analysis", "max_forks_repo_head_hexsha": "7b18a1308347743ea12d29fc7c3039ad7b172f32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-23T12:21:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-03T12:21:11.000Z", "avg_line_length": 32.7216494845, "max_line_length": 112, "alphanum_fraction": 0.5957781979, "include": true, "reason": "import numpy", "num_tokens": 876}
|
\section{Recurrent Neural Networks}
\label{sec:rnn}
Recurrent Neural Networks (RNNs) are one of the most commonly used typologies of neural networks~\cite{lecun2015deep}. In recent years, thanks to advancements in their architecture~\cite{hochreiter1997long,chung2014empirical} and in computational power, they have become the standard to effectively model sequential data. They have been used successfully for tasks such as sentiment analysis~\cite{tang2015document}, speech recognition~\cite{graves2013speech}, image captioning~\cite{karpathy2015deep}, predicting tourist paths~\cite{palumbo2017predicting} and neural language models~\cite{mikolov2010recurrent}. One of the typical applications of RNNs is language modeling, i.e. the task of learning a probabilistic model of text in order to generate new text by recursively predicting the next word in a sentence~\cite{sutskever2011generating}. We use RNNs, more specifically Long-Short Term Memory (LSTM) cells~\cite{hochreiter1997long}, in a similar vein to the language modeling problem, i.e. training the network to predict the next track in a playlist and sampling tracks from the learned probability model to generate predictions. In practice, rather than using only the track as input, we use a richer representation that also exploits the artist, the album, the title and, possibly, lyrics features (Figure~\ref{fig:global_architecture}).
In the following sections, we describe in detail the input features as well as the generation strategy.
\begin{figure*}
\centering
\includegraphics[width=0.7\textwidth, height=0.35\textwidth]{figures/rnn.pdf}
\caption{RNN architecture for playlist completion. The input vectors include word2vec embeddings for the track, the album, and the artist, a fastText embedding for the playlist title and numerous features extracted from the lyrics.}
\label{fig:global_architecture}
\end{figure*}
\subsection{Input Vectors}
\subsubsection{Track, Album and Artist Embeddings}
\label{sec:track_embs}
In order to leverage the information in the dataset concerning tracks, artists and albums, we opt for an approach based on word2vec~\cite{mikolov2013distributed} embeddings. More precisely, we train the word2vec model separately on sequences of tracks, albums and artists in the order of appearance in the playlist, obtaining three separated word2vec models encoding co-occurrence patterns of tracks, albums and artists respectively. Each word2vec model is based on the Skip-gram model with negative sampling using default hyper-parameters of the Gensim implementation~\cite{rehurek_lrec}: embedding vector dimension is $d=100$, learning rate $\alpha = 0.025$ linearly decaying up to $min_{\alpha} = 0.0001$, window size $c = 5$, number of epochs is $\eta = 5$.
We concatenate the three representations of the tracks, albums and artists, obtaining an input vector $x_{w2v}$ whose dimensionality is $|x_{w2v}| = 300$.
\subsubsection{Titles Embeddings}
\label{sec:title_embs}
The title of a playlist can potentially contain interesting information about the intention and the purpose of its creator. The title can suggest that the tracks in certain playlist are intended to suit a certain goal (e.g. \textit{party}, \textit{workout}), a mood (\textit{sad songs}, \textit{relaxing}), a genre (\textit{country}, \textit{reggae}), or a topic (\textit{90's}, \textit{Christmas}). Our intuition, supported by the experiments described later in this section, is that playlists with similar titles may contain similar tracks.
The title similarity could rely on pre-trained models and thesauri. However, we opted for computing a model that is specific for the playlist continuation task, using the sole data of the MPD.
A playlist embedding $p_{w2v}$ is computed as the mean of the embeddings of the tracks composing the playlist, already generated in Section \ref{sec:track_embs}. The playlist embeddings are then grouped in $n$ clusters, applying the K-means algorithm.
We empirically observed that, apart from very general clusters, we also created clusters containing specialized playlists, obtaining as a consequence groups of titles that belong to the same semantic area. For example, a cluster contains playlists like \textit{Christmas feels}, \textit{December} or with titles including the emoji of Santa Claus, while another group encompasses playlists like \textit{country} and \textit{Alabama}.
Each cluster $c$ expresses a composed label, which is the concatenation of the titles of all the playlists $p \in c$ separated by a blank space. These labels can be seen as a corpus of $n$ documents (one for each cluster) that is used as input for the fastText algorithm~\cite{joulin2016fasttext}. Because this algorithm is able to represent textual information at the level of n-grams from 3 to 6 characters, the resulting Title2Rec model can compute the embedding of any playlist title, whether it has already been seen in the dataset or is totally unknown. Figure~\ref{fig:t2r_pipeline} illustrates the process of the Title2Rec model generation.
% source: https://docs.google.com/presentation/d/1KV4eFuYvFxS1Z25ZwOqu5TJ2zuBbeRAZAkR2Hbf8h_A/edit#slide=id.g3d7920112c_0_0
\begin{figure*}
\centering
\includegraphics[width=0.85\textwidth]{figures/t2r.pdf}
\caption{Pipeline for generating the title embedding model used in Title2Rec. The embeddings are computed through a fastText model trained on a corpus of concatenated titles of similar playlists.}
\label{fig:t2r_pipeline}
\end{figure*}
\subsubsection{Lyrics Embeddings}
\label{sec:lyrics}
Since playlists contain tracks that share semantic properties (such as the genre) and acoustic properties (such as the mood), we hypothesize their lyrics share features as well. To this end, we extract numerous features from the lyrics for a large set of tracks used in the MPD dataset ($v \in \mathbb{R}^{n}$) that describe different stylistic and linguistic dimensions of a song text:
\begin{itemize}
\item \textit{vocabulary} ($v \in \mathbb{R}$): as a measure of the vocabulary richness, we compute the type-token ratio of a song text.
\item \textit{style} ($v \in \mathbb{R}^{27}$): to estimate the linguistic style of a song text, we measure the line lengths (in characters and in tokens) and the frequencies of all major part-of-speech tags. We further count rhyme occurrences and \qu{echoisms} (sung words like \qu{laaalala} and \qu{yeeeeeeeaaaaaaah}).
\item \textit{semantics} ($v \in \mathbb{R}^{60}$): we build a topic model with 60 topics on the song text bag of words using Latent Dirichlet Allocation~\cite{LDA}. Each song text is then represented by its association to these topics.
\item \textit{orientation} ($v \in \mathbb{R}^{3}$): this dimension models how the song narrative (entities, events) is oriented with respect to the world. We encode a temporal dimension, i.e. whether the song mainly recounts past experiences or present/future ones, by representing the fraction of past tense verb forms to all verb forms as a feature.
\item \textit{emotion} ($v \in \mathbb{R}^{6}$): we model the subjectivity (subjective vs. objective) as well as the polarity (positive vs. negative) of the song text. Furthermore, the emotions conveyed are modelled in a common two-dimensional model that accounts for degrees of arousal and valence.
\item \textit{song structure} ($v \in \mathbb{R}^{4}$): as a proxy of the structure of the lyrics, we use the line lengths as well as the lengths of paragraphs in the song text.
\end{itemize}
For experimental purposes, we grouped the previous features in two additional categories:
\begin{itemize}
\item \textit{deterministic} ($v \in \mathbb{R}^{23}$): it encompasses all features generated in a deterministic way such as features related to the structure, the vocabulary, and the style of the lyrics. We excluded from this group the frequencies of part-of-speech tags, as they depend on the tagger used.
\item \textit{fuzzy} ($v \in \mathbb{R}^{18}$): it includes the features generated in a non-deterministic fashion such as orientation, emotion, and the frequencies of part-of-speech tags.
\end{itemize}
All features are scaled using a custom feature scaler that combines two elements: i) account for outliers by scaling the data non-linearly based on the percentile of the feature value distribution they belong to; ii) scale the data linearly to the same $[-1,1]$ interval that non-lyrics features live in.
Retrieving lyrics for the MPD dataset is achieved by linking it to the WASABI corpus~\cite{meseguerbrocal:hal-01589250}.\footnote{\url{https://wasabi.i3s.unice.fr}} The WASABI corpus is an ongoing resource that contains 2.1M song texts (of 77k artists), and for each song it provides the following information: the lyrics extracted from \url{http://lyrics.wikia.com}, the synchronized lyrics (when available) from \url{http://usdb.animux.de}, DBpedia abstracts and categories the song belongs to, genre, label, writer, release date, awards, producers, artist and/or band members, the stereo audio track from Deezer (when available), the unmixed audio tracks of the song, its ISRC, BPM, and duration. In total, we linked 416k tracks in MPD (out of 2.2M unique tracks) to WASABI tracks that contain the lyrics. While the linked tracks proportion with $\sim$20\% seems small, the linked tracks cover 53\% of all 66M track occurrences in MPD because of the typical fat-tailed distribution, where some songs are extremely common while most titles occur only rarely in a playlist. Linking the lyrics was done in three levels of accuracy: direct Spotify URI matching gave us 155k links, exact artist and title matching provided 334k matches, and finally lower casing and deleting bracketed content (in song titles only) led to 51k matches. As the results overlap we ended up with 416k matched tracks in total. Some of our lyrics features are language-specific, so we decided to compute lyrics features exclusively on English song texts. This finally resulted in 367k English song texts we computed lyrical features on. Language detection is done with the \textit{langdetect} package\footnote{\url{https://github.com/Mimino666/langdetect}} and datasets of MPD and WASABI are merged along the axes of their Spotify URIs, artist names, song title names, respectively.
\subsection{Learning Model}
As mentioned earlier, we address the problem of playlist continuation as a language modeling problem. More specifically, we train the RNN to predict the next track in a playlist, defining the targets $Y$ to be the inputs $X$ shifted in time, i.e. $X = \{(\hat{T{^j}}_0, \hat{T{^j}}_1, \dots, \hat{T^{j}}_{N_j -1})\}$ and $Y = \{(T{^j}_1, T{^j}_2, \dots, T^{j}_{N_j})\}$ where $\hat{T}$ represents a track and its metadata (artist, album, playlist title, lyrics features), $T$ represents a track id in a playlist, $j = 1, \dots, M$ is a playlist index and $N_j$ is the length of the j-\textit{th} playlist. In this way, we train the model to learn a probability distribution of the next track $P (T_N | \hat{T}_{N-1}, \hat{T}_{N-2}, \dots, \hat{T}_{0})$ given the previous ones, which is parametrized by the network outputs that are converted into probabilities by the final softmax layer (Figure~\ref{fig:global_architecture}). The training algorithm attempts to minimize the cross-entropy loss function $L$, that measures the disagreement between the learned probability model and the observed probability model of the targets $Y$. The perplexity metric that is reported in the experiments (Section~\ref{sec:rnn-opt}) corresponds to $ppl = 2^{L}$. In practice, rather than using probabilities, we use the `logits' $p_i$ where $i$ is a track index, un-normalized scores that are proportional to the probabilities. Different optimization algorithms to minimize the loss are empirically compared (Section~\ref{sec:rnn-opt}).
\subsection{Generating predictions}
\label{sec:generation}
We experiment with three different strategies to generate track predictions from the RNN. Given an input seed and the hidden state, the trained model outputs the logits $p_i$, i.e. un-normalized scores that are proportional to the probability that a given track appears after the sequence of seeds $s$. In detail, we considered the following approaches, as depicted in Figure~\ref{fig:predictions}.
\begin{description}
\item[do\_sample] It samples the track with the highest logit $p_i$, where $\hat{i} = arg\ max ({p_i})$, given the set of seeds $s$. It adds the sampled track $\hat{i}$ to the seeds $s$, then it repeats the previous operations until 500 tracks are sampled.
\item[do\_rank] It ranks the tracks according to their logit value $p_i$, given all the seeds $s$, then it selects the top-500 tracks with the highest logit.
\item[do\_summed\_rank] It computes the logits $p_i$ for every seed. It averages all the logits in the sequence obtaining $\hat{p_i}$ and then it ranks the tracks according to the values of $\hat{p_i}$.
\end{description}
\begin{figure}
\centering
\begin{subfigure}{0.4\textwidth}
\includegraphics[width=\textwidth]{figures/sample.pdf}
\caption{do\_sample}
\end{subfigure}
\begin{subfigure}{0.4\textwidth}
\includegraphics[width=\textwidth]{figures/rank.pdf}
\caption{do\_rank}
\end{subfigure}
\begin{subfigure}{0.4\textwidth}
\includegraphics[width=\textwidth]{figures/summed.pdf}
\caption{do\_summed\_rank}
\end{subfigure}
\caption{Three strategies for generating track predictions.}
\label{fig:predictions}
\end{figure}
|
{"hexsha": "6c45741717b1b7eec0715e642e40e568b47454c9", "size": 13500, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/sections/rnn.tex", "max_stars_repo_name": "D2KLab/recsys18_challenge", "max_stars_repo_head_hexsha": "5cd47d1b9df2a2bccad2889ba1d570d5a8dd0f8d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-11-09T14:04:04.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-29T15:56:45.000Z", "max_issues_repo_path": "paper/sections/rnn.tex", "max_issues_repo_name": "D2KLab/recsys18_challenge", "max_issues_repo_head_hexsha": "5cd47d1b9df2a2bccad2889ba1d570d5a8dd0f8d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/sections/rnn.tex", "max_forks_repo_name": "D2KLab/recsys18_challenge", "max_forks_repo_head_hexsha": "5cd47d1b9df2a2bccad2889ba1d570d5a8dd0f8d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-12-11T03:03:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T20:09:29.000Z", "avg_line_length": 146.7391304348, "max_line_length": 1857, "alphanum_fraction": 0.7804444444, "num_tokens": 3366}
|
#include <boost/test/unit_test.hpp>
#include <cpp-utils/pattern/registry.h>
#include <cpp-utils/algorithm/container.h>
using namespace cpp;
namespace {
// Test interface: a module that can be initialized. Concrete subclasses
// bump `init_called` so the test below can verify init() ran exactly once.
class Module
{
public:
    virtual ~Module() = default;
    virtual void init() = 0;
    int init_called = 0; // incremented by concrete init() implementations
};
// Concrete module used to exercise the registry.
class MyModule1 : public Module
{
    void init() override { init_called++; }
};
// Second concrete module; registered alongside MyModule1 so the test can
// check that the registry enumerates more than one entry.
class MyModule2 : public Module
{
    void init() override { init_called++; }
};
// Static registration: constructing these elements at namespace scope adds
// the module types to the global registry<Module> before main() runs.
registry<Module>::element<MyModule1> MyModuleNode1("module1", "description1");
// NOTE(review): "description1" is reused here — possibly intended to be
// "description2". Descriptions are not asserted on below; confirm intent.
registry<Module>::element<MyModule2> MyModuleNode2("module2", "description1");
} // namespace
BOOST_AUTO_TEST_SUITE(Registry_Tests)
// End-to-end registry usage: enumerate the statically registered entries,
// check their names, instantiate every module and verify each was
// initialized exactly once.
BOOST_AUTO_TEST_CASE(default_use_case)
{
    auto entries = registry<Module>::entries();
    // both statically registered modules are visible
    BOOST_CHECK_EQUAL(2, std::distance(entries.begin(), entries.end()));
    std::vector<std::string> names;
    cpp::transform(entries, names, [](auto& entry) { return entry.name(); });
    cpp::sort(names); // registration order is unspecified; sort before comparing
    BOOST_CHECK(
        cpp::equal(names, make_list({"module1", "module2"}))
    );
    std::vector<std::unique_ptr<Module>> modules;
    cpp::transform(entries, modules, [](auto& entry) { return entry.create(); });
    cpp::for_each(modules, [](auto& module){ module->init(); });
    // each freshly created instance has had init() called exactly once
    BOOST_CHECK(
        cpp::all_of(modules, [](auto& module){ return module->init_called == 1; })
    );
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "f3140907da54e34e384f1822a5768483fd8dce27", "size": 1328, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/pattern/registry.cpp", "max_stars_repo_name": "R1tschY/cpp-utils", "max_stars_repo_head_hexsha": "d03c16cf0c2503770a201b9499a9aebbcdd0a694", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/pattern/registry.cpp", "max_issues_repo_name": "R1tschY/cpp-utils", "max_issues_repo_head_hexsha": "d03c16cf0c2503770a201b9499a9aebbcdd0a694", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/pattern/registry.cpp", "max_forks_repo_name": "R1tschY/cpp-utils", "max_forks_repo_head_hexsha": "d03c16cf0c2503770a201b9499a9aebbcdd0a694", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.298245614, "max_line_length": 79, "alphanum_fraction": 0.6935240964, "num_tokens": 340}
|
from typing import List, Dict
import numpy as np
import torch as t
import torch.nn.functional as F
from keras_preprocessing import sequence
from sklearn.feature_extraction.text import HashingVectorizer
from unify_eval.model.mixins.classification import Classifier
from unify_eval.model.types import Tensor
from unify_eval.utils.label_mapper import LabelMapper
class FastText(t.nn.Module):
    """
    Pytorch implementation of FastText.

    Pools ngram-index bags through an EmbeddingBag and feeds the pooled
    representation into an arbitrary classifier head.
    """

    def __init__(self, embedding_bag: t.nn.EmbeddingBag, clf: t.nn.Module) -> None:
        super().__init__()
        self.embedding_bag = embedding_bag
        self.clf = clf

    def forward(self, indices: t.Tensor) -> t.Tensor:
        """Map a batch of ngram index sequences to classifier outputs."""
        pooled = self.embedding_bag(indices)
        return self.clf(pooled)
class Ensemble(t.nn.Module):
    """
    Weighted Ensemble of different sub-models.

    Stacks the per-model outputs along a trailing axis and combines them
    with a learned linear weighting over the model dimension.
    """

    def __init__(self, models: List[t.nn.Module]) -> None:
        super().__init__()
        self.models = t.nn.ModuleList(models)
        self.model_weights = t.nn.Linear(in_features=len(models), out_features=1)

    def forward(self, x: t.Tensor) -> t.Tensor:
        """Combine all sub-model outputs into a single (batch, n_out) tensor."""
        # (batch, n_out, n_models)
        stacked = t.stack([sub_model(x) for sub_model in self.models], dim=-1)
        # collapse the model axis via the learned weights -> (batch, n_out)
        weighted = self.model_weights(stacked)
        return weighted.view((-1, stacked.shape[-2]))
class EnsembleModel(Classifier):
    """
    Text classifier that implements a learned linear combination over simpler models.
    """

    def __init__(self,
                 label_mapper: LabelMapper,
                 ensemble: Ensemble,
                 hashing_vectorizer: HashingVectorizer,
                 max_features: int,
                 text_kw: str = "texts",
                 label_kw: str = "labels"
                 ):
        """
        :param label_mapper: LabelMapper instance mapping label name to respective index and vice versa
        :param ensemble: actual pytorch model
        :param hashing_vectorizer: hashing vectorizer generating ngram features
        :param max_features: maximum number of ngram features used
        :param text_kw: keyword under which input texts are passed to the **kwargs methods
        :param label_kw: keyword under which gold labels are passed to the **kwargs methods
        """
        super().__init__(label_mapper)
        self.ensemble = ensemble
        self.hashing_vectorizer = hashing_vectorizer
        self.max_features = max_features
        self.text_kw = text_kw
        self.label_kw = label_kw
        self._xent = t.nn.CrossEntropyLoss()
        self._opt = t.optim.Adam(params=list(self.ensemble.parameters()))
        self._opt.zero_grad()

    def preprocess_clauses(self, clauses: List[str]) -> t.Tensor:
        """
        maps list of clauses to padded sequence of ngram indices
        """
        onehots = self.hashing_vectorizer.transform(clauses).toarray()
        # BUG FIX: keep the ragged per-clause index arrays in a plain list;
        # np.array(...) over ragged sequences raises ValueError on modern numpy
        sequences = [np.arange(d.shape[-1])[d > 0.1] for d in onehots]
        return t.from_numpy(sequence.pad_sequences(sequences=sequences, maxlen=self.max_features)).long()

    def predict_label_probabilities(self, **kwargs) -> Tensor:
        """Softmax over the ensemble logits, returned as a numpy array."""
        return F.softmax(self.get_logits(**kwargs), dim=-1).detach().numpy()

    def get_logits(self, **kwargs) -> Tensor:
        """Vectorize the input texts and run them through the ensemble."""
        indices = self.preprocess_clauses(clauses=kwargs[self.text_kw])
        return self.ensemble.forward(x=indices)

    def train(self, **kwargs) -> "EnsembleModel":
        """Run one optimization step on the given batch and return self."""
        loss = self.get_loss(as_tensor=True, **kwargs)["cross_entropy"]
        loss.backward()
        self._opt.step()
        self._opt.zero_grad()
        return self

    def get_loss(self, as_tensor: bool = False, **kwargs) -> Dict[str, Tensor]:
        """Cross-entropy between ensemble logits and the mapped gold labels.

        :param as_tensor: if False, detach the loss to a numpy scalar
        """
        y_true = t.from_numpy(self.label_mapper.map_to_indices(kwargs[self.label_kw])).long()
        y_pred = self.get_logits(**kwargs)
        loss = self._xent.forward(input=y_pred, target=y_true)
        if not as_tensor:
            loss = loss.detach().numpy()
        return {
            "cross_entropy": loss
        }

    @staticmethod
    def from_components(**kwargs) -> "EnsembleModel":
        """Reconstruct a model from the dict produced by get_components."""
        return EnsembleModel(**kwargs)

    def get_components(self) -> dict:
        return {
            "ensemble": self.ensemble,
            "hashing_vectorizer": self.hashing_vectorizer,
            # BUG FIX: key must match the __init__ parameter name so that
            # from_components(**get_components()) round-trips (was "max_len")
            "max_features": self.max_features,
            "label_mapper": self.label_mapper,
            "text_kw": self.text_kw,
            "label_kw": self.label_kw
        }

    def get_numpy_parameters(self) -> Dict[str, np.ndarray]:
        """Snapshot all ensemble parameters as detached numpy arrays."""
        return dict((name, p.detach().numpy()) for name, p in self.ensemble.named_parameters())
|
{"hexsha": "c4e3b814aeaf43f5347aeb1ce8a16011895966d7", "size": 4334, "ext": "py", "lang": "Python", "max_stars_repo_path": "unify_eval/model/ensemble.py", "max_stars_repo_name": "goesslfabian/unify-eval", "max_stars_repo_head_hexsha": "ced486e44ca57ed31b552fd20b53cae61015e486", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-18T10:40:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T10:20:54.000Z", "max_issues_repo_path": "unify_eval/model/ensemble.py", "max_issues_repo_name": "goesslfabian/unify-eval", "max_issues_repo_head_hexsha": "ced486e44ca57ed31b552fd20b53cae61015e486", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-11-13T19:00:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:10:28.000Z", "max_forks_repo_path": "unify_eval/model/ensemble.py", "max_forks_repo_name": "goesslfabian/unify-eval", "max_forks_repo_head_hexsha": "ced486e44ca57ed31b552fd20b53cae61015e486", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-23T12:37:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-23T12:37:12.000Z", "avg_line_length": 37.0427350427, "max_line_length": 105, "alphanum_fraction": 0.6469773881, "include": true, "reason": "import numpy", "num_tokens": 943}
|
import numpy as np
import pandas as pd
import mesa_reader as mr
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import glob
from cosmic.sample.initialbinarytable import InitialBinaryTable
from cosmic.evolve import Evolve
#-------------------------------------------------------------------#
## find pre common envelope data in MESA
def before_CE(data, CE):
    """Return the portion of `data` recorded before the first common-envelope
    phase, or None when the evolution never enters a common envelope.

    `CE` is the per-row CE flag column aligned with `data`.
    """
    if not np.any(CE == 1):
        ## no common envelope anywhere in this evolution
        return None
    first_CE = np.argwhere(CE)[0].item()
    return data[:first_CE]
#-------------------------------------------------------------------#
## calculate BH accretion rate in MESA
def calc_acc_rate(lg_mtransfer_rate, xfer_frac):
    """Log10 of the BH accretion rate: mass-transfer rate scaled by the
    accreted fraction `xfer_frac`."""
    mtransfer_rate = 10 ** lg_mtransfer_rate
    return np.log10(mtransfer_rate * xfer_frac)
#-------------------------------------------------------------------#
## read all MESA history files and store selected data in arrays
def read_all_files(filepath):
    """Read every MESA binary history file matching the glob `filepath`
    and collect selected columns into one 2D list.

    Positional layout of the returned list (downstream code indexes it):
      0 model numbers, 1 accretion luminosity [erg/s], 2 has-CE flags,
      3 ages [Myr], 4 donor masses, 5 lg BH accretion rate
      (NOTE(review): never appended below — stays empty; confirm intended),
      6 He core masses, 7 BH masses, 8 relative RL overflow,
      9 Eddington accretion rate, 10 center c12 abundance, 11 filenames
    """
    ## constants
    Myr = 1e6 ## years per Myr
    Lsun = 3.839e33 ## erg/sec per solar luminosity
    all_files = glob.glob(filepath)
    filenames = []
    ## initialize lists for the parameters you want
    all_models = []
    all_ages = []
    all_acc_lum = []
    all_donor_masses = []
    all_lg_BH_acc_rate = []
    all_has_CE = []
    all_He_core_mass = []
    all_BH_mass = []
    all_rl_overflow = []
    all_edd_acc_rate = []
    all_c12 = []
    all_wind_mdot_1 = []  # NOTE(review): collected nowhere and not returned
    all_data = [] ## 2D list to return; stores all of the above parameter lists
    ## iterate through the output files and save the data you want
    for file in all_files:
        print (file)
        filenames.append(file)
        history = mr.MesaData(file)
        ## read desired data from MESA
        model = history.data("model_number")
        age = history.data("star_age")
        CE = history.data("CE_flag")
        lg_accretion_luminosity = history.data("lg_accretion_luminosity")
        donor_mass = history.data("star_1_mass")
        lg_mtransfer_rate = history.data("lg_mtransfer_rate")  # NOTE(review): read but unused
        he_core_mass = history.data("he_core_mass")
        BH_mass = history.data("star_2_mass")
        rl_overflow_1 = history.data("rl_relative_overflow_1")
        lg_edd_accretion_rate = history.data("lg_mdot_edd")
        c12 = history.data("center_c12")
        lg_wind_mdot_1 = history.data("lg_wind_mdot_1")  # NOTE(review): read but unused
        porb = history.data("period_days")  # NOTE(review): read but unused
        # determine if this system has a common envelope
        has_CE = False
        if before_CE(model, CE) is not None: has_CE = True
        ## append desired data to lists
        all_models.append(model)
        all_acc_lum.append(10**lg_accretion_luminosity*Lsun)  # convert to erg/s
        all_has_CE.append(has_CE)
        all_ages.append(age/Myr)  # convert years -> Myr
        all_donor_masses.append(donor_mass)
        all_He_core_mass.append(he_core_mass)
        all_BH_mass.append(BH_mass)
        all_rl_overflow.append(rl_overflow_1)
        all_edd_acc_rate.append(10**lg_edd_accretion_rate)  # de-log
        all_c12.append(c12)
    all_data.extend([all_models, all_acc_lum, all_has_CE, all_ages, all_donor_masses, all_lg_BH_acc_rate])
    all_data.extend([all_He_core_mass, all_BH_mass, all_rl_overflow, all_edd_acc_rate, all_c12, filenames])
    return all_data
#-------------------------------------------------------------------#
# plot chosen data for all history files with common envelope distinction
def plot_all_files_CE(xdata, ydata, CEdata, xlower, xupper, ylower, yupper, xlabel, ylabel):
    """Plot ydata vs xdata for every history file, coloring each curve by
    whether the system underwent a common envelope.

    :param xdata: per-file x arrays (e.g. ages)
    :param ydata: per-file y arrays, aligned with xdata
    :param CEdata: per-file booleans -- True if the system had a CE phase
    :param xlower, xupper, ylower, yupper: axis limits
    :param xlabel, ylabel: axis labels
    """
    fig, ax = plt.subplots(figsize=(10, 7))
    # BUG FIX: iterate over the `xdata` argument, not the global `all_data`,
    # so the function works on any data passed in
    for i in range(len(xdata)):
        color = "orange" if CEdata[i] else "cornflowerblue"
        ax.plot(xdata[i], ydata[i], color=color)
    ax.set_xlim(xlower, xupper)
    ax.set_ylim(ylower, yupper)
    ax.set_ylabel(ylabel, size=14)
    ax.set_xlabel(xlabel, size=14)
    # manual legend keys: the curves themselves are unlabeled
    CE_key = mpatches.Patch(color='orange', label='Common Envelope')
    no_CE_key = mpatches.Patch(color='cornflowerblue', label='No Common Envelope')
    plt.legend(handles=[CE_key, no_CE_key])
    plt.show()
#-------------------------------------------------------------------#
## define your MESA output folder and acquire the parameter data you want
path = "history_files/*.data"
all_data = read_all_files(path)

## MESA ONLY PLOTS

## a simple MESA plot
## plot BH mass vs time for one binary
fig, ax = plt.subplots(figsize=(7,5))
# index 3: ages [Myr], index 7: BH masses; [0] selects the first history file
ax.plot(all_data[3][0], all_data[7][0], color='green')
ax.set_xlabel("Star Age (Myr)", size=13)
ax.set_ylabel("BH Mass (Msun)", size=13)
ax.set_xlim(6., 6.65)
plt.show()

## an example MESA plot using the CE plotting function
## plot BH accretion luminosity vs time and distinguish between CE/non-CE systems
plot_all_files_CE(all_data[3], all_data[1], all_data[2], 6.5, 6.64 , 1e30, 2.5e39, \
                  "Time (Myr)", "BH Accretion Luminosity (erg/sec)")
## MESA PLOTS THAT COMPARE WITH BSE/COSMIC

## initialize single binary with parameters from MESA run
## (BSE flag settings passed verbatim to COSMIC's Evolve below)
MESA_BSEDict = {'xi': 0.0, 'bhflag': 0, 'neta': 1.0, 'windflag': 3, 'wdflag': 0, 'alpha1': 1.0, 'pts1': 0.001, 'pts3': 0.02, 'pts2': 0.01, 'epsnov': 0.001, 'hewind': 1.0, 'ck': -1000, 'bwind': 0.0, 'lambdaf': -1.0, 'mxns': 2.5, 'beta': 1./8., 'tflag': 1, 'acc2': 1.5, 'nsflag': 4, 'ceflag': 1, 'eddfac': 1.0, 'ifflag': 0, 'bconst': -3000, 'sigma': 0.0, 'gamma': -2.0, 'pisn': -2.0, 'natal_kick_array' : [-100.0,-100.0,-100.0,-100.0,-100.0,-100.0], 'bhsigmafrac' : 1.0, 'polar_kick_angle' : 90, 'qcrit_array' : [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0], 'cekickflag' : 2, 'cehestarflag' : 0, 'cemergeflag' : 1, 'ecsn' : 2.5, 'ecsn_mlow' : 1.4, 'aic' : 1, 'ussn' : 0, 'sigmadiv' :20.0, 'qcflag' : 2, 'eddlimflag' : 0, 'fprimc_array' : [2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0,2.0/21.0], 'bhspinflag' : 0, 'bhspinmag' : 0.0, 'rejuv_fac' : 1.0, 'rejuvflag' : 0, 'htpmb' : 1, 'ST_cr' : 1, 'ST_tide' : 0, 'bdecayfac' : 1}
single_binary = InitialBinaryTable.InitialBinaries(m1=31.62277650725363, m2=4.74341649025, porb=1174.8975610426348,ecc=0.0, tphysf=13700.0, kstar1=1, kstar2=14, metallicity=0.00142)
bpp, bcm, initC = Evolve.evolve(initialbinarytable=single_binary, BSEDict=MESA_BSEDict, dtp=0.01)

## plot desired parameters for MESA and COSMIC
fig, ax, = plt.subplots(figsize=(8, 5))
ax.plot(bcm['tphys'], bcm['mass_2'], label='COSMIC')
# compare against the first MESA history file (ages vs BH mass)
ax.plot(all_data[3][0], all_data[7][0], label='MESA')
ax.set_xlabel("Time (Myr)", size=13)
ax.set_ylabel("BH Mass (Msun)", size=13)
ax.set_xlim(6.3, 7.6)
ax.set_ylim(4.73, 4.78)
plt.title("Mergers in MESA vs. COSMIC", size=14)
plt.legend()
plt.show()
|
{"hexsha": "5fa7679360e342ece4dcd767093700cef8d680c9", "size": 6849, "ext": "py", "lang": "Python", "max_stars_repo_path": "MESA_plotting.py", "max_stars_repo_name": "celiotine/CIERA_code", "max_stars_repo_head_hexsha": "c4fd6b5dec9397fed4aa2a73dae311edcde6ed81", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MESA_plotting.py", "max_issues_repo_name": "celiotine/CIERA_code", "max_issues_repo_head_hexsha": "c4fd6b5dec9397fed4aa2a73dae311edcde6ed81", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MESA_plotting.py", "max_forks_repo_name": "celiotine/CIERA_code", "max_forks_repo_head_hexsha": "c4fd6b5dec9397fed4aa2a73dae311edcde6ed81", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3041237113, "max_line_length": 1034, "alphanum_fraction": 0.6197985107, "include": true, "reason": "import numpy", "num_tokens": 2196}
|
import numpy as np
import pytest
from orix.quaternion.orientation import Orientation, Misorientation
from orix.quaternion.symmetry import C1, C2, C3, C4, D2, D3, D6, T, O
from orix.vector import Vector3d
@pytest.fixture
def vector(request):
    """Indirect fixture: build a Vector3d from the parametrized tuple."""
    return Vector3d(request.param)
@pytest.fixture(params=[(0.5, 0.5, 0.5, 0.5), (0.5 ** 0.5, 0, 0, 0.5 ** 0.5)])
def orientation(request):
    """Indirect fixture: build an Orientation from the parametrized quaternion
    tuple; tests override the params via indirect parametrization."""
    return Orientation(request.param)
@pytest.mark.parametrize(
    "orientation, symmetry, expected",
    [
        ([(1, 0, 0, 0)], C1, [(1, 0, 0, 0)]),
        ([(1, 0, 0, 0)], C4, [(1, 0, 0, 0)]),
        ([(1, 0, 0, 0)], D3, [(1, 0, 0, 0)]),
        ([(1, 0, 0, 0)], T, [(1, 0, 0, 0)]),
        ([(1, 0, 0, 0)], O, [(1, 0, 0, 0)]),
        # 7pi/12 -C2-> # 7pi/12
        ([(0.6088, 0, 0, 0.7934)], C2, [(-0.7934, 0, 0, 0.6088)]),
        # 7pi/12 -C3-> # 7pi/12
        ([(0.6088, 0, 0, 0.7934)], C3, [(-0.9914, 0, 0, 0.1305)]),
        # 7pi/12 -C4-> # pi/12
        ([(0.6088, 0, 0, 0.7934)], C4, [(-0.9914, 0, 0, -0.1305)]),
        # 7pi/12 -O-> # pi/12
        ([(0.6088, 0, 0, 0.7934)], O, [(-0.9914, 0, 0, -0.1305)]),
    ],
    indirect=["orientation"],
)
def test_set_symmetry(orientation, symmetry, expected):
    """Applying a point-group symmetry maps the orientation onto the
    expected equivalent quaternion (within tolerance)."""
    o = orientation.set_symmetry(symmetry)
    assert np.allclose(o.data, expected, atol=1e-3)
@pytest.mark.parametrize(
    "symmetry, vector",
    [(C1, (1, 2, 3)), (C2, (1, -1, 3)), (C3, (1, 1, 1)), (O, (0, 1, 0))],
    indirect=["vector"],
)
def test_orientation_persistence(symmetry, vector):
    """Rotating the symmetry-expanded vector set gives the same result
    before and after symmetrizing the orientation."""
    v = symmetry.outer(vector).flatten()
    o = Orientation.random()
    oc = o.set_symmetry(symmetry)
    v1 = o * v
    v1 = Vector3d(v1.data.round(4))  # round to suppress float noise in tuples
    v2 = oc * v
    v2 = Vector3d(v2.data.round(4))
    assert v1._tuples == v2._tuples
@pytest.mark.parametrize(
    "orientation, symmetry, expected",
    [
        ((1, 0, 0, 0), C1, [0]),
        ([(1, 0, 0, 0), (0.7071, 0.7071, 0, 0)], C1, [[0, np.pi / 2], [np.pi / 2, 0]]),
        ([(1, 0, 0, 0), (0.7071, 0.7071, 0, 0)], C4, [[0, np.pi / 2], [np.pi / 2, 0]]),
        ([(1, 0, 0, 0), (0.7071, 0, 0, 0.7071)], C4, [[0, 0], [0, 0]]),
        (
            [
                [(1, 0, 0, 0), (0.7071, 0, 0, 0.7071)],
                [(0, 0, 0, 1), (0.9239, 0, 0, 0.3827)],
            ],
            C4,
            [
                [[[0, 0], [0, np.pi / 4]], [[0, 0], [0, np.pi / 4]]],
                [[[0, 0], [0, np.pi / 4]], [[np.pi / 4, np.pi / 4], [np.pi / 4, 0]]],
            ],
        ),
    ],
    indirect=["orientation"],
)
def test_distance(orientation, symmetry, expected):
    """Pairwise misorientation-angle matrix matches the expected values
    after symmetrization (identical orientations are distance 0)."""
    o = orientation.set_symmetry(symmetry)
    distance = o.distance(verbose=True)
    assert np.allclose(distance, expected, atol=1e-3)
@pytest.mark.parametrize("symmetry", [C1, C2, C4, D2, D6, T, O])
def test_getitem(orientation, symmetry):
    """Indexing into a symmetrized Orientation keeps its symmetry."""
    symmetrized = orientation.set_symmetry(symmetry)
    assert symmetrized[0].symmetry._tuples == symmetry._tuples
@pytest.mark.parametrize("Gl", [C4, C2])
def test_equivalent(Gl):
""" Tests that the property Misorientation.equivalent runs without error,
use grain_exchange=True as this falls back to grain_exchange=False when
Gl!=Gr:
Gl == C4 is grain exchange
Gl == C2 is no grain exchange
"""
m = Misorientation([1, 1, 1, 1]) # any will do
m_new = m.set_symmetry(Gl, C4, verbose=True)
m_new.symmetry
_m = m_new.equivalent(grain_exchange=True)
def test_repr():
    """Smoke test: printing a Misorientation exercises __repr__ without error."""
    m = Misorientation([1, 1, 1, 1])  # any will do
    print(m)  # hits __repr__
    # (dropped the explicit `return None`: test functions should not return)
def test_sub():
    """Subtracting an orientation from itself yields zero misorientations."""
    m = Orientation([1, 1, 1, 1])  # any will do
    # BUG FIX: set_symmetry returns a new object (see its other call sites);
    # the result was previously discarded, leaving m un-symmetrized
    m = m.set_symmetry(C4)  # only one as it a O
    _ = m - m  # this should give a set of zeroes
# BUG FIX: the intent is that `m - 3` raises TypeError. That is expressed with
# `raises=TypeError`; `reason=` must be a string per pytest's xfail API.
@pytest.mark.xfail(strict=True, raises=TypeError, reason="cannot subtract int from Orientation")
def test_sub_orientation_and_other():
    m = Orientation([1, 1, 1, 1])  # any will do
    _ = m - 3
|
{"hexsha": "ad7da85a0bd50af4c7011598aae5d06b21a22c93", "size": 3829, "ext": "py", "lang": "Python", "max_stars_repo_path": "orix/tests/test_orientation.py", "max_stars_repo_name": "JoonatanL/orix", "max_stars_repo_head_hexsha": "b8e6dfe4c3ac053e923b001b9bccec717db3e56c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-28T01:57:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-28T01:57:35.000Z", "max_issues_repo_path": "orix/tests/test_orientation.py", "max_issues_repo_name": "dnjohnstone/orix", "max_issues_repo_head_hexsha": "b8e6dfe4c3ac053e923b001b9bccec717db3e56c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "orix/tests/test_orientation.py", "max_forks_repo_name": "dnjohnstone/orix", "max_forks_repo_head_hexsha": "b8e6dfe4c3ac053e923b001b9bccec717db3e56c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8790322581, "max_line_length": 87, "alphanum_fraction": 0.5356489945, "include": true, "reason": "import numpy", "num_tokens": 1489}
|
// The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
/*
This example shows how to do instance segmentation on an image using net pretrained
on the PASCAL VOC2012 dataset. For an introduction to what instance segmentation is,
see the accompanying header file dnn_instance_segmentation_ex.h.
Instructions how to run the example:
1. Download the PASCAL VOC2012 data, and untar it somewhere.
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
2. Build the dnn_instance_segmentation_train_ex example program.
3. Run:
./dnn_instance_segmentation_train_ex /path/to/VOC2012
4. Wait while the network is being trained.
5. Build the dnn_instance_segmentation_ex example program.
6. Run:
./dnn_instance_segmentation_ex /path/to/VOC2012-or-other-images
An alternative to steps 2-4 above is to download a pre-trained network
from here: http://dlib.net/files/instance_segmentation_voc2012net_v2.dnn
It would be a good idea to become familiar with dlib's DNN tooling before reading this
example. So you should read dnn_introduction_ex.cpp and dnn_introduction2_ex.cpp
before reading this example program.
*/
#include "dnn_instance_segmentation_ex.h"
#include "pascal_voc_2012.h"
#include <iostream>
#include <dlib/data_io.h>
#include <dlib/gui_widgets.h>
using namespace std;
using namespace dlib;
// ----------------------------------------------------------------------------------------
// Entry point.  Note the function-try-block: the catch clause at the bottom
// wraps the entire body, so any dlib/std exception is reported and the
// program exits cleanly.
int main(int argc, char** argv) try
{
    if (argc != 2)
    {
        cout << "You call this program like this: " << endl;
        cout << "./dnn_instance_segmentation_train_ex /path/to/images" << endl;
        cout << endl;
        cout << "You will also need a trained '" << instance_segmentation_net_filename << "' file." << endl;
        cout << "You can either train it yourself (see example program" << endl;
        cout << "dnn_instance_segmentation_train_ex), or download a" << endl;
        cout << "copy from here: http://dlib.net/files/" << instance_segmentation_net_filename << endl;
        return 1;
    }
    // Read the file containing the trained networks from the working directory.
    // The file holds one detection net plus a map from class label to
    // segmentation net (possibly a single shared net under the "" key).
    det_anet_type det_net;
    std::map<std::string, seg_bnet_type> seg_nets_by_class;
    deserialize(instance_segmentation_net_filename) >> det_net >> seg_nets_by_class;
    // Show inference results in a window.
    image_window win;
    matrix<rgb_pixel> input_image;
    // Find supported image files.
    const std::vector<file> files = dlib::get_files_in_directory_tree(argv[1],
        dlib::match_endings(".jpeg .jpg .png"));
    dlib::rand rnd;
    cout << "Found " << files.size() << " images, processing..." << endl;
    for (const file& file : files)
    {
        // Load the input image.
        load_image(input_image, file.full_name());
        // Find instances in the input image
        const auto instances = det_net(input_image);
        // Per-pixel output label image and a parallel confidence buffer; the
        // confidence buffer lets a later, more confident instance overwrite
        // an earlier one at overlapping pixels.
        matrix<rgb_pixel> rgb_label_image;
        matrix<float> label_image_confidence;
        matrix<rgb_pixel> input_chip;
        rgb_label_image.set_size(input_image.nr(), input_image.nc());
        rgb_label_image = rgb_pixel(0, 0, 0);
        label_image_confidence.set_size(input_image.nr(), input_image.nc());
        label_image_confidence = 0.0;
        bool found_something = false;
        for (const auto& instance : instances)
        {
            // Print a comma-separated list of detected labels as we go.
            if (!found_something)
            {
                cout << "Found ";
                found_something = true;
            }
            else
            {
                cout << ", ";
            }
            cout << instance.label;
            // Crop a fixed-size chip around the detection and run the
            // class-specific segmentation net on it.
            const auto cropping_rect = get_cropping_rect(instance.rect);
            const chip_details chip_details(cropping_rect, chip_dims(seg_dim, seg_dim));
            extract_image_chip(input_image, chip_details, input_chip, interpolate_bilinear());
            const auto i = seg_nets_by_class.find(instance.label);
            if (i == seg_nets_by_class.end())
            {
                // per-class segmentation net not found, so we must be using the same net for all classes
                // (see bool separate_seg_net_for_each_class in dnn_instance_segmentation_train_ex.cpp)
                DLIB_CASSERT(seg_nets_by_class.size() == 1);
                DLIB_CASSERT(seg_nets_by_class.begin()->first == "");
            }
            auto& seg_net = i != seg_nets_by_class.end()
                ? i->second // use the segmentation net trained for this class
                : seg_nets_by_class.begin()->second; // use the same segmentation net for all classes
            const auto mask = seg_net(input_chip);
            // Each instance gets a random color so instances of the same
            // class remain visually distinguishable.
            const rgb_pixel random_color(
                rnd.get_random_8bit_number(),
                rnd.get_random_8bit_number(),
                rnd.get_random_8bit_number()
            );
            // Resize the chip-sized mask back to the original crop size
            // before pasting it into the full-resolution label image.
            dlib::matrix<float> resized_mask(
                static_cast<int>(chip_details.rect.height()),
                static_cast<int>(chip_details.rect.width())
            );
            dlib::resize_image(mask, resized_mask);
            for (int r = 0; r < resized_mask.nr(); ++r)
            {
                for (int c = 0; c < resized_mask.nc(); ++c)
                {
                    const auto new_confidence = resized_mask(r, c);
                    if (new_confidence > 0)
                    {
                        // Map chip coordinates back to image coordinates and
                        // clip to the image bounds (the crop may extend past
                        // the image border).
                        const auto y = chip_details.rect.top() + r;
                        const auto x = chip_details.rect.left() + c;
                        if (y >= 0 && y < rgb_label_image.nr() && x >= 0 && x < rgb_label_image.nc())
                        {
                            auto& current_confidence = label_image_confidence(y, x);
                            if (new_confidence > current_confidence)
                            {
                                auto rgb_label = random_color;
                                const auto baseline_confidence = 5;
                                if (new_confidence < baseline_confidence)
                                {
                                    // Scale label intensity if confidence isn't high
                                    rgb_label.red *= new_confidence / baseline_confidence;
                                    rgb_label.green *= new_confidence / baseline_confidence;
                                    rgb_label.blue *= new_confidence / baseline_confidence;
                                }
                                rgb_label_image(y, x) = rgb_label;
                                current_confidence = new_confidence;
                            }
                        }
                    }
                }
            }
            // Outline the detection box in the class's reference color.
            const Voc2012class& voc2012_class = find_voc2012_class(
                [&instance](const Voc2012class& candidate) {
                    return candidate.classlabel == instance.label;
                }
            );
            dlib::draw_rectangle(rgb_label_image, instance.rect, voc2012_class.rgb_label, 1);
        }
        // Show the input image on the left, and the predicted RGB labels on the right.
        win.set_image(join_rows(input_image, rgb_label_image));
        if (!instances.empty())
        {
            cout << " in " << file.name() << " - hit enter to process the next image";
            cin.get();
        }
    }
}
catch(std::exception& e)
{
    cout << e.what() << endl;
}
|
{"hexsha": "b864015b992bec3e33bc3aac63a1e670914782dd", "size": 7506, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "examples/dnn_instance_segmentation_ex.cpp", "max_stars_repo_name": "babic95/dlib", "max_stars_repo_head_hexsha": "285f0255f6deef4e59e97f93023de112594c0741", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 11719.0, "max_stars_repo_stars_event_min_datetime": "2015-01-03T22:38:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:45:04.000Z", "max_issues_repo_path": "examples/dnn_instance_segmentation_ex.cpp", "max_issues_repo_name": "KiLJ4EdeN/dlib", "max_issues_repo_head_hexsha": "eb1f08ce6ab3ca6f9d10425d899103de3c0df56c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2518.0, "max_issues_repo_issues_event_min_datetime": "2015-01-04T04:38:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T11:55:43.000Z", "max_forks_repo_path": "examples/dnn_instance_segmentation_ex.cpp", "max_forks_repo_name": "KiLJ4EdeN/dlib", "max_forks_repo_head_hexsha": "eb1f08ce6ab3ca6f9d10425d899103de3c0df56c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3308.0, "max_forks_repo_forks_event_min_datetime": "2015-01-01T14:34:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T07:20:07.000Z", "avg_line_length": 39.2984293194, "max_line_length": 108, "alphanum_fraction": 0.5711430855, "num_tokens": 1584}
|
'''The barebone-essentials of weighted ordinary least squares and
a RANSAC-wrapper of it.
'''
import random
from collections import namedtuple
import numpy as np
from scipy import stats
# Immutable result record for a weighted ordinary least squares fit.
# Fields (as produced by weighted_linear_regression_fit): fitted values,
# coefficient vector, the (X, y) input pair, the weight matrix, residuals,
# the projection ("hat") matrix, residual sum of squares, PRESS statistic,
# and the coefficient of determination.
WLSSolution = namedtuple("WLSSolution", [
    'yhat', 'parameters', 'data', 'weights', 'residuals',
    'projection_matrix', 'rss', 'press', 'R2'])
'''A structured container for :func:`weighted_linear_regression_fit`
output.
'''
# Small tolerance constant; not referenced by any code visible in this module.
SMALL_ERROR = 1e-5
def prepare_arrays_for_linear_fit(x, y, w=None):
    """Convert raw predictor/response/weight values into the arrays
    expected by :func:`weighted_linear_regression_fit`.

    Parameters
    ----------
    x : :class:`np.ndarray`
        The data vector or matrix of predictors, without a common
        intercept column.
    y : :class:`np.ndarray`
        The response variable; its outer dimension must match ``x``.
    w : :class:`np.ndarray`, optional
        The observation weight matrix. When omitted, an identity matrix
        of the appropriate shape (uniform weights) is returned.

    Returns
    -------
    X : :class:`np.ndarray`
        The predictors, with a common intercept column prepended.
    Y : :class:`np.ndarray`
        The response variable as an array.
    W : :class:`np.ndarray`
        The weight matrix of ``X``.
    """
    # Stack a column of ones ahead of the predictors, then transpose so
    # that each row is one observation.
    design = np.vstack((np.ones(len(x)), np.array(x))).T
    response = np.array(y)
    # Fall back to uniform (identity) weights when none were supplied.
    weight_matrix = np.eye(response.shape[0]) if w is None else np.array(w)
    return design, response, weight_matrix
def weighted_linear_regression_fit(x, y, w=None, prepare=False):
    """Fit a linear model using weighted least squares.

    Parameters
    ----------
    x : :class:`np.ndarray`
        The data vector or matrix of predictors
    y : :class:`np.ndarray`
        The response variable, should have the same outer dimension as x
    w : :class:`np.ndarray`, optional
        The optional weight matrix
    prepare : bool, optional
        Whether or not to pass the parameters through
        :func:`prepare_arrays_for_linear_fit`

    Returns
    -------
    WLSSolution
    """
    if prepare:
        x, y, w = prepare_arrays_for_linear_fit(x, y, w)
    elif w is None:
        # Uniform weights reduce WLS to ordinary least squares.
        w = np.eye(y.shape[0])
    # Pseudo-inverse solution: parameters = (X'WX)^+ X'W y
    xtw = x.T.dot(w)
    solver = np.linalg.pinv(xtw.dot(x)).dot(xtw)
    parameters = solver.dot(y)
    hat_matrix = x.dot(solver)
    fitted = x.dot(parameters)
    residuals = y - fitted
    weights_diag = np.diag(w)
    # PRESS: weighted sum of squared leave-one-out prediction errors,
    # computed from the hat-matrix diagonal without refitting.
    loo_error = residuals / (1 - np.diag(hat_matrix))
    press = (weights_diag * loo_error * loo_error).sum()
    rss = (weights_diag * residuals * residuals).sum()
    centered = y - y.mean()
    tss = (weights_diag * centered * centered).sum()
    return WLSSolution(
        fitted, parameters, (x, y), w, residuals, hat_matrix,
        rss, press, 1 - (rss / tss))
def ransac(x, y, w=None, max_trials=100):
    '''RANSAC Regression, inspired heavily by sklearn's
    much more complex implementation.

    Repeatedly fits a weighted least squares model on random subsets of
    the data, keeps the consensus (inlier) set of the best-scoring fit,
    and returns a final fit over that inlier set.

    Parameters
    ----------
    x : :class:`np.ndarray`
        The design matrix (including any intercept column).
    y : :class:`np.ndarray`
        The response variable.
    w : :class:`np.ndarray`, optional
        The weight matrix; defaults to the identity (uniform weights).
    max_trials : int, optional
        The number of random subsets to evaluate.

    Returns
    -------
    WLSSolution
    '''
    X = x
    # Inliers are points whose absolute residual is below the median
    # absolute deviation of y.
    residual_threshold = np.median(np.abs(y - np.median(y)))
    if w is None:
        # BUG FIX: np.eye expects a dimension, not the data array itself
        # (the original `np.eye(y)` raised as soon as w was omitted).
        w = np.eye(y.shape[0])
    def loss(y_true, y_pred):
        return np.abs(y_true - y_pred)
    n_trials = 0
    n_samples = X.shape[0]
    min_samples = X.shape[1] * 5
    if min_samples > X.shape[0]:
        min_samples = X.shape[1] + 1
    if min_samples > X.shape[0]:
        # Too few observations to subsample; fall back to a plain WLS fit.
        return weighted_linear_regression_fit(X, y, w)
    sample_indices = np.arange(n_samples)
    rng = random.Random(1)
    n_inliers_best = 1
    score_best = -np.inf
    X_inlier_best = None
    y_inlier_best = None
    w_inlier_best = None
    while n_trials < max_trials:
        n_trials += 1
        subset_ix = rng.sample(range(n_samples), min_samples)
        X_subset = X[subset_ix]
        y_subset = y[subset_ix]
        w_subset = np.diag(np.diag(w)[subset_ix])
        # fit parameters on random subset of the data
        fit = weighted_linear_regression_fit(X_subset, y_subset, w_subset)
        # compute goodness of fit for the fitted parameters with
        # the full dataset
        yhat = np.dot(X, fit.parameters)
        residuals_subset = loss(y, yhat)
        # locate inliers based on residual threshold
        inlier_subset_mask = residuals_subset < residual_threshold
        n_inliers_subset = inlier_subset_mask.sum()
        # determine the quality of the fitted parameters for
        # the inliers using R2
        inlier_subset_ix = sample_indices[inlier_subset_mask]
        X_inlier_subset = X[inlier_subset_ix]
        y_inlier_subset = y[inlier_subset_ix]
        w_inlier_subset = np.diag(np.diag(w)[inlier_subset_ix])
        yhat_inlier_subset = X_inlier_subset.dot(fit.parameters)
        rss = (w_inlier_subset * np.square(
            y_inlier_subset - yhat_inlier_subset)).sum()
        tss = (w_inlier_subset * np.square(
            y_inlier_subset - y_inlier_subset.mean())).sum()
        score_subset = 1 - (rss / tss)
        # If the number of inliers chosen hasn't improved and the score hasn't
        # improved, don't update the current best
        if n_inliers_subset < n_inliers_best and score_subset < score_best:
            continue
        # BUG FIX: track the best inlier count so the acceptance test above
        # compares against the current best rather than the initial value.
        n_inliers_best = n_inliers_subset
        score_best = score_subset
        X_inlier_best = X_inlier_subset
        y_inlier_best = y_inlier_subset
        w_inlier_best = w_inlier_subset
    # fit the final best inlier set for the final parameters
    return weighted_linear_regression_fit(X_inlier_best, y_inlier_best, w_inlier_best)
def prediction_interval(solution, x0, y0, alpha=0.05):
    """Calculate the prediction interval around `x0` with response
    `y0` given the `solution`.

    Parameters
    ----------
    solution : :class:`WLSSolution`
        The fitted model
    x0 : :class:`np.ndarray`
        The new predictors
    y0 : float
        The predicted response
    alpha : float, optional
        The prediction interval width. Defaults to 0.05

    Returns
    -------
    :class:`np.ndarray` :
        The lower and upper bound of the prediction interval.
    """
    degrees_of_freedom = len(solution.residuals) - len(solution.parameters)
    # Residual variance estimate from the fit's weighted RSS.
    residual_variance = solution.rss / degrees_of_freedom
    design, weights = solution.data[0], solution.weights
    gram_inverse = np.linalg.pinv(design.T.dot(weights).dot(design))
    leverage = x0.dot(gram_inverse).dot(x0.T)
    if not np.isscalar(leverage):
        # Multiple query rows: keep only each row's own leverage term.
        leverage = np.diag(leverage)
    std_error = np.sqrt(residual_variance * (1 + leverage))
    critical_t = stats.t.isf(alpha / 2., degrees_of_freedom)
    half_width = critical_t * std_error
    return np.stack([y0 - half_width, y0 + half_width])
|
{"hexsha": "4cc46f0f8b9d73d2297803ed3809aea404e7c666", "size": 6333, "ext": "py", "lang": "Python", "max_stars_repo_path": "glycan_profiling/scoring/elution_time_grouping/linear_regression.py", "max_stars_repo_name": "mstim/glycresoft", "max_stars_repo_head_hexsha": "1d305c42c7e6cba60326d8246e4a485596a53513", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "glycan_profiling/scoring/elution_time_grouping/linear_regression.py", "max_issues_repo_name": "mstim/glycresoft", "max_issues_repo_head_hexsha": "1d305c42c7e6cba60326d8246e4a485596a53513", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "glycan_profiling/scoring/elution_time_grouping/linear_regression.py", "max_forks_repo_name": "mstim/glycresoft", "max_forks_repo_head_hexsha": "1d305c42c7e6cba60326d8246e4a485596a53513", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1571428571, "max_line_length": 96, "alphanum_fraction": 0.6336649297, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1641}
|
module ModuleMacro where
record ⊤
: Set
where
module M where
module N where
postulate
A
: Set
B
: Set
module O
= M
module P
= M
module Q
= P
module R
(x : ⊤)
= N
using (A)
module S
= N
renaming
( A
to A'
; B
to B'
)
y
: ⊤
y
= record {O}
C
: ⊤
→ Set
C
= R.A
D
: Set
D
= S.B'
|
{"hexsha": "3b827bb55c0f1dff9c234869bfc33061f70373bb", "size": 359, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "data/declaration/ModuleMacro.agda", "max_stars_repo_name": "msuperdock/agda-unused", "max_stars_repo_head_hexsha": "f327f9aab8dcb07022b857736d8201906bba02e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-10-29T09:38:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T16:38:05.000Z", "max_issues_repo_path": "data/declaration/ModuleMacro.agda", "max_issues_repo_name": "msuperdock/agda-unused", "max_issues_repo_head_hexsha": "f327f9aab8dcb07022b857736d8201906bba02e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/declaration/ModuleMacro.agda", "max_forks_repo_name": "msuperdock/agda-unused", "max_forks_repo_head_hexsha": "f327f9aab8dcb07022b857736d8201906bba02e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-01T16:38:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T16:38:14.000Z", "avg_line_length": 6.1896551724, "max_line_length": 24, "alphanum_fraction": 0.4540389972, "num_tokens": 156}
|
[STATEMENT]
lemma combined_restrict_perm:
assumes "supp \<pi> \<sharp>* S" and [simp]: "finite S"
shows "combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
[PROOF STEP]
proof(cases p)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a b. p = (a, b) \<Longrightarrow> combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
[PROOF STEP]
fix env :: AEnv and G :: CoCalls
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a b. p = (a, b) \<Longrightarrow> combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
[PROOF STEP]
assume "p = (env, G)"
[PROOF STATE]
proof (state)
this:
p = (env, G)
goal (1 subgoal):
1. \<And>a b. p = (a, b) \<Longrightarrow> combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
p = (env, G)
goal (1 subgoal):
1. \<And>a b. p = (a, b) \<Longrightarrow> combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
supp \<pi> \<sharp>* S
finite S
[PROOF STEP]
have "env_restr S (\<pi> \<bullet> env) = env_restr S env"
[PROOF STATE]
proof (prove)
using this:
supp \<pi> \<sharp>* S
finite S
goal (1 subgoal):
1. (\<pi> \<bullet> env) f|` S = env f|` S
[PROOF STEP]
by (rule env_restr_perm)
[PROOF STATE]
proof (state)
this:
(\<pi> \<bullet> env) f|` S = env f|` S
goal (1 subgoal):
1. \<And>a b. p = (a, b) \<Longrightarrow> combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(\<pi> \<bullet> env) f|` S = env f|` S
goal (1 subgoal):
1. \<And>a b. p = (a, b) \<Longrightarrow> combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
supp \<pi> \<sharp>* S
finite S
[PROOF STEP]
have "cc_restr S (\<pi> \<bullet> G) = cc_restr S G"
[PROOF STATE]
proof (prove)
using this:
supp \<pi> \<sharp>* S
finite S
goal (1 subgoal):
1. (\<pi> \<bullet> G) G|` S = G G|` S
[PROOF STEP]
by (rule cc_restr_perm)
[PROOF STATE]
proof (state)
this:
(\<pi> \<bullet> G) G|` S = G G|` S
goal (1 subgoal):
1. \<And>a b. p = (a, b) \<Longrightarrow> combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
p = (env, G)
(\<pi> \<bullet> env) f|` S = env f|` S
(\<pi> \<bullet> G) G|` S = G G|` S
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
p = (env, G)
(\<pi> \<bullet> env) f|` S = env f|` S
(\<pi> \<bullet> G) G|` S = G G|` S
goal (1 subgoal):
1. combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
combined_restrict S (\<pi> \<bullet> p) = combined_restrict S p
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1250, "file": "Call_Arity_CoCallAnalysisImpl", "length": 15}
|
using JuMP, EAGO
m = Model()
EAGO.register_eago_operators!(m)
@variable(m, -1 <= x[i=1:5] <= 1)
@variable(m, -6.148474362391325 <= q <= 10.677081718106185)
add_NL_constraint(m, :(softplus(-0.2518902526786948 + 0.9847866884384731*softplus(0.2752793536861313 + -0.1568397657479923*softplus(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1062303426643516*softplus(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + 0.5642457669318222*softplus(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + -0.7649756572019379*softplus(0.9918790053549298 + 0.85981905523253*softplus(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + 0.9639515148457134*softplus(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.08816238542180388*softplus(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + 0.10087195663512549*softplus(-0.8202416682813327 + -0.17766510212211006*softplus(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1793020696087071*softplus(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.41892665263312834*softplus(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + 
-0.4691295251335359*$(x[5])))) + softplus(0.638435553115452 + 0.09453389081965424*softplus(0.2752793536861313 + -0.1568397657479923*softplus(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1062303426643516*softplus(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + 0.5642457669318222*softplus(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + -0.07927014075141203*softplus(0.9918790053549298 + 0.85981905523253*softplus(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + 0.9639515148457134*softplus(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.08816238542180388*softplus(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + 0.2624391124261729*softplus(-0.8202416682813327 + -0.17766510212211006*softplus(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1793020696087071*softplus(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.41892665263312834*softplus(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + 
-0.4691295251335359*$(x[5])))) + softplus(0.5273095272255199 + 0.4025366346817978*softplus(0.2752793536861313 + -0.1568397657479923*softplus(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1062303426643516*softplus(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + 0.5642457669318222*softplus(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + -0.585290488444234*softplus(0.9918790053549298 + 0.85981905523253*softplus(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + 0.9639515148457134*softplus(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.08816238542180388*softplus(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + 0.1647823489958915*softplus(-0.8202416682813327 + -0.17766510212211006*softplus(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1793020696087071*softplus(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.41892665263312834*softplus(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + 
-0.4691295251335359*$(x[5])))) - $q <= 0.0))
@objective(m, Min, q)
return m
|
{"hexsha": "61530c0dbe7fb6f6aedc32a7f7ec684b06ded632", "size": 6369, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "solver_benchmarking/MINLPLib.jl/instances/ANN_Env/06_softplus_5_3_3.jl", "max_stars_repo_name": "PSORLab/RSActivationFunctions", "max_stars_repo_head_hexsha": "0bf8b4500b21144c076ea958ce93dbdd19a53314", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "solver_benchmarking/MINLPLib.jl/instances/ANN_Env/06_softplus_5_3_3.jl", "max_issues_repo_name": "PSORLab/RSActivationFunctions", "max_issues_repo_head_hexsha": "0bf8b4500b21144c076ea958ce93dbdd19a53314", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "solver_benchmarking/MINLPLib.jl/instances/ANN_Env/06_softplus_5_3_3.jl", "max_forks_repo_name": "PSORLab/RSActivationFunctions", "max_forks_repo_head_hexsha": "0bf8b4500b21144c076ea958ce93dbdd19a53314", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 374.6470588235, "max_line_length": 6027, "alphanum_fraction": 0.6840948344, "num_tokens": 3065}
|
'''SGD classifier-- linear SVM. Try RADIAL BASIS FUNCTION SVM??? https://scikit-learn.org/stable/auto_examples/svm/plot_rbf_parameters.html
cd bcws_psu_research/recursive_classifier/
mkdir out
python3 recursive_sgd.py stack.bin out/
todo: write inputs, accuracy etc, to log file!!!!!'''
import sys; sys.path.append("../py/")
import sklearn
import datetime
import numpy as np
from misc import *
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
# --- configuration and input validation ----------------------------------
# NOTE(review): `args`, `err`, `exist`, `hist`, `read_binary`, `hdr_fn`,
# `bsq_to_scikit`, `os` and `math` are presumably provided by
# `from misc import *` above -- confirm against ../py/misc.py.
skip_i = 7 # skip every i-th sample (10 -> 90% train, 2 -> 50% train, 4 - > 75% train)
# NOTE(review): this classifier instance is unused; it is re-created with
# different settings inside the per-class loop below.
sgd = SGDClassifier(random_state=42, verbose=False, max_iter=1000, tol=1.e-3)
if len(args) < 3:
    err("sgd [input image stacked with binary ground-ref dat] [output dir]")
img_name, out_d = args[1], args[2]
if not exist(out_d) or not os.path.isdir(out_d):
    err("output directory not found")
# add slash if not there
out_d = (out_d + os.path.sep) if out_d[-1] != os.path.sep else out_d
if not exist(args[1]):
    err('could not find input image: ' + args[1])
# read multispectral image
ncol, nrow, nband, img = read_binary(args[1])
# NOTE(review): this rebinding shadows the `band_names` helper function, so
# it can only be called once.
band_names = band_names(hdr_fn(args[1])) # assume first n bands are image, next m are groundref
# Count leading image bands: their names are assumed to end in an integer
# (e.g. "band 1"); the first name that doesn't parse marks the start of the
# groundref bands.
n = 0
print("band names:", band_names)
for i in range(0, nband):
    try:
        x = int(band_names[i].split()[1])
        n = i + 1
    except:
        break
print("number of image bands: " + str(n)); m = nband - n
print("number of groundref bands: " + str(m))
npx = nrow * ncol # number of pixels
# convert binary data to numpy format expected by sgd
img_np = bsq_to_scikit(ncol, nrow, n, img[0: n * npx])
# For each groundref band: fit an SGD classifier on a subsample, predict on
# the full image, plot the results, re-fit on a probability-thresholded seed,
# and append accuracy statistics to a dated log file.
for k in range(n, nband):
    print("k", k, "__________________________")
    ref = img[k * npx: (k + 1) * npx]
    cls_name = band_names[k]
    print("class name: " + cls_name)
    # if cls_name != "WATER":
    #     continue
    # count positives, negatives
    ref_count = hist(ref)
    print("ref_layer_count", ref_count)
    NP, NN = ref_count[1.], ref_count[0.]
    # force groundref map to bool (assume any merging etc. already done)
    ref = np.array(ref)
    ref = (ref != 0.)
    # convert binary data to numpy format expected by sgd
    # NOTE(review): this rebinds img_np to ALL nband bands (image + groundref),
    # overwriting the n-band version built before the loop -- confirm intended.
    img_np = bsq_to_scikit(ncol, nrow, nband, img)
    # sample the data by skipping every skip_i'th element
    n_samp = int(npx) - int(math.ceil(npx / skip_i))
    img_samp, ref_samp = np.zeros((n_samp, nband)), np.zeros((n_samp))
    nxt_i = 0 # index for next element to copy
    for i in range(0, npx):
        if i % skip_i != 0:
            img_samp[nxt_i, :] = img_np[i, :]
            ref_samp[nxt_i] = ref[i]
            nxt_i += 1
    # sanity check: make sure we didn't fudge our integer math
    if nxt_i != n_samp:
        print("nxt_i", nxt_i, "n_samp", n_samp)
        err("unexpected n")
    # init classifier; modified_huber loss is required for predict_proba below
    sgd = SGDClassifier(random_state=1,
                        loss='modified_huber', #'log', # modified_huber',
                        penalty='l2', # "elasticnet",
                        verbose=False,
                        max_iter=1000,
                        tol=.01) # 1.e-3
    sgd.fit(img_samp, ref_samp) # fit on sampled data
    pred = sgd.predict(img_np) # predict on full data
    prob = sgd.predict_proba(img_np)
    # need to validate this, and count accuracy
    # Tally the confusion matrix over every pixel of the full image.
    TP = TN = FN = FP = 0
    for i in range(npx):
        if ref[i]:
            if pred[i]:
                TP += 1
            else:
                FN += 1
        else:
            if not pred[i]:
                TN += 1
            else:
                FP += 1
    if True:
        # Build a 3-band RGB preview, percentile-stretched per channel.
        db = [0, 1, 2] # [3, 2, 1] # default band index
        a = np.zeros((nrow, ncol, 3))
        a[:, :, 0] = img_np[:, db[0]].reshape(nrow, ncol)
        a[:, :, 1] = img_np[:, db[1]].reshape(nrow, ncol)
        a[:, :, 2] = img_np[:, db[2]].reshape(nrow, ncol)
        a = (a - np.min(a)) / np.max(a)
        # Clip each channel to its 1st..99th percentile and rescale to [0, 1].
        for i in range(0, 3):
            d = a[:, :, i]
            npx = nrow * ncol
            values = d.reshape(np.prod(d.shape)).tolist()
            values.sort()
            mn = values[int(math.floor(float(npx) * 0.01))]
            mx = values[int(math.floor(float(npx) * 0.99))]
            rng = mx - mn
            a[:, :, i] -= mn
            if rng > 0.:
                a[:, :, i] /= rng
            (a[:, :, i])[a[:, :, i] < 0.] = 0.
            (a[:, :, i])[a[:, :, i] > 1.] = 1.
        plt.close('all')
        f, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(1, 6) # , sharey=True)
        plt.gcf().set_size_inches(6 * 6, 6.2)
        # 1. raw image
        ax1.imshow(a)
        # 2. groundref
        ax2.imshow(ref.reshape(nrow, ncol), cmap = 'binary_r') # dont' know why colour !
        # 3. prediction
        ax3.imshow(pred.reshape(nrow, ncol), cmap = 'binary_r')
        # 4. probability: 0:npx because there's probabilities for two classes!
        print(prob.shape)
        ax4.imshow((prob[:, 1]).reshape(nrow, ncol), label='prob', cmap='gray') # two channels: one per class
        # 5. histogram of probability:
        hst, bins = np.histogram((prob[:, 1]).reshape(nrow, ncol), bins = 25) # 'auto')
        hst = hst.ravel().tolist()
        bins = bins.ravel().tolist()
        bins = bins[0:len(hst)]
        print("hst", hst, "|hst|", len(hst)) # hst.shape)
        print("bins", bins, "|bins|", len(bins)) # bins.shape)
        for i in range(0, len(hst)):
            print(i, bins[i], hst[i])
        # ax5.plot(bins[1:], hist)
        ax5.bar(bins, hst, width = 1/25., align='edge')
        # 6. projection of fit onto seed derived from thresholding probability
        thres = 0.5
        '''
        ti = len(bins) - 1 # start at right of histogram
        while hst[ti - 1] > hst[ti]: # go left until reach max
            ti -= 1
        thres = bins[ti]
        while hst[ti - 1] < hst[ti]: # continue left until reach min
            ti -= 1
        thres = bins[ti]
        print("automatically determined probability threshold", thres) '''
        # NOTE(review): `ref` is overwritten here -- from this point it is the
        # thresholded probability seed, not the original groundref.
        ref = ((prob[:, 1]).reshape(nrow, ncol))
        ref = ref.ravel()
        ref = ref > thres
        ref = [1. if i==True else 0. for i in ref]
        #img_samp, ref_samp = np.zeros((n_samp, nband)), np.zeros((n_samp))
        ref_samp = np.zeros(n_samp)
        nxt_i = 0 # index for next element to copy
        # Re-sample the seed labels with the same skip pattern as the first fit
        # (img_samp is reused unchanged).
        for i in range(0, npx):
            if i % skip_i != 0:
                # img_samp[nxt_i, :] = img_np[i, :]
                ref_samp[nxt_i] = ref[i]
                nxt_i += 1
        # sanity check: make sure we didn't fudge our integer math
        if nxt_i != n_samp:
            print("nxt_i", nxt_i, "n_samp", n_samp)
            err("unexpected n")
        # Second-pass fit on the seeded labels; plot the new prediction.
        sgd.fit(img_samp, ref_samp)
        pred = sgd.predict(img_np) # predict on full data
        ax6.imshow(pred.reshape(nrow, ncol), cmap = 'binary_r')
        # set titles
        ax1.set_title('image')
        ax2.set_title('groundref P ' + str(NP) + ' N ' + str(NN)) #: ' + groundref_name)
        ax3.set_title('predicted' + " TP " + str(TP) + " TN " + str(TN) + " FP " + str(FP) + " FN " + str(FN) ) #: ' + groundref_name)
        ax4.set_title('probability(true)')
        plt.tight_layout()
        fn = out_d + 'plot_' + img_name + '_' + cls_name + '.png'
        print("+w", fn)
        plt.savefig(fn)
    # write stats to log file
    d = datetime.date.today()
    date_str = str(d.year) + str(d.month).zfill(2) + str(d.day).zfill(2)
    lfn = out_d + date_str + '_log.txt'
    f = open(lfn, 'ab')
    P, N = TP + FN, TN + FP
    TPR, TNR = TP / P, TN / N
    accuracy = (TP + TN) / (P + N)
    balanced_accuracy = (TPR + TNR) / 2 # https://en.wikipedia.org/wiki/Precision_and_recall
    log_data = [os.path.basename(__file__), img_name, cls_name, fn, TP, TN, FP, FN, accuracy, balanced_accuracy]
    log_data = [str(log_d) for log_d in log_data]
    # The log is comma-delimited, so no field may itself contain a comma.
    for log_d in log_data:
        if len(log_d.split(',') ) > 1:
            err('delimiter should not be present within data')
    print("write:\n\t" + str(log_data))
    f.write((','.join(log_data) + '\n').encode())
    f.close()
    # next: probability, seeded prediction
|
{"hexsha": "aa1588c0f8bc77c40da69efda107d92f84512d9d", "size": 8221, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/recursive_supervised/recursive_sgd.py", "max_stars_repo_name": "breadcrumbbuilds/bcws-psu-research", "max_stars_repo_head_hexsha": "e541ffc050807186160a6b8d7cc6ac78fc9f3ddc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-10-23T19:02:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-23T22:51:25.000Z", "max_issues_repo_path": "py/recursive_supervised/recursive_sgd.py", "max_issues_repo_name": "breadcrumbbuilds/bcws-psu-research", "max_issues_repo_head_hexsha": "e541ffc050807186160a6b8d7cc6ac78fc9f3ddc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 142, "max_issues_repo_issues_event_min_datetime": "2020-02-08T00:37:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-12T21:49:23.000Z", "max_forks_repo_path": "py/recursive_supervised/recursive_sgd.py", "max_forks_repo_name": "breadcrumbbuilds/bcws-psu-research", "max_forks_repo_head_hexsha": "e541ffc050807186160a6b8d7cc6ac78fc9f3ddc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-12-11T00:45:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-22T08:54:59.000Z", "avg_line_length": 34.3974895397, "max_line_length": 139, "alphanum_fraction": 0.5440943924, "include": true, "reason": "import numpy", "num_tokens": 2451}
|
from astropy.io import fits
def hdr3Dto2D(hdr3D,verbose=True):
    """
    Removing the wavelength component of a header, i.e., converting
    the hdr from 3D (lambda, dec, ra) to 2D (dec, ra)
    --- INPUT ---
    hdr3D       The header object to convert from (lambda, dec, ra) to (dec, ra)
    verbose     Toggle verbosity (currently unused; kept for API compatibility)
    --- OUTPUT ---
    The same header object, modified in place, with every keyword whose
    name contains 'S3' or '_3' (third-axis keywords) removed.
    """
    # Snapshot the keys before deleting: removing entries while iterating
    # over hdr3D.keys() mutates the mapping mid-iteration, which raises
    # RuntimeError for dicts and is unreliable for FITS headers.
    for key in list(hdr3D.keys()):
        if any(a in key for a in ('S3', '_3')):
            del hdr3D[key]
    return hdr3D
filename = 'dh_SL1_cube'
wvlnum = 5 # 0-indexed
# Read the cube: HDU 0 holds the data cube and its 3D header, HDU 1 a
# wavelength table.  NOTE(review): assumes this HDU layout — confirm
# against the actual file.
with fits.open(filename+'.fits') as hdul0:
    data0 = hdul0[0].data[wvlnum]
    hdr0 = hdul0[0].header
    data1 = hdul0[1].data[0][0] # 0D list; data type is FITS_rec([([[*],[*],...,])], dtype=...)
## WCS keywords
# CTYPE1 = hdr0['CTYPE1']
# CTYPE2 = hdr0['CTYPE2']
# CRPIX1 = hdr0['CRPIX1'] # ref ax
# CRPIX2 = hdr0['CRPIX2'] # ref ay
# CRVAL1 = hdr0['CRVAL1'] # ref ra
# CRVAL2 = hdr0['CRVAL2'] # ref dec
# CDELT1 = hdr0['CDELT1'] # delta ra
# CDELT2 = hdr0['CDELT2'] # delta dec
# PC1_1 = hdr0['PC1_1'] # rot matrix
# PC2_1 = hdr0['PC2_1']
# PC1_2 = hdr0['PC1_2']
# PC2_2 = hdr0['PC2_2']
## alternative header extracting
hdr = hdr3Dto2D(hdr0)
# Wavelength of the selected slice, assuming a uniform wavelength grid:
# first entry plus (step * slice index).  TODO confirm grid is uniform.
wvl = data1[0] + (data1[1] - data1[0]) * wvlnum
wvl = wvl[0]
# Record the slice position/wavelength in the 2D header for provenance.
hdr['CRPIX3'] = (wvlnum, "sliced wavelength number")
hdr['CRVAL3'] = (wvl, "current wavelength is {} um".format(wvl))
hdr.add_comment("The cube is sliced at {} um.".format(wvl))
## new fits file creating
primary_hdu = fits.PrimaryHDU(data0)
hdul = fits.HDUList(primary_hdu)
hdul[0].header = hdr # copy all (2D) header info
#hdr = hdul[0].header
#hdr['CTYPE1'] = CTYPE1
#hdr['CTYPE2'] = CTYPE2
#hdr['CRPIX1'] = CRPIX1
#hdr['CRPIX2'] = CRPIX2
#hdr['CRVAL1'] = CRVAL1
#hdr['CRVAL2'] = CRVAL2
#hdr['CDELT1'] = CDELT1
#hdr['CDELT2'] = CDELT2
#hdr['PC1_1'] = PC1_1
#hdr['PC2_1'] = PC2_1
#hdr['PC1_2'] = PC1_2
#hdr['PC2_2'] = PC2_2
#hdr['EQUINOX'] = 2000.0
# output_verify='ignore' skips FITS-standard verification on write.
hdul.writeto(filename+'_s.fits', output_verify='ignore', overwrite=True)
|
{"hexsha": "cc7bcc61eea01d6b6caa85a4b8f4231c03bcb13f", "size": 1901, "ext": "py", "lang": "Python", "max_stars_repo_path": "MIRAGE/arx/v0/slice.py", "max_stars_repo_name": "kxxdhdn/MISSILE", "max_stars_repo_head_hexsha": "89dea38aa9247f20c444ccd0b832c674be275fbf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MIRAGE/arx/v0/slice.py", "max_issues_repo_name": "kxxdhdn/MISSILE", "max_issues_repo_head_hexsha": "89dea38aa9247f20c444ccd0b832c674be275fbf", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MIRAGE/arx/v0/slice.py", "max_forks_repo_name": "kxxdhdn/MISSILE", "max_forks_repo_head_hexsha": "89dea38aa9247f20c444ccd0b832c674be275fbf", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.803030303, "max_line_length": 92, "alphanum_fraction": 0.6438716465, "include": true, "reason": "from astropy", "num_tokens": 779}
|
# Load modules
import csv
import copy
import cv2
import numpy as np
import sklearn
import matplotlib.pyplot as plt
import scipy.stats
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Flatten, Lambda
from keras.layers import Cropping2D
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
# Define hyper parameters
batch_size = 64  # samples per generator batch
correction = 0.2  # steering correction for left/right camera frames
epochs = 3  # training epochs
model_file_name = 'model.h5'  # output path for the trained Keras model
class Model:
    """Behavioural-cloning steering model.

    Wraps driving-log loading/augmentation, the network definition
    (LeNet or NVIDIA architecture) and the train/evaluate pipelines
    around a Keras ``Sequential`` model.
    """

    def __init__(self):
        self.model = Sequential()

    def load_driving_data(self, filepath='./testdrive1/', augment_flip_data=False, correction_value=0.2):
        '''
        Load driving data from specified file path.

        :param filepath: directory containing ``driving_log.csv`` (trailing slash expected)
        :param augment_flip_data: if True, add a flipped duplicate of every record
        :param correction_value: steering correction applied to left/right camera frames
        :return: list of ``[image_path, steering_angle, flip_flag]`` records, where
                 flip_flag 1 means "use as-is" and 0 means "flip in the generator"
        '''
        driving_log = []
        with open(filepath + 'driving_log.csv') as csvfile:
            reader = csv.reader(csvfile)
            for line in reader:
                # Load center image path and steering angle
                driving_log.append([filepath + line[0], float(line[3]), 1])
                # Load left image path and steering angle (steer back right)
                driving_log.append([filepath + line[1], float(line[3]) + correction_value, 1])
                # Load right image path and steering angle (steer back left)
                driving_log.append([filepath + line[2], float(line[3]) - correction_value, 1])

        # Add a duplicate of the driving log data as a flipped image & steering
        if augment_flip_data:
            augment_driving_log = copy.deepcopy(driving_log)
            for log in augment_driving_log:
                log[2] = 0  # Flag record as 'flipped' which will be used later for image flipping
                # Flip steering measurement.  Fixed: keep the angle numeric —
                # the original stored a str here, inconsistent with all other records.
                log[1] = float(log[1]) * -1
            driving_log = driving_log + augment_driving_log
        # Return the data
        return driving_log

    def split_driving_data(self, data, test_size=0.2):
        '''
        Split driving records into train and validation sets.

        :param data: list of driving-log records
        :param test_size: fraction of records held out for validation
        :return: (train_samples, validation_samples)
        '''
        train_samples, validation_samples = train_test_split(data, test_size=test_size)
        return train_samples, validation_samples

    def random_brightness(self, image):
        '''
        Apply random brightness by scaling the HSV V channel by 0.5-1.5.

        :param image: RGB image array
        :return: RGB image with randomly adjusted brightness
        '''
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        image[:, :, 2] = image[:, :, 2] * (.5 + np.random.uniform())
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
        return image

    def generator(self, samples, batch_size=32, augment_flip_data=False, augment_brightness=False):
        '''
        Endless batch generator yielding shuffled (images, angles) arrays.

        :param samples: driving-log records ([path, angle, flip_flag])
        :param batch_size: records per yielded batch
        :param augment_flip_data: honour the flip flag (flag 0 -> flip image)
        :param augment_brightness: apply random brightness to every image
        '''
        num_samples = len(samples)
        while 1:  # Loop forever so the generator never terminates
            shuffle(samples)
            for offset in range(0, num_samples, batch_size):
                batch_samples = samples[offset:offset + batch_size]
                images = []
                angles = []
                for batch_sample in batch_samples:
                    name = batch_sample[0]
                    center_image = cv2.imread(name)
                    center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)
                    if augment_flip_data and batch_sample[2] == 0:
                        # Flip image (the angle was already negated at load time)
                        center_image = cv2.flip(center_image, 1)
                    # Apply random brightness
                    if augment_brightness:
                        center_image = self.random_brightness(center_image)
                    center_angle = float(batch_sample[1])
                    images.append(center_image)
                    angles.append(center_angle)
                X_train = np.array(images)
                y_train = np.array(angles)
                yield sklearn.utils.shuffle(X_train, y_train)

    def load_data(self, augment_flip_data=False):
        '''
        Loads and augment the driving data and returns the training and validation generator

        :param augment_flip_data: propagate flip augmentation to loaders/generators
        :return: (train_samples, validation_samples, test_samples,
                  train_generator, validation_generator, test_generator)
        '''
        # Load the image and steering data
        # Load Track 1 driving log
        driving_log = self.load_driving_data(filepath='./drivelogs/track-1-2/', augment_flip_data=augment_flip_data, correction_value=correction)
        # Load track 1 in reverse driving log
        driving_log = driving_log + self.load_driving_data(filepath='./drivelogs/track-1-reverse/', augment_flip_data=augment_flip_data, correction_value=correction)
        # Load track 2 driving log (no flip augmentation for this set)
        driving_log = driving_log + self.load_driving_data(filepath='./drivelogs/track-2-2/', augment_flip_data=False, correction_value=correction)
        # Load track 2 in reverse
        driving_log = driving_log + self.load_driving_data(filepath='./drivelogs/track-2-reverse/', augment_flip_data=augment_flip_data, correction_value=correction)
        # Load track 2 recovery
        driving_log = driving_log + self.load_driving_data(filepath='./drivelogs/track-2-recovery/', augment_flip_data=augment_flip_data, correction_value=correction)
        print('Driving Log Samples: {}'.format(len(driving_log)))

        # 80/20 train/validation split, then 70/30 validation/test split
        train_samples, validation_samples = self.split_driving_data(driving_log, 0.2)
        validation_samples, test_samples = self.split_driving_data(validation_samples, 0.3)
        # compile and train the model using the generator function
        train_generator = self.generator(train_samples, batch_size=batch_size, augment_flip_data=augment_flip_data, augment_brightness=True)
        validation_generator = self.generator(validation_samples, batch_size=batch_size, augment_flip_data=augment_flip_data)
        test_generator = self.generator(test_samples, batch_size=batch_size, augment_flip_data=augment_flip_data)
        return train_samples, validation_samples, test_samples, train_generator, validation_generator, test_generator

    def visualize_hist(self, data):
        '''
        Visualize the distribution of the label values

        :param data: iterable of steering angles
        '''
        fig, axes = plt.subplots()
        axes.hist(data, histtype='bar')
        axes.set_title('Class distribution')
        axes.legend(prop={'size': 10})
        fig.tight_layout()
        plt.show()

    def visualize_normal_dist(self, data):
        '''
        Visualize the normal distribution of the labels

        :param data: iterable of steering angles
        '''
        x = np.arange(np.min(data), np.max(data), 0.001)
        y_train_mean = np.mean(data)
        y_train_sd = np.std(data)
        print('mean:', y_train_mean)
        print('standard deviation', y_train_sd)
        y_norm = scipy.stats.norm.pdf(x, y_train_mean, y_train_sd)
        fig, ax = plt.subplots()
        ax.plot(x, y_norm, '--')
        plt.show()

    def lenet(self):
        '''Append LeNet-style conv/pool/dense layers to self.model.'''
        # Layer 1
        self.model.add(Conv2D(6, (5, 5), strides=1, activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2, strides=2))
        # Layer 2
        self.model.add(Conv2D(16, (5, 5), strides=1, activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2, strides=2))
        # Layer 3
        self.model.add(Flatten())
        # Fully connected layer
        self.model.add(Dense(120))
        self.model.add(Dense(84))

    def nvidia(self):
        '''Append the NVIDIA end-to-end-driving architecture to self.model.'''
        # Convolution layers 1-3: 5x5 kernels with 2x2 stride
        self.model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
        self.model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
        self.model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
        # Convolution layers 4-5: 3x3 kernels
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        # Flatten layer
        self.model.add(Flatten())
        # Connected layers
        self.model.add(Dense(100))
        self.model.add(Dense(50))
        self.model.add(Dense(10))

    def run_model(self, train_samples, validation_samples, train_generator, validation_generator):
        '''
        Assemble preprocessing + network, train it, and plot loss curves.

        :param train_samples: training records (used for step counting)
        :param validation_samples: validation records (used for step counting)
        :param train_generator: generator of training batches
        :param validation_generator: generator of validation batches
        '''
        # Normalize pixels to roughly [-0.5, 0.5].  Fixed: divisor was 255.5
        # (typo); 8-bit channels span 0-255, so divide by 255.0.
        self.model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
        # Crop image. Crop 50 pixels from the top, 20 from the bottom, 0 from left, 0 from right
        self.model.add(Cropping2D(cropping=((50, 20), (0, 0))))
        # Load model
        self.nvidia()
        # Output to only predict steering measurement
        self.model.add(Dense(1))
        self.model.compile(loss='mse', optimizer='adam')
        history_object = self.model.fit_generator(train_generator,
                                                  steps_per_epoch=len(train_samples)//batch_size,
                                                  validation_data=validation_generator,
                                                  validation_steps=len(validation_samples)//batch_size,
                                                  epochs=epochs,
                                                  )
        # Output model summary
        self.model.summary()
        ### print the keys contained in the history object
        print(history_object.history.keys())
        ### plot the training and validation loss for each epoch
        plt.plot(history_object.history['loss'])
        plt.plot(history_object.history['val_loss'])
        plt.title('model mean squared error loss')
        plt.ylabel('mean squared error loss')
        plt.xlabel('epoch')
        plt.legend(['training set', 'validation set'], loc='upper right')
        plt.show()

    def save_model(self, model_name):
        # Save model file
        self.model.save(model_name)

    def load_model(self, model_name):
        # Restore a previously trained model from disk
        self.model = load_model(model_name)

    def training_pipeline(self):
        '''Full pipeline: load data, visualize labels, train, save, evaluate.'''
        # Load data
        train_samples, validation_samples, test_samples, train_generator, validation_generator, test_generator = self.load_data(True)
        # Visualize data.  Fixed: np.float is a removed alias (NumPy >= 1.20);
        # use the builtin float.
        steering_data = np.array(train_samples)[:, 1]
        self.visualize_hist(np.round(steering_data.astype(float), 6))
        self.visualize_normal_dist(np.round(steering_data.astype(float), 3))
        # Run model
        self.run_model(train_samples, validation_samples, train_generator, validation_generator)
        # Save model
        self.save_model(model_file_name)
        self.evaluate(test_generator, test_samples)

    def evaluation_pipeline(self):
        '''Reload the saved model and evaluate it on a fresh test split.'''
        self.load_model(model_file_name)
        train_samples, validation_samples, test_samples, train_generator, validation_generator, test_generator = self.load_data(True)
        self.evaluate(test_generator, test_samples)

    def evaluate(self, test_generator, test_samples):
        '''Report MSE loss on the test generator.'''
        score = self.model.evaluate_generator(test_generator, steps=len(test_samples) // batch_size)
        print('Test loss:', score)
model = Model()
# Guard the pipeline behind __main__ so importing this module (e.g. from
# drive.py) does not immediately start training.
if __name__ == '__main__':
    # Run training pipeline
    model.training_pipeline()
    # Run evaluation pipeline
    # model.evaluation_pipeline()
|
{"hexsha": "d51dcd21cdcc6e2d63f5085f3013d73003dfad83", "size": 12004, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "dalacan/behavioral-cloning", "max_stars_repo_head_hexsha": "d312380d5b1d1d655b2ffd7b9eb42f5c56795fdd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-19T02:18:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-19T02:18:14.000Z", "max_issues_repo_path": "model.py", "max_issues_repo_name": "dalacan/behavioral-cloning", "max_issues_repo_head_hexsha": "d312380d5b1d1d655b2ffd7b9eb42f5c56795fdd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model.py", "max_forks_repo_name": "dalacan/behavioral-cloning", "max_forks_repo_head_hexsha": "d312380d5b1d1d655b2ffd7b9eb42f5c56795fdd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8803986711, "max_line_length": 166, "alphanum_fraction": 0.6403698767, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2619}
|
# !!! The order of these imports needs to be preserved !!!
import imglyb
from imglyb import util
from jnius import autoclass, cast
# !!!
import multiprocessing
import numpy as np
import vigra
import h5py
def apply_wsgray(img):
    """Compute a Euclidean distance transform of the negated input via the
    imglib2 Java implementation, parallelised over all CPU cores.

    NOTE(review): img is converted with ``util.to_imglib``; presumably a
    numpy array — confirm expected dtype/sign convention with callers.
    Returns a numpy array of the same shape/dtype, filled in place by Java.
    """
    # TODO we properly want grayscale types instead
    #RealARGBConverter = autoclass( 'net.imglib2.converter.RealARGBConverter')
    #Converters = autoclass( 'net.imglib2.converter.Converters' )
    #ARGBType = autoclass ( 'net.imglib2.type.numeric.ARGBType' )
    #RealType = autoclass ( 'net.imglib2.type.numeric.real.DoubleType' )
    DistanceTransform = autoclass( 'net.imglib2.algorithm.morphology.distance.DistanceTransform' )
    DISTANCE_TYPE = autoclass( 'net.imglib2.algorithm.morphology.distance.DistanceTransform$DISTANCE_TYPE' )
    Views = autoclass( 'net.imglib2.view.Views' )
    Executors = autoclass( 'java.util.concurrent.Executors' )
    #t = ARGBType()
    # Output buffer; the Java transform writes into it through to_imglib.
    dt = np.zeros_like( img, dtype=img.dtype )
    cpu_count = multiprocessing.cpu_count()
    DistanceTransform.transform(
        Views.extendBorder( util.to_imglib( -img ) ), # -img or img ?!?
        util.to_imglib( dt ), DISTANCE_TYPE.EUCLIDIAN,
        Executors.newFixedThreadPool( cpu_count ), cpu_count,
        1e-4, 1e-4 )
    return dt
if __name__ == '__main__':
    pmap_p = '/home/consti/Work/data_neuro/CREMI/wsdt_test/cremi_sampleC_probs_cantorV1.h5'
    # Explicit read-only mode: newer h5py versions warn/error when the
    # mode argument is omitted.
    with h5py.File(pmap_p, 'r') as f:
        x = f['data'][1]
    # Fixed: the Python-2 statement `print x.shape` is a SyntaxError on
    # Python 3; the parenthesised form works on both.
    print(x.shape)
    dt = apply_wsgray(x)
    vigra.writeHDF5(dt, './tmp/imlyb_dt.h5', 'data')
|
{"hexsha": "671eb259525194ed8b6d75708b80083035bd0984", "size": 1553, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/imglyb/test_imglyb.py", "max_stars_repo_name": "constantinpape/watersheds", "max_stars_repo_head_hexsha": "9fde72b2df5aa0e3531969361b3a6c37be77ba8a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/imglyb/test_imglyb.py", "max_issues_repo_name": "constantinpape/watersheds", "max_issues_repo_head_hexsha": "9fde72b2df5aa0e3531969361b3a6c37be77ba8a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/imglyb/test_imglyb.py", "max_forks_repo_name": "constantinpape/watersheds", "max_forks_repo_head_hexsha": "9fde72b2df5aa0e3531969361b3a6c37be77ba8a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2954545455, "max_line_length": 108, "alphanum_fraction": 0.6973599485, "include": true, "reason": "import numpy", "num_tokens": 430}
|
# coding: utf-8
import hashlib
import numpy as np
import cv2
import os
def four_point_transform(image, pts):
    """Warp *image* so the quadrilateral *pts* becomes an axis-aligned
    rectangle, returning the top-down ("bird's eye") view."""
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    def edge_length(p, q):
        # Euclidean distance between two corner points.
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # Output size: the longer of each pair of opposite edges.
    maxWidth = max(int(edge_length(br, bl)), int(edge_length(tr, tl)))
    maxHeight = max(int(edge_length(tr, br)), int(edge_length(tl, bl)))

    # Destination corners in tl, tr, br, bl order.
    dst = np.array(
        [[0, 0],
         [maxWidth - 1, 0],
         [maxWidth - 1, maxHeight - 1],
         [0, maxHeight - 1]],
        dtype="float32")

    M = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, M, (maxWidth, maxHeight))
def extract_file_hashsum(image):
    """Return the SHA-256 hex digest of the file at path *image*.

    Fixed: the original opened the file in text mode ('r') and passed a
    str to hashlib.sha256, which raises TypeError on Python 3 (and can
    corrupt binary data).  Read in binary mode, in chunks, inside a
    context manager so the handle is always closed.
    """
    hash_sum = hashlib.sha256()
    with open(image, 'rb') as img:
        # Chunked reads keep memory bounded for large images.
        for chunk in iter(lambda: img.read(65536), b''):
            hash_sum.update(chunk)
    return hash_sum.hexdigest()
|
{"hexsha": "f2c5a01b78a78f026d5da43685f980086fdb9e11", "size": 1001, "ext": "py", "lang": "Python", "max_stars_repo_path": "idmatch/idcardocr/core/processing/utils.py", "max_stars_repo_name": "javierherrera1996/idmatch", "max_stars_repo_head_hexsha": "8bb27dafaa12b7b0bdb745071e81e6b940b7553a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 55, "max_stars_repo_stars_event_min_datetime": "2017-05-27T11:13:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T21:22:28.000Z", "max_issues_repo_path": "idmatch/idcardocr/core/processing/utils.py", "max_issues_repo_name": "javierherrera1996/idmatch", "max_issues_repo_head_hexsha": "8bb27dafaa12b7b0bdb745071e81e6b940b7553a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2017-05-27T11:10:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T00:39:22.000Z", "max_forks_repo_path": "idmatch/idcardocr/core/processing/utils.py", "max_forks_repo_name": "javierherrera1996/idmatch", "max_forks_repo_head_hexsha": "8bb27dafaa12b7b0bdb745071e81e6b940b7553a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2017-05-30T19:08:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T00:19:25.000Z", "avg_line_length": 27.0540540541, "max_line_length": 70, "alphanum_fraction": 0.5544455544, "include": true, "reason": "import numpy", "num_tokens": 342}
|
import pygrib
import pytest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from cartopy.util import add_cyclic_point
import cartopy.crs as ccrs
# Open a sample reduced lat/lon GRIB2 file and read its first message.
# NOTE(review): assumes the sampledata path exists relative to the test dir.
grbs = pygrib.open('../sampledata/reduced_latlon_surface.grib2')
grb = grbs.readline()
data = grb.values
lats, lons = grb.latlons()
lons1 = lons[0,:]; lats1 = lats[:,0]
# add cyclic (wrap-around) point to global grid
data,lons1 = add_cyclic_point(data, coord=lons1)
lons,lats = np.meshgrid(lons1,lats1)
# Image-comparison test: pytest-mpl compares the returned figure against a
# stored baseline image (tolerance 20, with text stripped).
@pytest.mark.mpl_image_compare(tolerance=20,remove_text=True)
def test_reduced_ll():
    fig = plt.figure()
    ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=0))
    cs = ax.contourf(lons,lats,data,15)
    ax.coastlines()
    plt.title(grb.name)
    return fig
# if running with GUI backend, show plot.
if matplotlib.get_backend().lower() != 'agg':
    test_reduced_ll()
    plt.show()
|
{"hexsha": "bd287185b6dc8dfbf8bfc8d5cd170036f7e3b4ec", "size": 872, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_reduced_ll.py", "max_stars_repo_name": "FelixDQ/pygrib", "max_stars_repo_head_hexsha": "5d1761f86b684661788d8297c9299f91b4bd714e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 283, "max_stars_repo_stars_event_min_datetime": "2015-01-12T01:56:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T10:10:48.000Z", "max_issues_repo_path": "test/test_reduced_ll.py", "max_issues_repo_name": "FelixDQ/pygrib", "max_issues_repo_head_hexsha": "5d1761f86b684661788d8297c9299f91b4bd714e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 149, "max_issues_repo_issues_event_min_datetime": "2015-01-23T18:31:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T01:41:50.000Z", "max_forks_repo_path": "test/test_reduced_ll.py", "max_forks_repo_name": "FelixDQ/pygrib", "max_forks_repo_head_hexsha": "5d1761f86b684661788d8297c9299f91b4bd714e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 103, "max_forks_repo_forks_event_min_datetime": "2015-02-03T10:08:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T01:58:30.000Z", "avg_line_length": 28.1290322581, "max_line_length": 67, "alphanum_fraction": 0.7362385321, "include": true, "reason": "import numpy", "num_tokens": 258}
|
(*
Author(s):
Felix Jahn (1)
Yannick Forster (1)
Affiliation(s):
(1) Saarland University, Saarbrücken, Germany
*)
Require Export Undecidability.Axioms.EA.
Require Export Undecidability.Shared.Pigeonhole.
Require Export Undecidability.Shared.FinitenessFacts.
Require Export Undecidability.Synthetic.reductions Undecidability.Synthetic.truthtables.
Require Export Undecidability.Synthetic.DecidabilityFacts Undecidability.Synthetic.EnumerabilityFacts Undecidability.Synthetic.SemiDecidabilityFacts.
Require Export Undecidability.Shared.ListAutomation.
Require Export Undecidability.Synthetic.simple.
Require Export Undecidability.Shared.embed_nat.
Require Export List Arith.
Export EmbedNatNotations.
(* q << p: q is a sub-predicate of p (pointwise implication). *)
Local Notation "q << p" := (forall x, q x -> p x) (at level 50).
(* If f enumerates a non-empty predicate p, some index of f yields a value. *)
Lemma proof_computation {X} (p : X -> Prop) f:
enumerator f p -> (exists y, p y)
-> exists n y, f n = Some y.
Proof.
intros E [y [n py] % E].
eauto.
Qed.
(* Least index n at which the enumerator f produces a value, computed via
   mu_nat_dep_least; the sig type records both productivity and minimality. *)
Definition mu_enum_NN_sig {X} (p : X -> Prop) f:
enumerator f p -> ex p ->
{n | (exists x, f n = Some x) /\
(forall n1, (exists x1, (f n1) = Some x1) -> n <= n1) }.
Proof.
intros E H.
assert (exists n x, f n = Some x) by exact (proof_computation p f E H).
eapply mu_nat_dep_least in H0 as (? & ?).
- exists x. firstorder. eapply H2. lia. eauto.
- intros n. destruct (f n).
+ left. eauto.
+ right. intros []. inversion H1.
Defined.
(* Projections of mu_enum_NN_sig: the witness index and its specification. *)
Definition mu_enum_NN {X} (p: X -> Prop) f E H : nat
:= proj1_sig (mu_enum_NN_sig p f E H).
Definition mu_enum_NN_spec {X} (p: X -> Prop) f E H
:= proj2_sig (mu_enum_NN_sig p f E H).
(* The value produced at the least productive index, together with a proof
   that it satisfies p and agrees with f at that index. *)
Definition mu_enum_sig {X} (p : X -> Prop) f :
forall E H, {x | p x /\ Some x = f (mu_enum_NN p f E H)}.
Proof.
intros E H.
- destruct (f (mu_enum_NN p f E H)) eqn: H2.
+ exists x.
intuition. apply E. eauto.
+ exfalso. remember (mu_enum_NN_spec p f E H). destruct a as [[x a1] a2].
unfold mu_enum_NN in H2. rewrite a1 in H2. discriminate.
Defined.
(* Projections: the chosen witness and its specification. *)
Definition mu_enum {X} (p: X -> Prop) f E H : X
:= proj1_sig (mu_enum_sig p f E H).
Definition mu_enum_spec {X} (p: X -> Prop) f E H
:= proj2_sig (mu_enum_sig p f E H).
(* Witness operator for enumerable, provably non-empty predicates. *)
Definition wo_enum_sig {X} (p : X -> Prop) f:
enumerator f p -> (exists x, p x) -> {x | p x}.
Proof.
intros E H. destruct (mu_enum_sig p f E H).
exists x. intuition.
Defined.
(* mu_enum_NN is proof-irrelevant in the non-emptiness witness: both indices
   are minimal, hence equal. *)
Lemma constant_mu_enum_NN {X} (p : X -> Prop) f E:
forall H1 H2, mu_enum_NN p f E H1 = mu_enum_NN p f E H2.
Proof.
intros H1 H2.
set (W1:= mu_enum_NN_spec p f E H1).
set (W2:= mu_enum_NN_spec p f E H2).
destruct W1 as [p1 L1], W2 as [p2 L2].
specialize (L2 (mu_enum_NN p f E H1)).
specialize (L1 (mu_enum_NN p f E H2)).
apply L2 in p1. apply L1 in p2.
unfold mu_enum_NN in *. lia.
Qed.
(* f at the least index returns exactly the mu_enum witness. *)
Lemma mu_enum_agree {X} (p : X -> Prop) f E H:
f (mu_enum_NN p f E H) = Some (mu_enum p f E H).
Proof.
remember (mu_enum_spec p f E H). cbn in a. destruct a as [a1 a2].
unfold mu_enum. rewrite a2. trivial.
Qed.
(* mu_enum is likewise independent of the non-emptiness proof. *)
Lemma constant_mu_enum {X} (p : X -> Prop) f E:
forall H1 H2, mu_enum p f E H1 = mu_enum p f E H2.
Proof.
intros H1 H2.
assert
(Some (mu_enum p f E H1) = Some (mu_enum p f E H2)).
- rewrite <- (mu_enum_agree p f E H1).
- rewrite <- (mu_enum_agree p f E H2).
rewrite (constant_mu_enum_NN p f E H1 H2).
trivial.
- inversion H. trivial.
Qed.
(* Finite complements: complToBound L b lists the numbers <= b not in L,
   with bounds, length and NoDup facts used for infinitude arguments. *)
Section ComplToBound.
Definition complToBound L b : list nat
:= filter (fun x => Dec (~ In x L)) (seq 0 (S b)).
(* Every element of the complement list is bounded by b. *)
Lemma complToBound_Bound L b :
forall x, In x (complToBound L b) -> x <= b.
Proof.
intros x [H % in_seq ?] % in_filter_iff. lia.
Qed.
(* A filter and its negation partition the length of a list. *)
Lemma filter_length {X} f (l : list X) :
length l = length (filter f l) + length (filter (fun x => (negb (f x))) l).
Proof.
induction l; cbn.
- reflexivity.
- destruct f; cbn; lia.
Qed.
(* Filtering preserves duplicate-freeness. *)
Lemma filter_NoDup {X} f (l : list X) :
NoDup l -> NoDup (filter f l).
Proof.
induction 1; cbn.
- econstructor.
- destruct f; eauto. econstructor; auto.
intros ? % in_filter_iff. firstorder.
Qed.
(* The complement list and L together cover at least 0..b. *)
Lemma complToBound_length L b:
length (complToBound L b) + length L >= S b.
Proof.
rewrite <- (seq_length (S b) 0).
erewrite filter_length with (l := seq 0 (S b)).
unfold complToBound.
eapply plus_le_compat_l.
generalize (seq_NoDup (S b) 0).
generalize (seq 0 (S b)). clear.
intros. erewrite filter_ext with (g := fun x => Dec (In x L)).
2:{ intros a. destruct Dec; cbn; destruct Dec; firstorder congruence. }
eapply NoDup_incl_length. now eapply filter_NoDup.
clear. induction l; cbn.
- firstorder.
- destruct Dec; cbn. 2: eauto.
intros ? [-> | ]; eauto.
Qed.
Lemma complToBound_NoDup L b:
NoDup (complToBound L b).
Proof.
eapply filter_NoDup, seq_NoDup.
Qed.
(* Prefixes: membership and NoDup are preserved by firstn. *)
Lemma firstn_In {X} (l : list X) n x : In x (firstn n l) -> In x l.
Proof.
induction n in x, l |- *; destruct l; cbn; firstorder.
Qed.
Lemma firstn_NoDup {X} (l : list X) n : NoDup l -> NoDup (firstn n l).
Proof.
induction 1 in n |- *; destruct n; cbn; try now econstructor.
econstructor; eauto.
now intros ? % firstn_In.
Qed.
End ComplToBound.
Section fix_ct.
(* Assuming enumerator of semidecidable predicates W and its semidecidability *)
(* Variable W : nat -> nat -> Prop. *)
(* W enumerates exactly the semidecidable predicates (via EA axioms). *)
Lemma es : forall p : nat -> Prop, semi_decidable p <-> exists c, forall x, W c x <-> p x.
Proof.
intros p. rewrite <- enum_iff, W_spec. firstorder.
Qed.
(* Section assumptions: a semidecider for W, and a code c_top accepting all x. *)
Variable W_SDec: nat * nat -> nat -> bool.
Variable W_semidecider: semi_decider W_SDec (fun '(c,x) => W c x).
Variable c_top : nat.
Variable c_top_spec: forall x, W c_top x.
(* Auxiliary Predicate C: pairs (c,x) with x accepted by c and x > 2c. *)
Definition C : (nat*nat) -> Prop
:= fun '(c,x) => W c x /\ x > 2*c.
(* C is inhabited, witnessed by c_top. *)
Lemma C_nonempty:
C (c_top, 1 + 2 * c_top).
Proof.
split.
- apply c_top_spec.
- lia.
Qed.
(* Semidecider for C: run W's semidecider and check the bound 2c < x. *)
Definition C_SDec: (nat*nat) -> nat -> bool
:= fun '(c,x) n => andb (W_SDec (c,x) n) (2*c <? x).
Lemma C_semidecider:
semi_decider C_SDec C.
Proof.
intros [c x].
split.
- intros [Wcx E].
apply (W_semidecider(c,x)) in Wcx as [n Wcx].
exists n.
apply leb_correct_conv in E. unfold C_SDec. rewrite Nat.ltb_antisym, Wcx, E. firstorder.
- intros [n Ccx].
apply RelationClasses.eq_Symmetric, Bool.andb_true_eq in Ccx as [H1 H2].
split.
+ apply (W_semidecider (c,x)). exists n. firstorder.
+ apply leb_complete_conv.
rewrite Nat.leb_antisym.
rewrite <- H2. firstorder.
Qed.
(* Surjection nat -> (nat * nat) * nat via the Cantor pairing (embed/unembed). *)
Definition iso_three_nat_func : nat -> (nat * nat) * nat
:= fun! ⟨n1, n2⟩ => (unembed n1, n2).
Definition surjective {X} {Y} (f : X -> Y) := forall y, exists x, f x = y.
Lemma iso_three_nat_func_spec:
surjective iso_three_nat_func.
Proof.
intros [[n1 n2] n3].
exists ⟨ ⟨n1,n2⟩, n3⟩.
unfold iso_three_nat_func.
now rewrite !embedP.
Qed.
(*
Definition C_Enum: nat -> nat * nat
:= enum_To_StrongenumF (semidec_To_enumF C_SDec iso_three_nat_func) (c_top, 1 + 2 * c_top).
*)
(* A strong (total) enumerator for C, obtained from the semidecider and the
   inhabitant C_nonempty as default value. *)
Lemma C_strong_enumerator:
∑ C_Enum, strong_enumerator C_Enum C.
Proof.
eexists.
eapply enumerator_strong_enumerator with (x0 := (c_top, 1 + 2 * c_top)).
- exact C_nonempty.
- eapply semi_decider_enumerator. exact _. exact C_semidecider.
Qed.
Definition C_Enum := proj1_sig C_strong_enumerator.
Definition C_enumerator : strong_enumerator C_Enum C := proj2_sig C_strong_enumerator.
(* Domain of C: codes c that accept some x above the bound. *)
Definition DomC : nat -> Prop
:= fun c => exists x, C (c,x).
Lemma DomC_nonempty: DomC c_top.
Proof.
exists (1+2*c_top).
exact C_nonempty.
Qed.
(* Enumerate DomC by projecting the first component of C's enumeration. *)
Definition DomC_Enum : nat -> nat
:= fun n => fst (C_Enum n).
Lemma DomC_enumerator:
strong_enumerator DomC_Enum DomC.
Proof.
intros c; split; unfold DomC_Enum.
- intros [x H]. apply C_enumerator in H as [n H].
exists n. rewrite H. trivial.
- intros [n H]. exists (snd (C_Enum n)). rewrite <- H.
apply C_enumerator. exists n. apply surjective_pairing.
Qed.
(* The fibre of C at a fixed code c, with an enumerator filtered from C_Enum. *)
Definition RangeC c : nat -> Prop
:= fun x => C(c,x).
Definition RangeC_Enum c : nat -> option nat
:= fun n => match C_Enum n with (c1,x) => if c =? c1 then Some x else None end.
Lemma RangeC_Enum_spec c:
enumerator (RangeC_Enum c) (RangeC c).
Proof.
intros x. split.
- intros Ccx. unfold RangeC in Ccx. apply C_enumerator in Ccx as [n Ccx].
exists n. unfold RangeC_Enum. rewrite Ccx.
now rewrite Nat.eqb_refl.
- intros [n H]. apply C_enumerator.
exists n. unfold RangeC_Enum in H.
destruct (C_Enum n) as [c1 x1].
destruct (Nat.eqb_spec c c1).
+ rewrite e. now inversion H.
+ discriminate.
Qed.
(* Definition of psi via recursive mu-operator for enumerable predicates *)
Definition psi: forall c, DomC c -> nat .
Proof.
intros c H.
apply (mu_enum (fun x => C (c,x)) (RangeC_Enum c)).
- apply RangeC_Enum_spec.
- exact H.
Defined.
(* psi c H lands in C's fibre at c... *)
Lemma psi_spec c H:
C (c,psi c H).
Proof.
eapply (mu_enum_spec (fun x => C (c,x))).
Qed.
(* ...and therefore exceeds 2c. *)
Lemma psi_spec1 c H:
psi c H > 2*c.
Proof.
exact (proj2 (psi_spec c H)).
Qed.
(* psi is proof-irrelevant in the domain witness. *)
Lemma psi_PI c H1 H2:
psi c H1 = psi c H2.
Proof.
apply (constant_mu_enum (fun x => C (c, x)) (RangeC_Enum c)
(RangeC_Enum_spec c) H1 H2).
Qed.
(* Definition of the simple predicate S as the range of psi *)
(* The simple predicate S: the range of psi over DomC. *)
Definition S : nat -> Prop
:= fun x => exists c H, psi c H = x.
(* S is semidecidable *)
(* Every enumerated code lies in DomC, so psi may be applied to it. *)
Definition DomC_proof n: DomC (DomC_Enum n).
Proof.
apply DomC_enumerator.
eauto.
Qed.
(* Total enumerator of S: apply psi along the enumeration of DomC. *)
Definition S_Enum : nat -> nat
:= fun n => let c := DomC_Enum n in
let H := DomC_proof n in
psi c H.
Lemma S_enumerator:
strong_enumerator S_Enum S.
Proof.
intros x.
split.
- intros [c [H E]].
assert (exists n, DomC_Enum n = c) as [n H1].
apply DomC_enumerator. exact H.
exists n. unfold S_Enum.
revert E. revert H. rewrite <- H1. intros H E.
rewrite <- E.
exact (psi_PI (DomC_Enum n) (DomC_proof n) H).
- intros [n H].
unfold S_Enum in H.
exists (DomC_Enum n).
exists (DomC_proof n).
exact H.
Qed.
(* Enumerability of S (over a discrete type) yields semidecidability. *)
Corollary S_SemiDec:
semi_decidable S.
Proof.
eapply enumerable_semi_decidable. eapply discrete_iff. econstructor. exact _.
enough (strongly_enumerable S) as ? % enumerable_strongly_enumerable_iff by tauto.
exists S_Enum. exact S_enumerator.
Qed.
(* Complement of S contains no semidecidable, infinite subset *)
(* If q were a semidecidable subset of compl S with semidecider code c,
   then psi c would place an element of q into S (witnesses exceed 2*c),
   contradicting q << compl S; hence q is bounded by 2*c and exhaustible. *)
Lemma S_No_S_Inf_Subset:
forall (q: nat -> Prop), ~ exhaustible q -> semi_decidable q -> ~ (q << (compl S)).
Proof.
intros q Inf [c Se] % es H.
apply Inf.
exists (seq 0 (1+2*c)).
intros x qx.
assert (x <= 2*c).
- destruct (Nat.le_gt_cases x (2*c)).
+ exact H0.
+ exfalso.
assert (exists x, C (c,x)).
* exists x. split. apply Se, qx. exact H0.
* assert (exists x0, q x0 /\ S x0) as [x0 [qx0 Sx0]].
{ exists (psi c H1). split.
- apply Se. apply psi_spec.
- exists c. exists H1. trivial.
}
apply (H x0 qx0), Sx0.
- apply in_seq. lia.
Qed.
(* Co-Infinity Proof of S *)
(* Every listed element of S below 2*n stems from psi at some code c < n,
   because psi c H > 2*c forces the code below n. *)
Lemma DomC_pullback n L:
(forall x, In x L -> S x /\ x <= 2 * n) -> forall x, In x L
-> exists c H, psi c H = x /\ c < n.
Proof.
intros H1 x [[c [H Sx]] E] % H1.
exists c. exists H. intuition. rewrite <- Sx in E.
assert (psi c H > 2 * c) by apply psi_spec1.
lia.
Qed.
(* Pull a duplicate-free list of elements of S back to a duplicate-free
   list of codes of equal length, all below n. *)
Lemma DomC_pullback_list n L:
NoDup L -> (forall x, In x L -> S x /\ x <= 2 * n) ->
exists (LC: list nat), NoDup LC /\ length LC = length L /\
forall c, In c LC -> exists H, In (psi c H) L /\ c < n.
Proof.
intros ND Sub.
induction L.
- exists nil.
intuition.
- remember (DomC_pullback n (a::L) Sub a).
assert (In a (a::L)) as H0 by intuition .
apply e in H0 as [c0 [H0 [E1 E2]]].
assert (NoDup L) by (inversion ND; intuition).
apply IHL in H as [LC H].
exists (c0::LC).
intuition.
+ constructor. intros In. apply H3 in In as [H0' E].
assert (a = psi c0 H0') by (rewrite <- E1; exact (psi_PI c0 H0 H0')).
rewrite <- H2 in E. inversion ND. intuition. exact H1.
+ cbn. rewrite H. trivial.
+ destruct H2.
* rewrite <- H2. exists H0. rewrite E1. intuition.
* apply H3 in H2 as [H4 E]. exists H4. intuition.
+ intros y In1. assert (In y (a::L)) by intuition.
apply Sub in H1. exact H1.
Qed.
(* Hence at most n elements of S lie below 2*n (pigeonhole on the codes). *)
Lemma S_List_Bound n L:
NoDup L -> (forall x, In x L -> S x /\ x <= 2 * n)
-> length L <= n.
Proof.
intros ND [LC H] % DomC_pullback_list; intuition.
rewrite <- H.
assert (incl LC (seq 0 n)).
- intros c [_ [_ H3]] % H2. apply in_seq. lia.
- apply pigeonhole_length in H1.
+ now rewrite seq_length in H1.
+ intros. decide (x1 = x2); tauto.
+ exact H0.
Qed.
(* Listing of predicates up to a bound b *)
(* PredListTo p L b: L lists exactly the elements of p that are <= b. *)
Definition PredListTo p : list nat -> nat -> Prop
:= fun L b => forall x, In x L <-> p x /\ x <= b.
Lemma PredListTo_spec {p L b}:
PredListTo p L b -> forall x, In x L -> p x /\ x <= b.
Proof.
intros H x I % H.
apply I.
Qed.
Lemma PredListTo_Bound {p L b}:
PredListTo p L b -> forall x, In x L -> x <= b.
Proof.
intros H x I % H.
apply I.
Qed.
(* Consing an element above the bound preserves duplicate-freeness. *)
Lemma NoDupBoundH {L} b:
NoDup L -> (forall x, In x L -> x <= b) -> forall x, x > b -> NoDup (x::L).
Proof.
intros ND H x E.
constructor.
- intros H1 % H. lia.
- exact ND.
Qed.
(* A bounded, duplicate-free listing of any predicate exists weakly
   (under double negation), via classical case analysis (ccase) on
   membership at each successive bound. *)
Lemma PredNoDupListTo_NNExist p:
forall b, ~~ exists L, PredListTo p L b /\ NoDup L.
Proof.
induction b; intros H.
- ccase (p 0) as [H0 | H0]; apply H.
+ exists [0]. split; try split.
* intros [E | E]; (try contradiction E).
rewrite <- E. intuition.
* intros E. assert (x = 0) by lia.
rewrite H1. intuition.
* constructor; intuition; constructor.
+ exists nil. split; try split.
* contradiction.
* intros E. assert (x = 0) by lia.
rewrite H1 in E. firstorder.
* constructor.
- apply IHb. intros [L H1].
ccase (p (1 + b)) as [H0 | H0]; apply H.
+ exists ((1+ b) :: L). split; try split.
* intros [E | E]; try (rewrite <- E; intuition).
apply H1 in E. intuition.
* intros [E1 E2]. assert (x <= b \/ x = 1 + b) as [E | E] by lia.
** right. apply H1. intuition.
** left. lia.
* apply (NoDupBoundH b).
** apply H1.
** intros x H3 % H1. lia.
** lia.
+ exists L. split; try split.
* intros E % H1. intuition.
* intros [E1 E2]. assert (x <= b \/ x = 1 + b) as [E | E] by lia.
** apply H1. intuition.
** rewrite E in E1. firstorder.
* apply H1.
Qed.
(* A listing of p up to b dualizes to a listing of compl p up to b. *)
Lemma complToBound_compl p L b:
PredListTo p L b -> PredListTo (compl p) (complToBound L b) b.
Proof.
intros H x. split.
- intros [H1 H1'] % in_filter_iff.
destruct Dec; cbn in H1'; try congruence.
enough (x <= b).
+ intuition. intros npx. firstorder.
+ apply in_seq in H1. lia.
- intros [H1 H2]. eapply in_filter_iff. split.
+ apply in_seq; lia.
+ destruct Dec; cbn; try tauto. exfalso. firstorder.
Qed.
(* Length of listings of S up to 2*n is bounded by n *)
Lemma S_Listing:
forall n, ~~ exists L, NoDup L /\ length L <= n /\ PredListTo S L (2*n).
Proof.
intros n H. apply (PredNoDupListTo_NNExist S (2*n)).
intros [L H1]. apply H. exists L; intuition.
apply S_List_Bound.
- exact H2.
- apply H0.
Qed.
(* Weak Existence Infinite Criterion *)
(* Weakly, arbitrarily long duplicate-free lists avoiding S exist. *)
Lemma ComplS_Listing:
forall (n: nat) ,~~ exists L, length L >= n /\ NoDup L
/\ forall x, In x L -> ~ S x.
Proof.
intros n H.
apply (S_Listing n). intros [L H1].
apply H. exists (complToBound L (2*n)). repeat split.
- remember (complToBound_length L (2*n)). lia.
- apply complToBound_NoDup.
- intros x I % (complToBound_compl S); intuition.
Qed.
Lemma S_coInfinite:
~ exhaustible (compl S).
Proof.
eapply weakly_unbounded_non_finite.
intros n H. eapply ComplS_Listing with (n := n).
intros (l & ? & ? & H2).
eapply H.
exists (firstn n l).
repeat split.
- rewrite firstn_length. lia.
- now eapply firstn_NoDup.
- intros ? ? % firstn_In. now eapply H2.
Qed.
(* S is a simple predicate *)
Corollary S_simple:
simple S.
Proof.
split.
- eapply semi_decidable_enumerable; eauto. exact S_SemiDec.
- split.
+ exact S_coInfinite.
+ intros (? & ? % enumerable_semi_decidable & ? & ?); eauto.
eapply S_No_S_Inf_Subset; eauto.
Qed.
End fix_ct.
Section S_Star.
Import Coq.Init.Nat.
(* Section parameters: a semidecider for W and a code c_top accepting all x. *)
Variable W_SDec: nat * nat -> nat -> bool.
Variable W_semidecider: semi_decider W_SDec (fun '(c,x) => W c x).
Variable c_top : nat.
Variable c_top_spec: forall x, W c_top x.
(* S' instantiates the simple predicate S from the previous section. *)
Definition S' : nat -> Prop
:= S W_SDec W_semidecider c_top c_top_spec.
(* Auxiliary List *)
(* S_Pow n: the 2^n consecutive numbers starting at 2^n - 1. *)
Definition S_Pow n : list nat
:= seq (2^n - 1) (2^n).
Lemma pow_pos n:
2^n > n.
Proof.
induction n; cbn; lia.
Qed.
(* The interval for n ends exactly where the interval for 1+n begins. *)
Lemma pow_sum n:
2 ^ n - 1 + 2 ^ n = 2 ^ (1 + n) - 1.
Proof.
induction n; cbn in *; lia.
Qed.
(* The intervals S_Pow n are pairwise disjoint, so membership fixes n. *)
Lemma S_Pow_injective x n1 n2:
In x (S_Pow n1) /\ In x (S_Pow n2) -> n1 = n2.
Proof.
intros [H1 H2].
apply in_seq in H1. apply in_seq in H2.
assert (n1 = n2 \/ 1 + n1 <= n2 \/ 1 + n2 <= n1) by lia.
destruct H as [H | [H | H]].
- exact H.
- assert (2 ^ (1 + n1) - 1 <= x).
+ enough (2 ^ (1 + n1) <= 2 ^ n2) by lia.
apply Nat.pow_le_mono_r; lia.
+ assert (2 ^ n1 - 1 + 2 ^ n1 = 2 ^ (1 + n1) - 1) by apply pow_sum.
lia.
- assert (2 ^ (1 + n2) - 1 <= x).
+ enough (2 ^ (1 + n2) <= 2 ^ n1) by lia.
apply Nat.pow_le_mono_r; lia.
+ assert (2 ^ n2 - 1 + 2 ^ n2 = 2 ^ (1 + n2) - 1) by apply pow_sum.
lia.
Qed.
(* Definition S* *)
(* S* adds to S' every block S_Pow n whose index n encodes a pair in W. *)
Definition S_Star : nat -> Prop
:= fun x => S' x
\/ exists n, (fun '(c,x0) => W c x0) (unembed n)
/\ In x (S_Pow n).
Definition S_Star_compl : nat -> Prop
:= fun x => (compl S') x /\ ~ exists n, ((fun '(c,x0) => W c x0) (unembed n)
/\ In x (S_Pow n )).
Lemma S_Star_comp_agree:
forall x, (compl S_Star) x <-> S_Star_compl x.
Proof.
intros x. unfold S_Star_compl, S_Star, compl, not. now rewrite Decidable.not_or_iff.
Qed.
(* S* is semidecidable *)
(* Disjunction of the semidecidable S' with a semidecidable existential. *)
Lemma S_Star_semidec:
semi_decidable S_Star.
Proof.
apply semi_decidable_or.
- apply S_SemiDec.
- eapply semi_decidable_ex.
eapply semi_decidable_and.
+ exists (fun pa n => W_SDec (unembed (fst pa)) n).
intros [n0 x0]; specialize (W_semidecider (unembed n0)); firstorder.
+ apply decidable_semi_decidable. eapply dec_decidable.
intros pa. exact _.
Qed.
(* Complement of S* contains no semidecidable, infinite subset *)
(* compl S_Star is contained in compl S', so the property is inherited. *)
Lemma S_Star_No_S_Inf_Subset:
forall (q: nat -> Prop), ~ exhaustible q -> semi_decidable q -> ~ (q << (compl (S_Star))).
Proof.
intros q H1 H2 H3.
eapply (S_No_S_Inf_Subset).
- exact H1.
- exact H2.
- intros x qx % H3.
intros nSx. apply qx. left. exact nSx.
Qed.
(* Co-Infinite Proof of S* *)
(* Some code accepts nothing: take a semidecider that always answers false. *)
Lemma W_empty: exists c_bot, forall x, ~ W c_bot x.
Proof.
destruct (es (fun _ => False)) as [[c_bot]_].
- exists (fun _ _ => false). red. firstorder congruence. congruence.
- exists c_bot. firstorder.
Qed.
(* The complement of W (as a predicate on pairs) is not exhaustible:
   all pairs (c_bot, x) lie in it. *)
Lemma W_CoInfinite:
~ exhaustible (compl (fun '(c,x) => W c x)).
Proof.
destruct W_empty as [c_bot H].
eapply unbounded_non_finite.
intros n.
exists (map (fun x => (c_bot, x)) (seq 0 n)).
repeat split.
- now rewrite map_length, seq_length.
- eapply NoDup_map. now intros ? ? [= ->]. eapply seq_NoDup.
- intros [? ?] (? & [= <- <-] & ? % in_seq) % in_map_iff. eapply H.
Qed.
(* Same statement transported along the pairing embedding ⟨-,-⟩. *)
Lemma WNat_CoInfinite:
~ exhaustible (compl (fun n => (fun '(c,x) => W c x) (unembed n))).
Proof.
destruct W_empty as [c_bot H].
eapply unbounded_non_finite.
intros n.
exists (map (fun x => ⟨c_bot, x⟩) (seq 0 n)).
repeat split.
- now rewrite map_length, seq_length.
- eapply NoDup_map. 2: eapply seq_NoDup. intros ? ? ? % (f_equal unembed).
rewrite !embedP in H1. congruence.
- intros x (? & <- & ? % in_seq) % in_map_iff. red. now rewrite embedP.
Qed.
(* S_Pow n lies below 2*(2^n - 1) yet has 2^n elements, exceeding the
   bound of S_List_Bound, so the block cannot lie entirely inside S'. *)
Lemma S_Pow_NotInS n:
~ Forall S' (S_Pow n).
Proof.
intros H.
assert (length (S_Pow n) <= 2^n - 1).
- eapply S_List_Bound.
+ apply seq_NoDup.
+ intros x. split.
* revert H0. apply Forall_forall. exact H.
* apply in_seq in H0. lia.
- unfold S_Pow in H0. rewrite seq_length in H0. clear H.
induction n; cbn in H0; lia.
Qed.
(* Constructive weakening: a failing Forall weakly yields a counterexample. *)
Lemma Not_Forall_2_WeakEx {X} (p: X -> Prop) L:
(~ Forall p L) -> ~~ exists x, In x L /\ ~ p x.
Proof.
intros H1 H2.
induction L.
- now apply H1.
- ccase (p a) as [H | H].
+ apply IHL.
* contradict H1. now constructor.
* contradict H2. destruct H2. exists x; firstorder.
+ apply H2. exists a. firstorder.
Qed.
Lemma S_Pow_WeakEx_NotS n:
~~ exists x, In x (S_Pow n) /\ (compl S') x.
Proof.
apply Not_Forall_2_WeakEx, S_Pow_NotInS.
Qed.
(* compl S_Star is unbounded: for weakly-existing arbitrarily large n with
   unembed n not in W, the block S_Pow n contains a non-member of S',
   which by construction then avoids S_Star entirely. *)
Lemma S_Star_coInfinite:
~ exhaustible (compl S_Star).
Proof.
eapply non_finite_nat.
intros n0.
assert (~ ~ (exists n : nat, n >= n0 + 1/\ compl (fun n : nat => let '(c, x) := unembed n in W c x) n)).
- eapply non_finite_nat. apply WNat_CoInfinite.
- contradict H. intros [n H2].
apply (S_Pow_WeakEx_NotS n).
intros [x [H3 H4]]. apply H.
exists x; split.
+ apply in_seq in H3. enough (2 ^ n > n) by lia. apply pow_pos.
+ apply S_Star_comp_agree.
unfold S_Star_compl. split.
* exact H4.
* intros [n1 [H5 H6]].
assert (n = n1) by now apply (S_Pow_injective x).
apply H2. now rewrite H0.
Qed.
(* S* is simple *)
Corollary S_Star_simple:
simple S_Star.
Proof.
split.
- eapply semi_decidable_enumerable; eauto. exact S_Star_semidec.
- split.
+ exact S_Star_coInfinite.
+ intros (? & ? % enumerable_semi_decidable & ? & ?); eauto.
eapply S_Star_No_S_Inf_Subset; eauto.
Qed.
(* W truth-table reduces to S* *)
(* A list inside S_Star either sits wholly in S' or meets some block
   S_Pow n whose index satisfies W. *)
Lemma S_Star_split L:
Forall S_Star L
-> Forall S' L \/ exists n, (fun '(c,x) => W c x) (unembed n) /\ exists x, In x L /\ In x (S_Pow n).
Proof.
induction 1.
- left; constructor.
- destruct IHForall.
+ destruct H.
* left; now constructor.
* right. destruct H.
exists x0. intuition.
exists x; intuition.
+ right. destruct H1.
exists x0. intuition. destruct H3.
exists x1; intuition.
Qed.
(* Pointwise-equivalent lists have equivalent Forall statements. *)
Lemma Forall2_equiv {X} {Y} {p : X -> Prop} {q : Y -> Prop} l1 l2:
Forall2 (fun x y => q y <-> p x) l1 l2 ->
Forall p l1 <-> Forall q l2.
Proof.
induction 1.
- split; now econstructor.
- split; intros H1; inv H1; econstructor; firstorder.
Qed.
Lemma Forall_map {X} {p : X -> Prop} l :
Forall (fun P => P) (map p l) <-> Forall p l.
Proof.
induction l; cbn.
- split; econstructor.
- split; intros H1; inv H1; econstructor; firstorder.
Qed.
(* Truth-table reduction: W c x holds iff the whole block S_Pow ⟨c,x⟩ is
   contained in S_Star; the truth table queries the block and checks that
   all answers came back true. *)
Lemma tt_red_W_S_Star:
(fun '(c,x) => W c x) ⪯ₜₜ S_Star.
Proof.
unshelve eexists (fun '(c, x) => existT (let n := embed (c,x) in S_Pow n) (mk_tt (fun L => if (Forall_dec (fun b => b = true) _ L) then true else false))).
1:{ intros b. cbn. decide (b = true); tauto. }
1:{ refine (length (S_Pow ⟨c, x⟩)). }
intros [c x] L. cbn.
unfold reflects. intros H1.
rewrite eval_tt_mk_tt'.
2:{ eapply list.Forall2_length in H1.
now rewrite H1, map_length. }
unshelve edestruct (Forall_dec (fun b => b = true)) as [H0 | H0].
- intuition.
assert (Forall S_Star (S_Pow ⟨c,x⟩)). {
eapply Forall2_equiv in H1.
rewrite Forall_map in H1. now eapply H1. }
clear H0. rename H2 into H0.
(* The block cannot be inside S' (S_Pow_NotInS), so it meets some S_Pow n
   with W-index; disjointness of the blocks forces n = ⟨c,x⟩. *)
apply S_Star_split in H0 as [H0 | H0].
+ apply S_Pow_NotInS in H0. contradict H0.
+ destruct H0 as [n [H0 [x0 H3]]].
apply S_Pow_injective in H3.
subst. now rewrite embedP in H0.
- split; intros; try congruence.
contradict H0.
assert (Forall S_Star (S_Pow (embed (c,x)))).
* apply Forall_forall.
intros x0 H3. right.
exists (embed (c, x)). rewrite embedP. intuition.
* eapply Forall2_equiv. exact H1.
now eapply Forall_map.
Qed.
End S_Star.
|
{"author": "uds-psl", "repo": "coq-synthetic-computability", "sha": "dc6eaeef99c76f4ff2903b8c07e2928622ee36ba", "save_path": "github-repos/coq/uds-psl-coq-synthetic-computability", "path": "github-repos/coq/uds-psl-coq-synthetic-computability/coq-synthetic-computability-dc6eaeef99c76f4ff2903b8c07e2928622ee36ba/Synthetic/simple_construction.v"}
|
[STATEMENT]
lemma iT_Plus_image_conv: "I \<oplus> k = (\<lambda>n.(n + k)) ` I"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. I \<oplus> k = (\<lambda>n. n + k) ` I
[PROOF STEP]
by (simp add: iT_Plus_def)
|
{"llama_tokens": 97, "file": "Nat-Interval-Logic_IL_IntervalOperators", "length": 1}
|
# Timestep conversion function: map a calendar year onto the 1-based index
# of the corresponding 5-year DICE2013 timestep (2010 -> 1, 2015 -> 2, ...).
# Raises an error for years that do not fall on a 5-year boundary.
function getindexfromyear_dice_2013(year)
    baseyear = 2010
    step, leftover = divrem(year - baseyear, 5)
    leftover == 0 || error("Invalid year")
    return step + 1
end
# Get parameters from DICE2013 excel sheet
#
#   f          - opened excel workbook, indexable as f[sheet][range]
#   range      - range of cell values on the excel sheet, as a string, e.g. "B56:B77"
#   parameters - :single for just one value, or :all for an entire time series
#   sheet      - the sheet in the excel file to reference (i.e. "Base")
#   T          - length of the time period (i.e. 60); kept for interface
#                compatibility, not used by the lookup itself
#
# example: getparams(f, "B15:BI15", :all, "Base", 60)
function getparams(f, range::String, parameters::Symbol, sheet::String, T)
    if parameters == :single
        # single cell: unwrap and convert to Float64
        data = f[sheet][range]
        return Float64(data[1])
    elseif parameters == :all
        data = f[sheet][range]
        s = size(data)
        if length(s) == 2 && s[1] == 1
            # convert 2D row vector to 1D col vector
            data = vec(data)
        end
        dims = length(size(data))
        return Array{Float64,dims}(data)
    else
        # BUG FIX: previously an unrecognized symbol fell through to
        # `return vals` and crashed with an opaque UndefVarError.
        error("parameters must be :single or :all, got :$parameters")
    end
end
|
{"hexsha": "f591357a6a2e4f01b67b4152caf5ad1270e11f5e", "size": 1080, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/helpers.jl", "max_stars_repo_name": "anthofflab/MimiDICE2013.jl", "max_stars_repo_head_hexsha": "ae2d83788d55697d25a08e46fb872476c18c20f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-14T01:07:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T01:07:47.000Z", "max_issues_repo_path": "src/helpers.jl", "max_issues_repo_name": "anthofflab/MimiDICE2013.jl", "max_issues_repo_head_hexsha": "ae2d83788d55697d25a08e46fb872476c18c20f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2019-05-23T18:46:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T23:45:46.000Z", "max_forks_repo_path": "src/helpers.jl", "max_forks_repo_name": "anthofflab/MimiDICE2013.jl", "max_forks_repo_head_hexsha": "ae2d83788d55697d25a08e46fb872476c18c20f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-06-10T19:26:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-31T03:02:10.000Z", "avg_line_length": 24.5454545455, "max_line_length": 86, "alphanum_fraction": 0.6185185185, "num_tokens": 310}
|
"""
Fourier Transform
-Find Fourier Transform of images using OpenCV
-utilize FFT functions in Numpy
-FT applications
functions:
cv2.
dft()
idft()
FT used to analyze freq characteristics of filters
for images
2D Discrete Fourier Transform used to find frequency domain
FFT calculates DFT
sinusoidal signal: x(t)=A * sin(2 * \pi *f * t)
f - freq signal
if freq domain taken, can see a spike at f
if signal sampled to form discrete signal, get same freq domain, but periodic in range:
[- \pi , \pi] or [0, 2 * \pi] (or [0, N] for N-pt DFT)
consider image a signal sampled in 2 directions
taking FT in both X and Y dirs gives freq representation of image
for sinusoidal signal, if ampl varies fast in time -> hi freq signal
for images:
amplitude varies drastically at edge points or noises
therefore edges and noises high freq contents of image
no changes in amplitude: lo freq component
"""
# FT in Numpy
# numpy has FFT package
# np.fft.fft2 prov. freq transform which is complex array
# arguments:
# input image (grayscale)
# size of output array; if greater than size of input image, input image padded w/ 0s before calculation of FFT
# less than input image: input image cropped
# no args passes: output size same as input
# result: zero freq component @ top left corner
# to bring to center: shift result by N/2 in both directions
# done by np.fft.fftshift()
# once find frequency transform -> find magnitude spectrum
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('messi5.jpg', 0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift))
plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
# can see whiter region at center, showing low freq content is prominent
# ^ found freq transform; now, can do ops in freq domain
# hi pass filtering
# image reconstruction (ie find inverse DFT)
# remove lo freqs with rectangular window, size 60x60
# apply inverse shift using np.fft.ifftshift()
# so DC component is again at top right hand corner
# find inverse FFT using np.ifft2()
# result complex #; take its abs value
rows, cols = img.shape
crow, ccol = rows/2, cols/2
fshift[crow-30:crow+30, ccol-30:ccol+30] = 0
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
plt.subplot(131),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(img_back, cmap = 'gray')
plt.title('Image after HPF'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(img_back)
plt.title('Result in JET'), plt.xticks([]), plt.yticks([])
plt.show()
# don't use rectangular filters for masking
# create ripple-like ringing effects
# mask converted to sinc shape, causing problem
# use Gaussian window instead
# Fourier Transform in OpenCV
# functions: cv2.dft() and cv2.idft()
# same result as before, but in 2 channels
# 1st channel: real part of result
# 2nd channel: imaginary part
# convert input image to np.float32 first
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('messi5.jpg', 0)
dft = cv2.dft(np.float32(img), flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20 * np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))
plt.subplot(121), plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
# NOTE: use cv2.cartToPolar(), which returns both magnitude and phase
# now, we do inverse DFT
# previously, we created HPF
# now, remove hi freq contents of image
# -> apply LPF
# blurs the image
# create a mask first with high value, 1, @ low freq
# ie pass LF content
# 0 at HF region
rows, cols = img.shape
crow, ccol = rows/2, cols/2
# create mask first, center square is 1, all remaining zeros
mask = np.zeros((rows, cols, 2), np.uint8)
mask[crow-30:crow+30, ccol-30:ccol+30] = 1
# apply mask and iDFT
fshift = dft_shift * mask
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:,:,0], img_back[:,:,1])
plt.subplot(121), plt.imshow(img, cmap = 'gray)
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(img_back, cmap = 'gray)
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
|
{"hexsha": "d0b47a7d518e7862e5d0b67a09610a50524966bc", "size": 4853, "ext": "py", "lang": "Python", "max_stars_repo_path": "opencv/tutorials/imageProcessing/transform/fourier.py", "max_stars_repo_name": "SSG-DRD-IOT/commercial-iot-security-system", "max_stars_repo_head_hexsha": "0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "opencv/tutorials/imageProcessing/transform/fourier.py", "max_issues_repo_name": "SSG-DRD-IOT/commercial-iot-security-system", "max_issues_repo_head_hexsha": "0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opencv/tutorials/imageProcessing/transform/fourier.py", "max_forks_repo_name": "SSG-DRD-IOT/commercial-iot-security-system", "max_forks_repo_head_hexsha": "0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2022-01-22T05:02:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:13:06.000Z", "avg_line_length": 34.6642857143, "max_line_length": 119, "alphanum_fraction": 0.6931794766, "include": true, "reason": "import numpy", "num_tokens": 1304}
|
\documentclass[output=paper]{langsci/langscibook}
% % \ChapterDOI{10.5281/zenodo.4680306} %initial publication
\ChapterDOI{10.5281/zenodo.5530358} %corrected publication
\author{Henk C. van Riemsdijk\affiliation{Tilburg University}}
\title{Case mismatches and match fixing cases}
\abstract{Matching and mismatching are names for a fairly wide variety of
phenomena in the grammar of many, perhaps most, languages. Given the fact
that inflection is a crucial element in (mis-)matching phenomena, the
overall attention that these phenomena have attracted has been fairly poor.
The present article attempts to tackle one specific aspect of
(mis-)matching phenomena that we may suspect could be a key to a broader
set of facts in this domain. Specifically, the article examines the
relationship between case matching and case attraction. The former is
frequently found in the syntax of free \isi{relative clauses}, while the second
is often a characteristic of \isi{relative clauses} headed by pronominal
elements. As there are good reasons to consider these two sets of phenomena
to be closely related, an attempt will be made here to show that matching
and attraction are indeed two sides of the same coin. The crucial argument
will be to pursue the analysis of headed and headless \isi{relative clauses} in terms
of what has come to be called \enquote{grafting}.}
\begin{document}\glsresetall
\maketitle
\begin{refcontext}
\section{Case matching and case attraction in relative clauses}
This article will address certain phenomena concerning
morphological case\is{case!morphological case} in a number of relative clause\is{relative clauses} constructions, in
particular case (non-)attraction and case (mis-)matching.\footnote{There
are similar issues in many other domains of grammar. To give just one
example, in various constructions involving \isi{coordination} we find both
matching requirements and mismatches. For a discussion of such
phenomena in \textit{right node raising} constructions, for example, see
\citet{Larson2012}. In the present article I use the term \emph{case
(mis-)matching} to refer to case conflicts independently of whether
they occur in a single position or in two (usually adjacent or close)
positions. To distinguish the two, I use \emph{case attraction} (two
positions interacting) and \emph{case superimposition} (two different cases
that fight for a single position).} The main puzzle that I would like to
discuss is the question of how many positions are involved. In case attraction
we are dealing with a head of the relative clause\is{relative clauses} and the wh-phrase in the
Spec,CP of the relative clause\is{relative clauses}: two separate positions. In \glspl{FR}, however,
it seems as if in some cases at least there is just a single position in which
a case is realized that the matrix environment and the relative clause
environment fight about determining.
\begin{sloppypar}Starting with case attraction, let us look at some examples from Ancient
Greek.\footnote{The examples given here are adapted from
\textcite{Hirschbuhler1976} and were cited in
\citet{GroosVanRiemsdijk1981}. I use superscripts to indicate the case
imposed by the item in question and subscripts to indicate the actual case
borne by the element in question.}\end{sloppypar}
\ea\label{ex:15.1}
\ea
\gll pro t\=on kak\=on \emph{ha} oida\\
instead-of\textsuperscript{\Gen{}} the evils\tss{\Gen{}} which\tss{\Acc{}} I-know\textsuperscript{\Acc{}} \\
\glt ‘instead of the evils which I know’\\
\ex pro\textsuperscript{\Gen{}} t\=on kak\=on\tss{\Gen{}} \emph{h\=on}\tss{\Gen{}} oida\textsuperscript{\Acc{}}
\z
\z
In (\ref{ex:15.1}a) the head of the relative clause\is{relative clauses} has the genitive\is{genitive case} case imposed by the
preposition in the matrix while the relative pronoun\is{relative pronouns} has the accusative
case\is{accusative case} imposed by the embedded verb ‘know’. In (\ref{ex:15.1}b) however, the case of the
relative pronoun has been changed from accusative to genitive\is{genitive case}, the case of the
head. This is called case attraction.
\ea\label{ex:15.2}\ili{Ancient Greek}
\ea
\gll \dots{} ekpiein sun \emph{toutois} \emph{hous} malista phileis\\
{} to-drink with\textsuperscript{\Dat{}} those\tss{\Dat{}} whom\tss{\Acc{}} best you-love\textsuperscript{\Acc{}}\\
\glt ‘to drink with those whom you love best’
\ex \dots{} ekpiein sun\textsuperscript{\Dat{}} \emph{hois}\tss{\Dat{}} malista phileis\textsuperscript{\Acc{}}
\z
\z
(\ref{ex:15.2}a) is a headed relative clause\is{relative clauses} in which the head is in the dative\is{dative case} case
according to the requirements by the matrix preposition while the relative
pronoun appears in the \isi{accusative case} thereby fulfilling the case requirements
of the verb in the relative clause\is{relative clauses}. (\ref{ex:15.2}b) is the corresponding FR\@. As there is
only one single relative pronoun\is{relative pronouns}, that is, only one position to express case
morphology, a conflict arises between the dative\is{dative case} required by the matrix and the
accusative imposed by the relative clause\is{relative clauses}: a case mismatch\is{case!case mismatches}. In some languages
this would lead to a conflict that cannot be resolved. In such languages an
example like (\ref{ex:15.2}a) could not be expressed by means of a FR\@. In Ancient Greek,
however, the conflict is resolved by means of a kind of radical form of case
attraction which we might call case superimposition. In (\ref{ex:15.2}b) the matrix dative\is{dative case}
supersedes the embedded accusative.
The question as to whether a case conflict in a given language results in
ungrammaticality or whether it can be resolved by case attraction (or
superimposition) is a complicated one. For \ili{Ancient Greek},
\textcite{Hirschbuhler1976} proposed a case hierarchy:\footnote{See also
\citet{Harbert1983} for extensive discussion, including \ili{Gothic}.}
\ea\label{ex:15.3}
\Nom{} $>$ \Acc{} $>$ \Dat{} $>$ \Gen{}
\z
This hierarchy goes from least oblique to most oblique. And the corresponding
principle is as in \eqref{ex:15.4}.
\ea\label{ex:15.4}
In situations of case superimposition the more oblique case wins.
\z
This will correctly predict that in (\ref{ex:15.2}b) it is the dative\is{dative case} that wins and
suppresses the accusative.
German may well be the language for which this issue has been studied in the
greatest detail.\footnote{See among many others \citet{Vogel2001}.} There is
considerable variation in the judgments ranging from those who allow very few
case mismatches to those who allow virtually all of them.\footnote{This is just
scratching the surface. As an anonymous reviewer points out, \ili{Polish} does
not resolve case mismatches. To circumvent ineffability problems, however,
\ili{Polish} makes extensive use of so-called \enquote{light headed relatives},
that is, \isi{relative clauses} with a pronominal head. See \citet{Citko2004}.
Furthermore, it appears that in modern Greek the matrix case always wins, cf.\
\citet{Daskalaki2011} and \citet{Spyropoulos2007}.\label{fn:15.6}}
This is not, however, the question that I mean to discuss in this paper.
Instead, the issue I want to address here is what it means to say that “in the
FR there is only one position to realize case”. Take the following examples of
\glspl{FR} in Standard High German.\footnote{These examples are from \textcite[15, ex. 22a,b]{Vogel2001}.}
\ea\label{ex:15.5}\ili{German}
\ea[]{%
\gll Wen\tss{\Acc{}} du einlädst\textsuperscript{\Acc{}} wird auch kommen\textsuperscript{\Nom{}}.\\
who-\Acc{} you invite will also come\\
\glt ‘Whoever you invite is sure to come too.’}
\ex[*]{
\gll Sie zerstört\textsuperscript{\Acc{}}, wer\tss{\Nom{}} ihr begegnet\textsuperscript{\Nom{}}.\\
she destroys who-\Nom{} her-\Dat{} meets\\
\glt ‘She destroys whoever meets her.’}
\z
\z
At first sight, there is a relative clause\is{relative clauses} without a head and a relative
pronoun in the relative clause\is{relative clauses}. So, ostensibly, there is only one pronoun that
has a slot for case morphology. Suppose, however, that \glspl{FR} do have a
head just like headed relatives but that the head is silent.\footnote{This was
the analysis proposed in \citet{GroosVanRiemsdijk1981}.} In that case we could
say that there are two slots for case morphology, but at spell-out there is
only one in which case can be overtly expressed.
As I will suggest at the end of \Cref{sec:15.3}, there is only one
syntactic position which is \enquote{shared} by the relative clause\is{relative clauses} and the
matrix clause. An anonymous reviewer remarks that from a semantic point of view
the FR-pronoun is not a shared argument: the argument of the relative predicate
is the FR-pronoun but the argument of the matrix predicate is the FR as a
whole. Notice, however, that on a raising analysis of relative
clauses\is{relative clauses!raising analysis} the head
of the relative clause\is{relative clauses} is similarly shared between the relative clause\is{relative clauses} and the
matrix clause. Space prevents a more extensive discussion here.
\section{One position for case or two?}
While there are language particular differences in the case hierarchies and the
way they determine case attraction and case superimposition, the similarities
are nevertheless considerable. And the fact that they affect both attraction
and superimposition strongly suggests that the structures to which they apply
should be sufficiently similar in order to allow for the generalization to be
expressed. It follows, apparently, that the silent head analysis of \glspl{FR}
should be preferred as the adoption of that analysis implies the presence of
two positions in both constructions: case attraction and case superimposition.
Simplifying, the structure of (\ref{ex:15.5}a) would be roughly like \eqref{ex:15.6}.
\ea\label{ex:15.6}
{}[\tss{DP} [ $\varnothing$ ]\tss{\Nom{}} ] [\tss{CP} [\tss{Spec,CP}
[\tss{WhP} [\tss{Wh} wen ]\tss{\Acc{}} ]\tss{i} du einlädst t\tss{i} ] wird auch kommen]
\z
The nominative\is{nominative case} case feature on the silent head and the \isi{accusative case} feature
on the relative pronoun\is{relative pronouns} now have to fight about which one of them can be
realized on the only available host, the relative pronoun\is{relative pronouns} \emph{wen}. In case
attraction situations, which are now structurally identical except that the
head is lexically realized, not silent, each case feature can be realized on
its host, but nevertheless the two case features\is{case!case features} may “feel the necessity to
create a closer bond between them”, resulting in a copy of one of the two case
features being superimposed on the other one. And that is case attraction.
Unfortunately the situation is somewhat more complicated than that. I have
argued (cf.~\cite{VanRiemsdijk2006a})\footnote{See this chapter for an ample
overview of the relevant literature. An updated version of this chapter has
appeared in \textcite{VanRiemsdijk2017}.} that \glspl{FR} should be treated in
terms of what I call \emph{grafting}. Let me first introduce the notion of
\enquote{graft} and then show how \glspl{FR} could be analysed in terms of
graft structures.
There are ample arguments for grafts \parencite[cf.][]{VanRiemsdijk2000}. A
more \enquote{authoritative} view is presented in \citet{VanRiemsdijk2006b}. As
an illustration of simple cases, consider a DP like \eqref{ex:15.7}:
\ea\label{ex:15.7}
a far from simple matter
\z
It is quite easy to see that assigning a structure to such a DP is, indeed, a
far from simple matter. Clearly we have a head noun \textit{matter}. To the left there
is an attributive AP\@. But there are two adjectives: \textit{far} and \textit{simple}.
Assuming that \textit{from simple} is a PP, that PP is presumably a complement of
\textit{far}. That is, we might assume that the structure of that PP in \eqref{ex:15.7} is
equivalent to that of \eqref{ex:15.8}.
\ea\label{ex:15.8}
far from the airport
\z
But this leads immediately to a serious problem in that \eqref{ex:15.9} is ungrammatical:
\ea[*]{a far from the airport hotel}\label{ex:15.9}
\z
The reason is quite straightforward. The head of the AP, \textit{far}, is not left
adjacent to the head noun \textit{hotel}. That they must be adjacent has been argued
in \citet{Emonds1985,Emonds1976}, \citet{Williams1982},
\citet{VanRiemsdijk1993}, \citet{BibHolRob2014}. As \eqref{ex:15.7} is grammatical, we are
led to assume that \textit{simple} is the head. This assumption also makes sense
semantically in that the meaning of \eqref{ex:15.7} is something like \textit{a not really simple
matter}, where \textit{not really} is a modifier of the head \textit{simple}.\footnote{Note
also, that, as an anonymous reviewer observes, in \eqref{ex:15.7} the postnominal
position for the AP is ungrammatical: *\emph{a matter far from simple}
while in \eqref{ex:15.9} the postnominal position of the AP makes the phrase
grammatical: \emph{a hotel far from the airport.} } In short, we have a
paradox, if we want to express the structure of \eqref{ex:15.7} taking all these
considerations into account. The notion of graft (which I have argued is
simply a special case of merge, cf.~\cite{VanRiemsdijk2006b}) offers a solution (see \figref{fig:ex:15.10}).
\begin{figure}\caption{\label{fig:ex:15.10}A simple graft}
\begin{tikzpicture}[baseline]
\begin{scope}[xshift=.9995cm]
\Tree
[.DP
[.D a ]
[.N$'$
[.AP
[.A
\node (a) {simple};
]
]
[.N
matter
]
]
]
\end{scope}
\begin{scope}[yshift=-8.5cm, grow'=up]
\Tree
[.AP
[.A
far
]
[.PP
[.P
from
]
[.AP
\node (A) {A};
]
]
]
\end{scope}
\draw (a) -- (A);
\end{tikzpicture}
\end{figure}
Cases like \eqref{ex:15.7} alone would not suffice to justify this type of approach. But
there is considerable evidence
(cf.~\citealt{VanRiemsdijk2001,VanRiemsdijk2006a,VanRiemsdijk2006b,VanRiemsdijk2006c,VanRiemsdijk2010})
for grafts from a number of constructions including \glsdesc{FR}s (\glspl{FR})
and particularly a special type of FR called \glspl{TFR}.
On this view, \glspl{FR} will be analysed along the following
lines \REF{ex:15.11}:
\ea\label{ex:15.11}\ili{German}
\ea[]{%
\gll Ich gebe\textsuperscript{\Dat{}} die Belohnung wem\tss{\Dat{}} eine gebührt\textsuperscript{\Dat{}}.\\
I give the reward to-whom one deserves\\
\glt ‘I give the reward to who deserves one.’}
\ex[]{%
\gll Ich gebe\textsuperscript{\Dat{}} die Bel. *wer\tss{\Nom{}}/*wem\tss{\Dat{}} eine verdient\textsuperscript{\Nom{}}.\\
I give the reward who/whom one deserves\\
\glt }
\ex[*]{%
\gll Wem\tss{\Dat{}} /*wer\tss{\Nom{}} eine Belohnung gebührt\textsuperscript{\Dat{}} bekommt\textsuperscript{\Nom{}} eine.\\
whom who a reward deserves receives one\\
\glt }
\z
\z
(\ref{ex:15.11}a), which incidentally illustrates the case matching effect, would roughly
be assigned the following structure under a graft approach (\figref{fig:ex:15.12}).
%%please move the includegraphics inside the {figure} environment
%%\includegraphics[width=\textwidth]{vanRiemsdijkrevised-img9.png}
%\emph{\figref{fig:2}:FR analysis by grafting}
\begin{figure}\caption{\label{fig:ex:15.12}\gls{FR} analysis by grafting}
\begin{tikzpicture}[baseline]
\begin{scope}
\node [text width=5cm] (treeA) {input tree A (matrix/host):};
\end{scope}
\begin{scope}[xshift=3cm]
\Tree [.\node(vbar){V$'$};
[.DP \edge [roof]; {die Belohnung} ]
[.V geb- ]
]
\end{scope}
\begin{scope}[yshift=-3cm]
\node [text width=5cm] (treeB) {tree B (grafted):};
\end{scope}
\begin{scope}[xshift=1cm, yshift=-3cm]
\Tree [.\node(cp){\textbf{CP}};
\edge[draw=none]; {}
[.\node(ip){\textbf{IP}};
\edge[draw=none]; {}
[.\emph{v}P
[.\node(dp1){DP}; eine ]
[.VP
[.\node(dp2){DP}; wem ]
[.V gebühr- ]
]
]
]
]
\end{scope}
\begin{scope}[xshift=6.5cm, yshift=-2.0cm]
\Tree [.\node(vp){\textbf{VP}};
\edge[draw=none]; {}
[.V$'$
[.DP \edge [roof]; {die B.} ]
[.V geb- ]
]
]
\end{scope}
\draw [->, shorten >=1mm, shorten <=1mm] (vbar) -|
node[below left]{graft}(vp);
\draw [dashed] (ip.south).. controls +(south west:1.25) and
+(north east:.75)..node[left=.25cm, solid, draw]{1}(dp1.north);
\node at (1, -8) [inner sep=0mm] (control) {};
\draw [dashed] (cp.south).. controls +(south west:3.0)
..node[left=.25cm, solid, draw]{2}(control);
\draw [dashed] (control).. controls +(south east:1.25) and
+(north:1)..(dp2.north);
\draw [dashed] (vp.south)..controls +(south:2) and
+(north:2)..node[above left=.25cm, solid, draw]{3}(dp2.north);
\end{tikzpicture}
\end{figure}
The strongest arguments for a graft/multi-dominance approach come from
\glspl{TFR}. Below I will summarize some of the major properties of
\glspl{TFR} to show what these arguments are.\footnote{Some of these
observations are due to \citet{Wilder1998} and some are my own, see
\citet{VanRiemsdijk2001,VanRiemsdijk2006a,VanRiemsdijk2006b}.}
\begin{itemize}
\item \glspl{FR} are definite or free choice universal as in \eqref{ex:15.13} –
\glspl{TFR} are typically indefinite, cf. \eqref{ex:15.14}, that is, it is the
predicate nominal (PN) that determines the indefiniteness of the
\gls{TFR}, not the \emph{wh}{}-word.
\ea\label{ex:15.13}
I eat what is on the table.
\z
\ea\label{ex:15.14}
\ea I ate what they euphemistically referred to as a steak.
\ex There is what I suspect is a meteorite on the front lawn.
\z
\z
\item (\ili{English}) number agreement: \emph{what} determines singular
agreement inside and out in the \gls{FR} (\ref{ex:15.15}a), but it is the \gls{PN}
that determines the actual agreement in the \gls{TFR} (\ref{ex:15.15}b,c).
\ea\label{ex:15.15}
\ea What pleases\textbf{/}*please me most adorns/*adorn the living\\ room wall.
\ex What *seems/seem to be some meteorites *was/were \\ lying there.
\ex What seems/*seem to be a meteorite was/*were lying\\ there.
\z
\z
\item Adjectival agreement in Dutch is present in attributive adjectives
but not in predicative adjectives. The \gls{PA} in a
\gls{TFR} inflects like an adjective when the \gls{TFR} is adnominal.
That is, the \gls{PA} is the shared element.
\ea\label{ex:15.16}\ili{Dutch}\\
\gll een wat ik zou noemen eenvoudig-*(e) oplossing \\
a what I would call simple solution\\
\glt
\z
\item Idiom chunks: the \gls{PN} in the \gls{TFR} can complete a matrix
idiom.\is{idioms}
\ea\label{ex:15.17}
\ea The headway they made was impressive.
\ex They didn't make what can reasonably be considered headway.
\z
\z
\item Bound anaphors in the \gls{PN} of the \gls{TFR} can be bound by a matrix
antecedent, showing again that the \gls{PN} is the shared element.
\ea\label{ex:15.18}
\ea They live in what is often referred to as each other's backyard.
\ex She was what can only be interpreted as proud of herself.
\z
\z
\ea\label{ex:15.19}
\ea Bush\tss{i} would never acknowledge what Cheney\tss{j} refers to as [each other’s]\tss{i+j} mistakes.
\ex John\tss{i} hates to discuss what Mary\tss{j} calls [each other’s]\tss{i+j} sexual deficiencies.
\z
\z
\item Case matching is required on the \gls{PN}. The examples are from German.
\ea\label{ex:15.20}\ili{German}
\ea[]{%
\gll Er hat was man einen\tss{\Acc{}} Halunken nennt\textsuperscript{\Acc{}} festgenommen\textsuperscript{\Acc{}}.\\
he has what one a scoundrel calls apprehended\\
\glt ‘He has apprehended what they call a scoundrel.’}
\ex[*]{%
\gll Er ist was man einen\tss{\Acc{}} / einem\tss{\Dat{}} Halunken nennt\textsuperscript{\Acc{}} auf den Leim gegangen\textsuperscript{\Dat{}}.\\
he is what one a / a scoundrel calls on the glue gone\\
\glt ‘He has been hoodwinked by what they call a scoundrel.’}
\z
\z
\end{itemize}
In (\ref{ex:15.20}a) the case requirements by the matrix clause and by the \gls{TFR} are
identical, they match. But note that the shared element that has to satisfy the
double case requirement is the \gls{PN}, not the \emph{wh}{}-word. This is
shown by (\ref{ex:15.20}b) where the case requirements on the \gls{PN} do not match. Note
also that case syncretism, which can resolve case mismatches in \glspl{FR} as
in \eqref{ex:15.21} also does so in \glspl{TFR}, cf. \eqref{ex:15.22}:
\ea\label{ex:15.21}\ili{German}
\ea[*]{%
\gll Wen\tss{\Acc{}} du liebst\textsuperscript{\Acc{}} ist\textsuperscript{\Nom{}} ein Halunke.\\
whom you love is a scoundrel\\
\glt }
\ex[]{%
\gll Was\tss{NOM/ACC} du liebst\textsuperscript{\Acc{}} ist\textsuperscript{\Nom{}} Pasta.\\
what you love is pasta\\
\glt }
\z
\z
The \emph{wh}{}-word \emph{wen} in (\ref{ex:15.21}a) can only be an accusative, hence we
have a case-mismatch which causes ungrammaticality. But in (\ref{ex:15.21}b) the
\emph{wh-}word \emph{was} is syncretic in that it can be both a nominative\is{nominative case} and
an accusative. Thereby the mismatch is avoided. Perhaps the most convincing
indication that in \glspl{TFR} it is the \gls{PN} that is the shared element
between the matrix clause and the (transparent) free relative is the fact
that the \gls{PN} shows syncretic behavior just like the \emph{wh-}word in
\glspl{FR}.\footnote{(\ref{ex:15.22}a) is an example of a case mismatch\is{case!case mismatches} in which the
accusative wins over the nominative\is{nominative case}. This is considered more or less
grammatical by many speakers of German, see \citet{Vogel2001} for
discussion.}
\ea\label{ex:15.22}\ili{German}
\ea[]{%
\gll Was viele einen\tss{\Acc{}} geilen\tss{\Acc{}} Wagen nennen\textsuperscript{\Acc{}} wird oft gekauft\textsuperscript{\Nom{}}.\\
what many a sexy car call is frequently bought\\
\glt }
\ex[*]{Was viele ein\tss{\Nom{}} geiler\tss{\Nom{}} Wagen nennen\textsuperscript{\Acc{}} wird oft gekauft\textsuperscript{\Nom{}}.}
\ex[]{Was viele ein\tss{NOM/ACC} geiles\tss{\Nom{}} Auto nennen\textsuperscript{\Acc{}} wird oft gekauft\textsuperscript{\Nom{}}.}
\z
\z
The important fact here is that, while \emph{Wagen} and \emph{Auto} are synonymous,
\emph{Wagen} is a masculine noun while \emph{Auto} is neuter. In the paradigm
for masculine nouns the nominative\is{nominative case} and the accusative are distinct, but in the
paradigm for neuter nouns they are not, in other words there is syncretism in
the case morphology. Accordingly the case mismatch\is{case!case mismatches} in (\ref{ex:15.22}b) causes
ungrammaticality, but in (\ref{ex:15.22}c) the mismatch is avoided by syncretism.
The important thing about \glspl{TFR}, then, is that it is perfectly evident
that it is the \gls{PN}/PA of the \gls{TFR} that acts as the shared element,
i.e.\ the element that is also part of the matrix clause. There does not appear
to be an obvious way to posit a second position alongside the \gls{PN} which
could be used as the locus for a second case morpheme as in example \eqref{ex:15.6} above.
A graft approach directly expresses the notion that the \gls{PN} (or the PA) is
simultaneously part of the \gls{TFR} and of the matrix structure. By way of
illustration, here is a simplified graft derivation of a simple \gls{TFR}:
\ea\label{ex:15.23}
I ate what they called a steak.
\z\largerpage[2]
\begin{figure}[H]
\caption{\label{fig:ex:15.24}TFR analysis by grafting}
\begin{tikzpicture}[baseline]
\begin{scope}
\node [text width=5cm] (treeA) {input tree A (matrix/host):};
\end{scope}
\begin{scope}[xshift=3cm]
\Tree [.\node(v){V};
eat-
]
\end{scope}
\begin{scope}[yshift=-3cm]
\node [text width=5cm] (treeB) {tree B (grafted):};
\end{scope}
\begin{scope}[xshift=1cm, yshift=-2.5cm]
\Tree [.\node(cp){\textbf{CP}};
\edge[draw=none]; {}
[.\node(ip){\textbf{IP}};
\edge[draw=none]; {}
[.\emph{v}P
[.\node(dp1){DP}; they ]
[.VP
[.V call- ]
[.SC
[.\node(dp2){DP}; what ]
[.\node(dp3){DP}; \edge[roof]; {a
steak} ]
]
]
]
]
]
\end{scope}
\begin{scope}[xshift=6.5cm, yshift=-2.0cm]
\Tree [.\node(vbar){\textbf{V$'$}};
[.V eat- ]
\edge[draw=none]; {}
]
\end{scope}
\draw [->, shorten >=1mm, shorten <=1mm] (v) -|
node[below left]{graft}(vbar);
\draw [dashed] (ip.south).. controls +(south west:1.25) and
+(north east:.75)..node[left=.25cm, solid, draw]{1}(dp1.north);
\node at (1, -8) [inner sep=0mm] (control) {};
\draw [dashed] (cp.south).. controls +(south west:3.0)
..node[left=.25cm, solid, draw]{2}(control);
\draw [dashed] (control).. controls +(south east:2.5) and
+(north:1)..(dp2.north);
\draw [dashed] (vbar.south)..controls +(south east:2.5) and
+(north:2)..node[left=.5cm, solid, draw]{3}(dp3.north);
\end{tikzpicture}
\end{figure}
At this point we can draw three interim conclusions:
\begin{description}
\item[Interim conclusion 1:] Matching effects (and mismatches) in
\glspl{FR} and \glspl{TFR} must be dealt with in terms of a single position,
that is, the shared element.
\item[Interim conclusion 2:] Case attraction as well as its absence is a
process that occurs between two positions.
\item[Interim conclusion 3:] The phenomena of (mis-)matching and case
(non-)at\-trac\-tion are sufficiently similar to regard a theory in which we
need two separate treatments as a failure, hence we must study ways in which we
can interpret both phenomena as two sides of the same coin. We might call this
\textsc{the theoretician's dilemma}.
\end{description}
\section{Can we have our cake and eat it too?}\label{sec:15.3}
There is a simple and straightforward way to solve the theoretician’s dilemma.
We have been tacitly assuming that grafting applies to maximal projections, to
phrases. This is not only a simplification, but it is, in fact, wrong. First,
as I have argued in \citet{VanRiemsdijk2006b} grafting is not an exotic new
enrichment of the power of the theory but simply an instance of merge. Indeed,
a stipulation would be necessary to prevent merge from applying to, for
example, the adjective \emph{simple} with the noun \emph{matter} in \figref{fig:ex:15.10}.
But observe that limiting grafting to maximal phrases would also require a
stipulation that is unwarranted both from a theoretical perspective and for
empirical reasons.
This does not alter the fact that grafting is a powerful mechanism. There are
two reasons why this is unavoidable. First, I believe grafting is unavoidable
if we are to present cogent analyses for constructions like \glspl{FR} and
\glspl{TFR} (and many others such as Horn-amalgams, cf.\
\cite{VanRiemsdijk2006c}). There are many other cogent reasons for making
merge the central operation in syntax. As I have argued \parencite{VanRiemsdijk2006b}
grafting is an inevitable consequence of the introduction of merge. What seems
to be realized much less is that the adoption of merge inexorably initiates a
new program to search for powerful limitations of the descriptive power in much
the same way that the introduction of transformations in the 60s defined a
program to restrict them severely. If the program to restrict merge turns out
to be as fruitful as the program to restrict transformations, generative syntax
may look forward to a very bright future indeed. As for grafting, a very modest
attempt at restricting its power is presented in \citet{VanRiemsdijk2010}.
Returning now to the \enquote{theoretician’s dilemma}, consider the fact, for
example, that a \gls{TFR} can be inserted in the middle of a DP as in:
\ea\label{ex:15.25}
John has three what I would call gas guzzlers in his garage.
\z
In this example the shared element is the compound \emph{gas guzzler}. Inside
the matrix DP (\emph{three gas guzzlers}) the compound is not a complete DP
but, presumably, just N. In the \gls{TFR}, however, the \gls{PN} is a complete
DP\@. A very simplified tree structure for \eqref{ex:15.25} shows this (\figref{fig:ex:15.26}).
\begin{figure}
\caption{\label{fig:ex:15.26}`Attributive' \glspl{TFR}}
\begin{tikzpicture}[baseline]
\begin{scope}[xshift=0cm]
\Tree
[.DP
[.NUM three ]
[.N \node (gg) {gas guzzlers}; ]
]
\end{scope}
\begin{scope}[yshift=-8.5cm, xshift=-2.455cm, grow'=up]
\Tree
[.CP
[.SpecCP what$_i$ ]
[.IP
[.DP I ]
[.VP
[.V {would call} ]
[.SC
t$_i$
[.DP \node (GG) {N}; ]
]
]
]
]
\end{scope}
\draw (gg) -- (GG);
\end{tikzpicture}
\end{figure}
In our discussion about \enquote{one position or two}, what we are talking
about is positions in which the case features\is{case!case features} (or their ultimate spellout) are
located. And when we talk about case attraction and case (mis-)matching, these
positions are usually characterized as \enquote{K} (for Kase, to avoid
confusion between the ordinary word case and the grammatical term case). Before
showing how this would work for \glspl{TFR} with matching or mismatching case
such as those in \eqref{ex:15.22}, let us look at a simple case which shows
that this is typical and necessary for grafts involving inflectional
morphology.
Recall the third argument for a grafting analysis of \glspl{TFR} presented
above, cf.\ example \eqref{ex:15.16}. In Dutch attributive adjectives are inflected. The
rule is very simple. The \gls{AI} marker is always -ə
(spelled ‘-e’) unless the head noun is indefinite neuter singular, as in
(\ref{ex:15.27}e):\footnote{I have left out adjectives with non-count nouns. It should also
be pointed out that in Dutch spelling an adjective like \emph{groot} when
suffixed by –\emph{e} is spelled with a single ‘o’ (because the syllable
is open). For more detailed discussion, see
\textcites[11--13]{Broekhuis2013a}.}
\ea\label{ex:15.27}\ili{Dutch}
\ea een groot-*(e) woning\hfill indef.\ masc.\ sing.\\
(a large apartment)
\ex twee groot-*(e) woningen\hfill indef.\ masc.\ pl.\\
(two large apartments)
\ex de groot-*(e) woning\hfill def.\ masc.\ sing.\\
(the large apartment)
\ex de groot-*(e) woningen\hfill def.\ masc.\ pl.\\
(the large apartments)
\ex \textbf{een groot-(*e) huis}\hfill indef.\ neuter sing.\\
(a large house)\\
\ex twee groot-*(e) huizen\hfill indef.\ neuter pl.\\
(two large houses)
\ex het groot-*(e) huis\hfill def.\ neuter sing.\\
(the large house)
\ex de groot-*(e) huizen\hfill def.\ neuter pl.\\
(the large houses)
\z
\z
Example \eqref{ex:15.16}, repeated here as \eqref{ex:15.28}, can now be represented quite simply as
\figref{fig:ex:15.29}, where the AIs remain outside the shared adjective which is
grafted.\footnote{Not unexpectedly the same \gls{TFR} with a neuter noun is
perfectly grammatical as neither the matrix nor the \gls{TFR} requires a
\emph{-e} ending: \emph{een wat ik zou noemen groot huis}.}
\ea\label{ex:15.28}\ili{Dutch}\\
\gll een wat ik zou noemen eenvoudig-*(e) oplossing \\
a what I would call simple solution\\
\glt
\z
%%please move the includegraphics inside the {figure} environment
%%\includegraphics[width=\textwidth]{vanRiemsdijkrevised-img12.png}
%
% Dear copy{}-editor/ formatter, in \figref{fig:5} there is a small but
% important error which, due to my insufficient knowledge and means, I cannot
% correct. Underneath the blue oval there is a node label that reads
% AI{\textbar}. The last vertical stroke should be removed. That is, this label
% should simply read AI. I hope you can do this for me. Thanks.
%
%\emph{\figref{fig:5}: Mismatch avoidance with attributive adjectives}
\begin{figure}[p]
\caption{\label{fig:ex:15.29}Mismatch avoidance with attributive adjectives}
\begin{tikzpicture}[baseline]
\begin{scope}[xshift=3.60cm]
\Tree
[.DP
[.DET een ]
[.NP
[.AP
[.A \node [text width=1.8cm] (a) {eenvoudig-}; ]
[.AI \node (b1) {-e}; ]
]
[.N oplossing ]
]
]
\end{scope}
\begin{scope}[yshift=-11.15cm, xshift=-0.0cm, grow'=up]
\Tree
[.CP
[.SpecCP wat$_i$ ]
[.IP
[.DP ik ]
[.VP
[.V {zou noemen} ]
[.SC
[.{t$_i$} \edge[draw=none]; \node [text width=1.8cm] (A) {A}; ]
[.\node(ap){AP};
[.AI \node (b2) {$\emptyset$}; ]
]
]
]
]
]
\end{scope}
\draw (a) -- (A);
\draw (ap.north) -- (A.south);
\node [draw, thick, fit = (b1) (b2), inner sep=0mm] (box) {};
\end{tikzpicture}
\end{figure}
\begin{figure}[p]
\caption{\label{fig:ex:15.31}Case mismatch with \gls{TFR}}
\begin{tikzpicture}[baseline]
\begin{scope}[xshift=0cm,frontier/.style={distance from root=150pt}]
\tikzset{level 1/.style={sibling distance=40pt}}
\Tree
[.IP
[.KP
[.DP
[.DET \node (det) {ein}; ]
[.NP
[.AP \node [text width=1cm] (ap) {geiler}; ]
[.N \node [text width=1cm] (n) {Wagen}; ]
]
]
[.K [.\node(nom){NOM}; ] ]
]
\edge [roof]; {wird oft gekauft}
]
\end{scope}
\begin{scope}[yshift=-13.75cm, xshift=-6.05cm, grow'=up,
frontier/.style={distance from root=241pt}]
\Tree
[.CP
[.SpecCP Was$_i$ ]
[.IP
[.DP viele ]
[.VP
[.SC
[.DP {t$_i$} ]
[.KP
[.DP
[.\node(DET){DET}; ]
[.NP
[.\node[text width=1cm](AP){AP}; ]
[.\node[text width=1cm](N){N}; ]
]
]
[.K [.\node(acc){ACC}; ] ]
]
]
[.V nennen ]
]
]
]
\end{scope}
\draw (det) -- (DET);
\draw (ap) -- (AP);
\draw (n) -- (N);
\node [draw, thick, fit = (nom) (acc), inner sep=0mm] (box) {};
\end{tikzpicture}
\end{figure}
We see that what looked like a morphological mismatch is resolved in structure
\figref{fig:ex:15.29} as we have two separate positions. A conflict is avoided because one of
the two AI positions is empty.\footnote{For discussion of other cases involving
agglutinative morphology and also an extension to the issue of how the theta
criterion can be maintained in grafting structures, see \citet{VanRiemsdijk2010}.} With this in hand, we can address the issue of case
(mis-)matches, for example in \glspl{TFR}.\clearpage
Take example (\ref{ex:15.22}b), repeated here as \eqref{ex:15.30}.
\ea\ili{German}\\
\gll \llap{*}Was viele ein\tss{\Nom{}} geiler\tss{\Nom{}} Wagen nennen\textsuperscript{\Acc{}} wird oft gekauft\textsuperscript{\Nom{}}.\\
what many a sexy car call is often bought\\
\glt ‘What many would call a sexy car is frequently bought.’\label{ex:15.30}
\z
The structure for such a \gls{TFR} would be roughly as in \figref{fig:ex:15.31}.
The case mismatch\is{case!case mismatches} can now be localized in the box, where \Nom{} and \Acc{} are
in conflict with each other. In this example the matrix case \Nom{} has won,
which results in ungrammaticality. If the \gls{TFR} case \Acc{} wins, as in
(\ref{ex:15.22}a) there is still a conflict, but according to the case
hierarchy \Acc{} supersedes \Nom{}. And indeed, this example is perfect for
some varieties of German and definitely much better than (\ref{ex:15.22}b)
for all speakers (see also example \eqref{ex:15.5} above and
\cref{fn:15.6}).
This solution closes the circle in that case (mis-)matching in \glspl{FR} can
be treated in a completely parallel way. Take the example (\ref{ex:15.5}a)
above, repeated here as \eqref{ex:15.32}. \figref{fig:ex:15.33} is a very
simplified tree depicting the relevant structure.\largerpage[2]
\ea\label{ex:15.32}\ili{German}\\
\gll Wen\tss{\Acc{}} du einlädst\textsuperscript{\Acc{}} wird kommen\textsuperscript{\Nom{}}.\\
who-\Acc{} you invite will come\\
\glt
\z
\begin{figure}[H]
\caption{\label{fig:ex:15.33}Case mismatch resolved by superimposition}
\begin{tikzpicture}[baseline]
\begin{scope}[frontier/.style={distance from root=100pt}]
\tikzset{level 1/.style={sibling distance=90pt}}
\Tree
[.CP
[.SpecCP \node (nom) {wer\textsubscript{NOM\emph{i}}}; ]
[.IP
[.DP$_j$\textsuperscript{NOM}
e$_i$
wird
]
[.VP kommen ]
]
]
\end{scope}
\begin{scope}[yshift=-7.7cm, xshift=-1.7cm, grow'=up,
frontier/.style={distance from root=100pt}]
% \tikzset{level 1/.style={sibling distance=30pt}}
\Tree
[.CP
[.SpecCP \node (acc) {wen\textsubscript{ACC\emph{j}}}; ]
[.IP
[.DP du ]
[.VP
{DP$_j$\textsuperscript{ACC}}
einlädst
]
]
]
\end{scope}
\node [draw, thick, fit = (nom) (acc), inner sep=0mm] (box) {};
\end{tikzpicture}
\end{figure}
This is a typical example of a case mismatch\is{case!case mismatches} that is, however, accepted by many
speakers of German. As there is only one position in which a wh-word can be
spelled out, the mismatch must be resolved. It is resolved in the rectangle in
that the accusative wins over the nominative\is{nominative case}, as predicted by the Case
Hierarchy. In very strict versions of German, which do not accept this
mismatch, the battle has no winner and the derivation crashes as both
wh-words\is{wh-words}
cannot be spelled out simultaneously.\footnote{The question arises as to whether
the resolution of case conflicts that ultimately determines the spell-out takes
place in narrow syntax or post-syntactically, as an anonymous reviewer asks.
The answer has to be that this must be a matter of post-syntactic spell-out.
The most convincing considerations arguing for this view have to do with the
way that syncretism helps resolve case conflicts. Space prevents me from going
into the details here however.}\\
\section{Conclusion}
We started out with a puzzle. Case attraction and case (mis-)matching in normal
and transparent free relatives are sufficiently similar to aim for a unified
treatment of both. But case attraction involves an interaction between two
positions while case (mis-)matches seemingly involve only one position, at
least if, as I have argued, they are accounted for in terms of grafting. What
I hope to have shown is that there are good independent reasons for adopting
analyses in terms of sub-phrasal grafts which allow us to have two tree
positions for the matching or conflicting morphological elements, but only a
single spell-out position. Thereby we are an important step closer to a unified
theory of attraction and (mis)matching.
\printchapterglossary{}
\section*{Acknowledgements}
Parts of this article were presented at the workshop
\emph{Insufficient strength to defend its case: Case attraction and related
phenomena} at the University of Wrocław in September 2015. Thanks are due
to the audience for interesting discussion. In particular I would like to
thank Joanna Błaszczak and Philomen Probert for having invited me to this
conference which gave me a chance to clarify my thoughts on attraction and
matching. Thanks are also due to two anonymous reviewers. The more general
background for these issues is the antithesis of two very general forces
that manifest themselves in many ways and in many aspects of the physical
world: attraction and repulsion, see~\textcite{VanRiemsdijk2019}.
{\sloppy
\printbibliography[heading=subbibliography,notkeyword=this]
}
\end{refcontext}
\end{document}
|
{"hexsha": "7372a12f33d019475264b400409156f64aa58308", "size": 43852, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/15.tex", "max_stars_repo_name": "langsci/277", "max_stars_repo_head_hexsha": "80f4daa0d585057e668d6581927bb35c73e51828", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/15.tex", "max_issues_repo_name": "langsci/277", "max_issues_repo_head_hexsha": "80f4daa0d585057e668d6581927bb35c73e51828", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/15.tex", "max_forks_repo_name": "langsci/277", "max_forks_repo_head_hexsha": "80f4daa0d585057e668d6581927bb35c73e51828", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.5026511135, "max_line_length": 162, "alphanum_fraction": 0.6419547569, "num_tokens": 12295}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 13:17:09 2020
@author: mateusz
"""
import torch.nn as nn
import random
import torch
import copy
from collections import namedtuple
import numpy as np
from utils import dictionary_of_actions, dict_of_actions_revert_q
class DQN(object):
def __init__(self, conf, action_size, state_size, device):
self.num_qubits = conf['env']['num_qubits']
self.num_layers = conf['env']['num_layers']
memory_size = conf['agent']['memory_size']
self.final_gamma = conf['agent']['final_gamma']
self.epsilon_min = conf['agent']['epsilon_min']
self.epsilon_decay = conf['agent']['epsilon_decay']
learning_rate = conf['agent']['learning_rate']
self.update_target_net = conf['agent']['update_target_net']
neuron_list = conf['agent']['neurons']
drop_prob = conf['agent']['dropout']
self.with_angles = conf['agent']['angles']
if "memory_reset_switch" in conf['agent'].keys():
self.memory_reset_switch = conf['agent']["memory_reset_switch"]
self.memory_reset_threshold = conf['agent']["memory_reset_threshold"]
self.memory_reset_counter = 0
else:
self.memory_reset_switch = False
self.memory_reset_threshold = False
self.memory_reset_counter = False
self.action_size = action_size
self.state_size = state_size if self.with_angles else state_size - self.num_layers
self.state_size = self.state_size + 1 if conf['agent']['en_state'] else self.state_size
self.translate = dictionary_of_actions(self.num_qubits)
self.rev_translate = dict_of_actions_revert_q(self.num_qubits)
self.policy_net = self.unpack_network(neuron_list, drop_prob).to(device)
self.target_net = copy.deepcopy(self.policy_net)
self.target_net.eval()
self.gamma = torch.Tensor([np.round(np.power(self.final_gamma,1/self.num_layers),2)]).to(device) # discount rate
self.memory = ReplayMemory(memory_size)
self.epsilon = 1.0 # exploration rate
self.optim = torch.optim.Adam(self.policy_net.parameters(), lr=learning_rate)
self.loss = torch.nn.SmoothL1Loss()
self.device = device
self.step_counter = 0
self.Transition = namedtuple('Transition',
('state', 'action', 'reward',
'next_state','done'))
def remember(self, state, action, reward, next_state, done):
self.memory.push(state, action, reward, next_state, done)
def act(self, state):
state = state.unsqueeze(0)
epsilon = False
if torch.rand(1).item() <= self.epsilon:
epsilon = True
return (torch.randint(self.action_size, (1,)).item(), epsilon)
act_values = self.policy_net.forward(state)
return torch.argmax(act_values[0]).item(), epsilon
    def replay(self, batch_size):
        """One learning step: sample a batch, do a Double-DQN update, decay epsilon.

        Returns the scalar loss value of this step.
        """
        # Periodically sync the frozen target network with the online policy net.
        if self.step_counter %self.update_target_net ==0:
            self.target_net.load_state_dict(self.policy_net.state_dict())
        self.step_counter += 1
        transitions = self.memory.sample(batch_size)
        # Transpose a list of Transition tuples into one Transition of tuples.
        batch = self.Transition(*zip(*transitions))
        next_state_batch = torch.stack(batch.next_state)
        state_batch = torch.stack(batch.state)
        action_batch = torch.stack(batch.action)#, device=self.device)
        reward_batch = torch.stack(batch.reward)#.to(device=self.device)
        done_batch = torch.stack(batch.done)#.to(device=self.device)
        # Q(s, a) of the actions actually taken in the sampled transitions.
        state_action_values = self.policy_net.forward(state_batch).gather(1, action_batch.unsqueeze(1))
        """ Double DQN """
        # Action *selection* by the policy net, action *evaluation* by the target net.
        next_state_values = self.target_net.forward(next_state_batch)
        next_state_actions = self.policy_net.forward(next_state_batch).max(1)[1].detach()
        next_state_values = next_state_values.gather(1, next_state_actions.unsqueeze(1)).squeeze(1)
        """ Compute the expected Q values """
        # Terminal transitions (done=1) contribute only the immediate reward.
        expected_state_action_values = (next_state_values * self.gamma) * (1-done_batch) + reward_batch
        expected_state_action_values = expected_state_action_values.view(-1, 1)
        assert state_action_values.shape == expected_state_action_values.shape, "Wrong shapes in loss"
        cost = self.fit(state_action_values, expected_state_action_values)
        # Multiplicative epsilon decay, floored at epsilon_min.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
            self.epsilon = max(self.epsilon,self.epsilon_min)
            assert self.epsilon >= self.epsilon_min, "Problem with epsilons"
        return cost
def fit(self, output, target_f):
self.optim.zero_grad()
loss = self.loss(output, target_f)
loss.backward()
self.optim.step()
return loss.item()
def unpack_network(self, neuron_list, p):
layer_list = []
neuron_list = [self.state_size] + neuron_list
for input_n, output_n in zip(neuron_list[:-1], neuron_list[1:]):
layer_list.append(nn.Linear(input_n, output_n))
layer_list.append(nn.LeakyReLU())
layer_list.append(nn.Dropout(p=p))
layer_list.append(nn.Linear(neuron_list[-1], self.action_size))
return nn.Sequential(*layer_list)
class ReplayMemory(object):
    """Fixed-capacity ring buffer of transitions for experience replay."""

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.memory = []      # backing store; grows until full, then wraps
        self.position = 0     # index of the next slot to write
        self.Transition = namedtuple('Transition',
                                     ('state', 'action', 'reward',
                                      'next_state', 'done'))

    def push(self, *args):
        """Save one transition, overwriting the oldest once at capacity."""
        entry = self.Transition(*args)
        if len(self.memory) == self.capacity:
            self.memory[self.position] = entry
        else:
            self.memory.append(entry)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)

    def clean_memory(self):
        """Drop all stored transitions and reset the write cursor."""
        self.memory = []
        self.position = 0
# Import-only module: executing it directly does nothing.
if __name__ == '__main__':
    pass
|
{"hexsha": "91559b842822c1c10c4b75532519011cfb7b948d", "size": 6276, "ext": "py", "lang": "Python", "max_stars_repo_path": "agents/DeepQ.py", "max_stars_repo_name": "empyriumz/QAS_RL", "max_stars_repo_head_hexsha": "1f44f46acd9e61a8ed501cc7f0462c7217f46316", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "agents/DeepQ.py", "max_issues_repo_name": "empyriumz/QAS_RL", "max_issues_repo_head_hexsha": "1f44f46acd9e61a8ed501cc7f0462c7217f46316", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agents/DeepQ.py", "max_forks_repo_name": "empyriumz/QAS_RL", "max_forks_repo_head_hexsha": "1f44f46acd9e61a8ed501cc7f0462c7217f46316", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1360946746, "max_line_length": 122, "alphanum_fraction": 0.6312938177, "include": true, "reason": "import numpy", "num_tokens": 1337}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import numpy as np
import pandas as pd
import pytest
from eemeter.segmentation import (
CalTRACKSegmentModel,
SegmentedModel,
segment_time_series,
iterate_segmented_dataset,
)
@pytest.fixture
def index_8760():
    """Hourly UTC index covering one non-leap year (365 * 24 = 8760 points)."""
    return pd.date_range("2017-01-01", freq="H", periods=365 * 24, tz="UTC")
def test_segment_time_series_invalid_type(index_8760):
    """An unrecognized segment_type must be rejected with ValueError."""
    with pytest.raises(ValueError):
        segment_time_series(index_8760, segment_type="unknown")
def test_segment_time_series_single(index_8760):
    """'single' puts every hour into one all-encompassing segment."""
    w = segment_time_series(index_8760, segment_type="single")
    assert w.shape == (8760, 1)
    assert list(w.columns) == ["all"]
    assert w.sum().sum() == 8760.0
def test_segment_time_series_one_month(index_8760):
    """'one_month' yields one weight column per calendar month."""
    w = segment_time_series(index_8760, segment_type="one_month")
    expected_cols = [
        "jan", "feb", "mar", "apr", "may", "jun",
        "jul", "aug", "sep", "oct", "nov", "dec",
    ]
    assert list(w.columns) == expected_cols
    assert w.shape == (8760, 12)
    # every hour carries total weight 1
    assert w.sum().sum() == 8760.0
def test_segment_time_series_three_month(index_8760):
    """'three_month' yields overlapping three-month windows."""
    w = segment_time_series(index_8760, segment_type="three_month")
    expected_cols = [
        "dec-jan-feb", "jan-feb-mar", "feb-mar-apr", "mar-apr-may",
        "apr-may-jun", "may-jun-jul", "jun-jul-aug", "jul-aug-sep",
        "aug-sep-oct", "sep-oct-nov", "oct-nov-dec", "nov-dec-jan",
    ]
    assert list(w.columns) == expected_cols
    assert w.shape == (8760, 12)
    # total weight works out to 3 per hour: 3 * 8760
    assert w.sum().sum() == 26280.0
def test_segment_time_series_three_month_weighted(index_8760):
    """'three_month_weighted' uses the same windows with down-weighted shoulders."""
    w = segment_time_series(index_8760, segment_type="three_month_weighted")
    bases = [
        "dec-jan-feb", "jan-feb-mar", "feb-mar-apr", "mar-apr-may",
        "apr-may-jun", "may-jun-jul", "jun-jul-aug", "jul-aug-sep",
        "aug-sep-oct", "sep-oct-nov", "oct-nov-dec", "nov-dec-jan",
    ]
    assert list(w.columns) == [b + "-weighted" for b in bases]
    assert w.shape == (8760, 12)
    # total weight works out to 2 per hour: 2 * 8760
    assert w.sum().sum() == 17520.0
def test_segment_time_series_drop_zero_weight_segments(index_8760):
    """With drop_zero_weight_segments, months never touched by the index vanish."""
    # first 100 hours of the year fall entirely within January
    w = segment_time_series(
        index_8760[:100], segment_type="one_month", drop_zero_weight_segments=True
    )
    assert list(w.columns) == ["jan"]
    assert w.shape == (100, 1)
    assert w.sum().sum() == 100.0
@pytest.fixture
def dataset():
    """1000-hour frame with constant columns a=1 and b=2."""
    idx = pd.date_range("2017-01-01", periods=1000, freq="H", tz="UTC")
    return pd.DataFrame({"a": 1, "b": 2}, index=idx, columns=["a", "b"])
def test_iterate_segmented_dataset_no_segmentation(dataset):
    """With no segmentation the iterator yields the whole dataset once, unnamed."""
    it = iterate_segmented_dataset(dataset, segmentation=None)
    name, data = next(it)
    assert name is None
    assert list(data.columns) == ["a", "b", "weight"]
    assert data.shape == (1000, 3)
    assert data.sum().sum() == 4000
    # exactly one chunk is produced
    with pytest.raises(StopIteration):
        next(it)
@pytest.fixture
def segmentation(dataset):
    """Monthly segmentation weights for the `dataset` fixture's index."""
    return segment_time_series(dataset.index, segment_type="one_month")
def test_iterate_segmented_dataset_with_segmentation(dataset, segmentation):
    """Each month yields its own chunk; months past the data come back empty."""
    it = iterate_segmented_dataset(dataset, segmentation=segmentation)
    expectations = [
        ("jan", (744, 3), 2976.0),   # 31 full days of January
        ("feb", (256, 3), 1024.0),   # remaining 1000 - 744 hours
        ("mar", (0, 3), 0.0),        # no data reaches March
    ]
    for expected_name, expected_shape, expected_total in expectations:
        name, data = next(it)
        assert name == expected_name
        assert list(data.columns) == ["a", "b", "weight"]
        assert data.shape == expected_shape
        assert data.sum().sum() == expected_total
def test_iterate_segmented_dataset_with_processor(dataset, segmentation):
    """feature_processor sees mapped segment names and can reshape each chunk."""
    seen_names = []

    def feature_processor(
        segment_name, dataset, column_mapping=None
    ):  # rename some columns
        seen_names.append(segment_name)
        return dataset.rename(columns=column_mapping).assign(weight=1)

    it = iterate_segmented_dataset(
        dataset,
        segmentation=segmentation,
        feature_processor=feature_processor,
        feature_processor_kwargs={"column_mapping": {"a": "c", "b": "d"}},
        feature_processor_segment_name_mapping={"jan": "jan2", "feb": "feb2"},
    )
    for expected_seen, expected_name in [(["jan2"], "jan"), (["jan2", "feb2"], "feb")]:
        name, data = next(it)
        # the processor receives the *mapped* name, the caller the original one
        assert seen_names == expected_seen
        assert name == expected_name
        assert list(data.columns) == ["c", "d", "weight"]
        assert data.shape == (1000, 3)
        assert data.sum().sum() == 4000.0
def test_segment_model():
    """A CalTRACKSegmentModel predicts from its stored coefficients alone."""
    model = CalTRACKSegmentModel(
        segment_name="segment",
        model=None,
        formula="meter_value ~ C(hour_of_week) + a - 1",
        model_params={"C(hour_of_week)[1]": 1, "a": 1},
        warnings=None,
    )
    idx = pd.date_range("2017-01-01", periods=2, freq="H", tz="UTC")
    frame = pd.DataFrame({"a": [1, 1], "hour_of_week": [1, 1]}, index=idx)
    # two rows, each contributing 1 + 1 from the two unit coefficients
    assert model.predict(frame).sum() == 4
def test_segmented_model():
    """SegmentedModel predicts only over the segments it has models for."""
    jan_model = CalTRACKSegmentModel(
        segment_name="jan",
        model=None,
        formula="meter_value ~ C(hour_of_week) + a- 1",
        model_params={"C(hour_of_week)[1]": 1, "a": 1},
        warnings=None,
    )

    def fake_feature_processor(segment_name, segment_data):
        # constant features so every modeled hour predicts exactly 2
        return pd.DataFrame(
            {"hour_of_week": 1, "a": 1, "weight": segment_data.weight},
            index=segment_data.index,
        )

    segmented = SegmentedModel(
        segment_models=[jan_model],
        prediction_segment_type="one_month",
        prediction_segment_name_mapping=None,
        prediction_feature_processor=fake_feature_processor,
        prediction_feature_processor_kwargs=None,
    )
    # make this cover jan and feb but only supply jan model
    idx = pd.date_range("2017-01-01", periods=24 * 50, freq="H", tz="UTC")
    temps = pd.Series(np.linspace(0, 100, 24 * 50), index=idx)
    predicted = segmented.predict(temps.index, temps).result.predicted_usage
    assert predicted.sum() == 1488.0
def test_segment_model_serialized():
    """json() exposes formula, params, and warnings, and round-trips to a string."""
    model = CalTRACKSegmentModel(
        segment_name="jan",
        model=None,
        formula="meter_value ~ a + b - 1",
        model_params={"a": 1, "b": 1},
        warnings=None,
    )
    serialized = model.json()
    assert serialized["formula"] == "meter_value ~ a + b - 1"
    assert serialized["model_params"] == {"a": 1, "b": 1}
    assert serialized["warnings"] == []
    assert json.dumps(serialized)
def test_segmented_model_serialized():
    """Segmented-model json() records segment type and the processor's name."""
    jan_model = CalTRACKSegmentModel(
        segment_name="jan",
        model=None,
        formula="meter_value ~ a + b - 1",
        model_params={"a": 1, "b": 1},
        warnings=None,
    )

    def fake_feature_processor(segment_name, segment_data):  # pragma: no cover
        return pd.DataFrame(
            {"a": 1, "b": 1, "weight": segment_data.weight}, index=segment_data.index
        )

    segmented = SegmentedModel(
        segment_models=[jan_model],
        prediction_segment_type="one_month",
        prediction_segment_name_mapping=None,
        prediction_feature_processor=fake_feature_processor,
        prediction_feature_processor_kwargs=None,
    )
    serialized = segmented.json()
    assert serialized["prediction_segment_type"] == "one_month"
    assert serialized["prediction_feature_processor"] == "fake_feature_processor"
    assert json.dumps(serialized)
|
{"hexsha": "1c32d80c636c7007f06dbb384a20862f86d2b290", "size": 8945, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_segmentation.py", "max_stars_repo_name": "goitom/eemeter", "max_stars_repo_head_hexsha": "bb05d5b776546858f8f3a8d3a95bec202728d9f0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_segmentation.py", "max_issues_repo_name": "goitom/eemeter", "max_issues_repo_head_hexsha": "bb05d5b776546858f8f3a8d3a95bec202728d9f0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_segmentation.py", "max_forks_repo_name": "goitom/eemeter", "max_forks_repo_head_hexsha": "bb05d5b776546858f8f3a8d3a95bec202728d9f0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0609318996, "max_line_length": 85, "alphanum_fraction": 0.6512017887, "include": true, "reason": "import numpy", "num_tokens": 2234}
|
import sys, pdb
import sqlalchemy as sa
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base
from obspy.core import trace
from obspy.core import Stream
from obspy.core.util import AttribDict
from datetime import datetime
import numpy as np
from numpy import append
import pisces as ps
from pisces import request
from IPython import embed
import time
def get_channel(session, Site, Wfdisc, array):
    """Return the unique distinct Wfdisc query results for every station of an array.

    Parameters: session (SQLAlchemy session), Site/Wfdisc (mapped table
    classes), array (refsta code identifying the reference array).
    """
    ssSite = session.query(Site).filter(Site.refsta == array).all()
    wf_T = []  # bug fix: was a bare name, which raised NameError on first append
    for ssi in ssSite:
        # bug fix: compare against the station code (ssi.sta), not the Site ORM
        # object itself — consistent with get_arraywaveforms below
        wf = session.query(Wfdisc).filter(Wfdisc.sta == ssi.sta).distinct()
        wf_T.append(wf)
    return np.unique(wf_T)
def get_arraywaveforms(session, Site, Wfdisc, array, t0=None, te=None, channel=None):
    """Fetch, merge, and coordinate-tag waveforms for every station of an array.

    If t0/te are not given they are derived from the first station's wfdisc
    rows (latest start, earliest end) so all traces share a common window.
    Each fetch is retried up to 5 times before aborting the process.
    Returns an obspy Stream with per-trace station coordinates attached.
    """
    ssSite = session.query(Site).filter(Site.refsta == array).all()
    wf = session.query(Wfdisc)
    aa = Stream()
    for ssi in ssSite:
        if t0 is None:  # derive a common time window from this station's rows
            wfT = wf.filter(Wfdisc.sta == ssi.sta).all()
            timeI = np.asanyarray([wfTi.time for wfTi in wfT])
            timeF = np.asanyarray([wfTi.endtime for wfTi in wfT])
            t0 = max(timeI)
            te = min(timeF)
        for t1 in range(5):
            try:
                aaT = request.get_waveforms(session, Wfdisc, station=ssi.sta, starttime=t0,
                                            endtime=te, channel=channel)
                break
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate instead of triggering a retry
            except Exception:
                print('try get data:', t1)
                print('go to sleep for 5 seconds and try again')
                time.sleep(5)
                if t1 == 4:
                    print('There is problem connecting to the data waveforms')
                    sys.exit()  # was exit(); sys.exit is the correct API outside the REPL
        if len(aaT) == 0:
            print('maybe this is a ref name, sta:', ssi.sta)
            continue
        aaT.merge(fill_value=0)
        # attach station coordinates so array-processing code can use geometry
        aaT[0].stats.coordinates = AttribDict({'latitude': ssi.lat, 'elevation': ssi.elev, 'longitude': ssi.lon})
        aa = aa + aaT
    aa.merge(fill_value=0)
    return aa
|
{"hexsha": "e8d547b447c39ca484c21fd27dcaefc812ec57f8", "size": 2271, "ext": "py", "lang": "Python", "max_stars_repo_path": "infrapy/utils/get_arraywaveforms.py", "max_stars_repo_name": "LANL-Seismoacoustics/infrapy", "max_stars_repo_head_hexsha": "132c1f5f9c074eca7300ab35d23109d8423a9912", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2020-03-17T18:43:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T17:47:14.000Z", "max_issues_repo_path": "infrapy/utils/get_arraywaveforms.py", "max_issues_repo_name": "LANL-Seismoacoustics/infrapy", "max_issues_repo_head_hexsha": "132c1f5f9c074eca7300ab35d23109d8423a9912", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-04-28T01:09:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T06:06:20.000Z", "max_forks_repo_path": "infrapy/utils/get_arraywaveforms.py", "max_forks_repo_name": "LANL-Seismoacoustics/infrapy", "max_forks_repo_head_hexsha": "132c1f5f9c074eca7300ab35d23109d8423a9912", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-08T20:29:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-28T18:03:39.000Z", "avg_line_length": 30.28, "max_line_length": 111, "alphanum_fraction": 0.582122413, "include": true, "reason": "import numpy,from numpy", "num_tokens": 571}
|
#!/usr/bin/env python
"""Generic utils for LoFreq
"""
__author__ = "Andreas Wilm"
__email__ = "wilma@gis.a-star.edu.sg"
__copyright__ = "2011 Genome Institute of Singapore"
__license__ = "The MIT License"
#--- standard library imports
#
from math import log10, log
import sys
from time import strftime
import string
MAX_INT = 2147483647
# instead of sys.maxint
#--- third-party imports
#
from scipy.stats import chi2
#--- project specific imports
#
# nothing should go here by definition
#def mean_and_stdv(x):
# """
# Calculate mean and standard deviation of data x[]:
# mean = {\sum_i x_i \over n}
# std = sqrt(\sum_i (x_i - mean)^2 \over n-1)
#
# Based on
# http://www.physics.rutgers.edu/~masud/computing/WPark_recipes_in_python.html
# """
#
# num = len(x)
# assert num != 0
# if num == 1:
# return (x[0], 0.0)
#
# mean = sum(x)/float(num)
# std = sum([(a-mean)**2 for a in x])
# std = sqrt(std / float(num-1))
#
# return mean, std
def now():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return strftime("%Y-%m-%d %H:%M:%S")
def fisher_comb(pv1, pv2):
    """Combine two p-values with Fisher's method.

    Under the null, -2*(ln p1 + ln p2) is chi-square distributed with 4
    degrees of freedom. See
    http://en.wikipedia.org/wiki/Fisher's_method
    and breseq-0.18b:polymorphism_statistics.r
    """
    if pv1 == 0 or pv2 == 0:
        # The log statistic would be infinite; short-circuit to 0.
        # (not sure if this is correct; see also
        # http://stats.stackexchange.com/questions/58537/fishers-method-when-p-value-0)
        return 0.0
    statistic = -2.0 * (log(pv1) + log(pv2))
    # upper-tail probability of chi2 with 4 df
    # (http://stackoverflow.com/questions/11725115/p-value-from-chi-sq-test-statistic-in-python)
    return 1.0 - chi2.cdf(statistic, 4)
def complement(strand, na_type='DNA'):
    """Return the complement of a nucleic-acid sequence.

    Based on http://stackoverflow.com/questions/1738633/more-pythonic-way-to-find-a-complementary-dna-strand
    (Nadia Alramli), extended with DNA/RNA handling.

    >>> complement("UTAGC")
    'AATCG'
    >>> complement("utagc")
    'aatcg'
    >>> complement("UTAGC", na_type="RNA")
    'AAUCG'
    >>> complement("utagc", na_type="RNA")
    'aaucg'
    """
    targets = {'DNA': 'AATCGaatcg', 'RNA': 'AAUCGaaucg'}
    if na_type not in targets:
        raise ValueError("Unknown NA type %s" % na_type)
    # Python 3 translates via bytes tables, Python 2 via string.maketrans.
    if sys.version_info[0] > 2:
        tr = bytes.maketrans(b'UTAGCutagc', targets[na_type].encode())
    else:
        tr = string.maketrans('UTAGCutagc', targets[na_type])
    return strand.translate(tr)
def prob_to_phredqual(prob):
    """Convert an error probability to an integer Phred quality.

    A probability of exactly zero saturates at MAX_INT (stand-in for the
    removed sys.maxint).

    >>> prob_to_phredqual(0.01)
    20
    """
    assert prob >= 0.0, (
        "Probability can't be smaller than 0 but got %f" % prob)
    try:
        return int(round(-10.0 * log10(prob)))
    except ValueError:
        # log10(0) is undefined: saturate instead of raising
        return MAX_INT
def phredqual_to_prob(phredqual):
    """Convert an integer Phred quality back to an error probability.

    Also valid for phredqual == 0 (returns 1.0).

    >>> '%.2f' % phredqual_to_prob(20)
    '0.01'
    """
    assert isinstance(phredqual, int)
    exponent = -phredqual / 10.0
    return 10 ** exponent
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
|
{"hexsha": "49d8e74a95989f983215d0d29431e7bfcecfab69", "size": 3465, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tools/lofreq_star/utils.py", "max_stars_repo_name": "joshwkearney/lofreq", "max_stars_repo_head_hexsha": "8966e95044875ec9068d2ea4d1cf72ed96d92781", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2015-01-02T19:18:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T04:16:18.000Z", "max_issues_repo_path": "src/tools/lofreq_star/utils.py", "max_issues_repo_name": "joshwkearney/lofreq", "max_issues_repo_head_hexsha": "8966e95044875ec9068d2ea4d1cf72ed96d92781", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 125, "max_issues_repo_issues_event_min_datetime": "2015-01-06T07:25:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T12:56:23.000Z", "max_forks_repo_path": "src/tools/lofreq_star/utils.py", "max_forks_repo_name": "joshwkearney/lofreq", "max_forks_repo_head_hexsha": "8966e95044875ec9068d2ea4d1cf72ed96d92781", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2015-01-14T00:41:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T14:45:13.000Z", "avg_line_length": 21.9303797468, "max_line_length": 115, "alphanum_fraction": 0.6063492063, "include": true, "reason": "from scipy", "num_tokens": 1043}
|
import os
from collections import Counter
from typing import List, Dict
from OurPaper.myconstants import *
import numpy as np
from detectron2.data import MetadataCatalog, DatasetCatalog, \
build_detection_test_loader, build_detection_train_loader
from detectron2.data.datasets.coco import load_coco_json, convert_to_coco_json
from detectron2.data.datasets.meta_coco import load_meta_coco_json
def get_COCO_subsplit(classes):
    """
    Given a list of category id's present in the original COCO category, return
    the matching COCO category records, e.g.
    [{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, ...]
    """
    from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES

    # Keep only the COCO categories whose id was requested.
    filtered_list = [cat for cat in COCO_CATEGORIES if cat['id'] in classes]
    # Warn that some given classes have been ignored due to them not being in COCO at all.
    present_ids = [cat['id'] for cat in filtered_list]
    ignored_classes = [cls for cls in classes if cls not in present_ids]
    if ignored_classes:
        print(f'Ignored the following classes: {ignored_classes}')
    # TODO(): Perhaps turn this into some assert.
    # Stuff categories (isthing == 0) are not supported.
    for cat in filtered_list:
        if cat['isthing'] == 0:
            raise ValueError('given classes contain isthing = 0, but isthing not supported')
    return filtered_list
def build_metadata_from_subsplit_COCO(class_indices):
    """Assemble Detectron2-style 'thing' metadata for a COCO class subset."""
    categories = get_COCO_subsplit(class_indices)
    things = [cat for cat in categories if cat['isthing'] == 1]
    thing_ids = [cat['id'] for cat in things]
    return {
        # map original COCO ids onto a dense 0..N-1 range
        "thing_dataset_id_to_contiguous_id": {k: i for i, k in enumerate(thing_ids)},
        "thing_classes": [cat['name'] for cat in things],
        "thing_colors": [cat['color'] for cat in things],
        "classes_to_eval": class_indices,
    }
def register_test_class_coco(name, class_indices, json_path='cocosplit/datasplit/trainvalno5k.json',
                            imgroot='coco/trainval2014'):
    """Register `name` in Detectron2's catalogs as a meta-COCO dataset limited
    to the given class indices, with metadata built from the COCO category list."""
    # TODO(): Hardcoded datasets root folder
    json_path, imgroot = os.path.join('datasets', json_path), os.path.join('datasets', imgroot)
    meta = build_metadata_from_subsplit_COCO(class_indices)
    print(f"Registering dataset named {name} having the following classes: {meta['thing_classes']}")
    # Lazy loader: the JSON is only parsed when the dataset is first requested.
    DatasetCatalog.register(name, lambda: load_meta_coco_json(json_path, imgroot, meta, name))
    # Set the meta-data
    MetadataCatalog.get(name).set(
        json_file=json_path, image_root=imgroot, evaluator_type="coco",
        dirname="datasets/coco", **meta,
    )
def _get_novel_base_splits_coco(novel_classes_split=0, novel_cls_idx=None, base_cls_idx=None):
"""
Code to get the 4 possible COCO-splits according to "One-Shot Instance Segmentation"
"""
if novel_cls_idx is None: # Class indices not provided, build them from the split
if base_cls_idx is not None:
raise ValueError('base_cls_idx is not None while novel_cls_idx is!')
if novel_classes_split >= COCO_MAX_NOVEL_SPLIT or novel_classes_split < 0:
raise ValueError(f'Dataset only supports {COCO_MAX_NOVEL_SPLIT} possible one_shot splits')
index = novel_classes_split
step_size = COCO_ALL_CLASSES_NUMBER / COCO_CLASSES_PER_SPLIT
novel_cls_idx = np.array([step_size * i + index for i in range(COCO_CLASSES_PER_SPLIT)])
# The base classes are just the other classes of the dataset.
base_cls_idx = np.array(range(COCO_ALL_CLASSES_NUMBER))[
np.array([i not in novel_cls_idx for i in range(COCO_ALL_CLASSES_NUMBER)])]
return novel_cls_idx, base_cls_idx
def prepare_coco_fewshot(coco_dataset, output_dir, novel_classes_split=0, novel_cls_idx=None, base_cls_idx=None):
    """
    Given a coco_dataset supported by Detectron2 (coco_2017_train, coco_2017_val etc.)
    resolve its image root / annotation JSON path and the novel/base class split.

    NOTE(review): this function looks unfinished upstream — the path lookup was
    commented out, leaving img_root/json_file undefined (NameError), and
    `output_dir` is still unused; the JSON filtering/writing step remains TODO.
    """
    novel_cls_idx, base_cls_idx = _get_novel_base_splits_coco(novel_classes_split, novel_cls_idx, base_cls_idx)
    # Bug fix: look up the paths (the original referenced img_root/json_file
    # without ever assigning them), mirroring custom_register_dataset below.
    img_root, json_file = PREDEFINED_COCO_PATHS[coco_dataset]
    img_root, json_file = os.path.join(OURPAPER_DATASET_FOLDER, img_root), os.path.join(OURPAPER_DATASET_FOLDER, json_file)
    return img_root, json_file, novel_cls_idx, base_cls_idx
def get_dataset_dict_coco(dataset_path=None, novel_classes_split=0, novel_cls_idx=None, base_cls_idx=None):
    """
    Resolve the novel/base class-index split for the COCO 2017 dataset.

    Raises ValueError if no dataset path is given, if base_cls_idx is supplied
    without novel_cls_idx, or if the split number is out of range.
    Returns (novel_cls_idx, base_cls_idx).
    """
    if dataset_path is None:
        raise ValueError('No dataset path provided!')
    if novel_cls_idx is None:  # Class indices not provided, build them from the split
        if base_cls_idx is not None:
            raise ValueError('base_cls_idx is not None while novel_cls_idx is!')
        if novel_classes_split >= COCO_MAX_NOVEL_SPLIT or novel_classes_split < 0:
            raise ValueError(f'Dataset only supports {COCO_MAX_NOVEL_SPLIT} possible one_shot splits')
        index = novel_classes_split
        step_size = COCO_ALL_CLASSES_NUMBER / COCO_CLASSES_PER_SPLIT
        novel_cls_idx = np.array([step_size * i + index for i in range(COCO_CLASSES_PER_SPLIT)])
        # The base classes are just the other classes of the dataset.
        # Bug fix: the original passed a *generator* to np.array (producing a
        # useless 0-d object array) and tested membership against the int
        # `novel_classes_split`; use the corrected logic from
        # _get_novel_base_splits_coco instead.
        base_cls_idx = np.array(range(COCO_ALL_CLASSES_NUMBER))[
            np.array([i not in novel_cls_idx for i in range(COCO_ALL_CLASSES_NUMBER)])]
    # Bug fix: the computed indices were never returned.
    return novel_cls_idx, base_cls_idx
def custom_dataset_dict(json_file, img_root, name, subset='train'):
    """Load COCO-format annotation dicts for `name`; hook point for filtering.

    NOTE(review): `subset` is currently unused — confirm intent upstream.
    """
    # Basically try out register_coco_instances but without the metadata.
    dataset_dicts = load_coco_json(json_file, img_root, name)
    # Logic for filtering dataset_dicts can go here. This includes omitting some classes and such.
    # End of logic
    return dataset_dicts
def custom_register_dataset(name, train=True, metadata=None):
    """Register `name`_train or `name`_val in Detectron2's catalogs, backed by
    the corresponding coco_2017 split under OURPAPER_DATASET_FOLDER.

    `metadata` entries are forwarded to MetadataCatalog unchanged.
    """
    if metadata is None:  # fix: avoid a shared mutable default argument
        metadata = {}
    base_dataset = 'coco_2017'  # Assume test datasets always based on coco_2017 for now
    if train:
        base_dataset = base_dataset + '_train'
        name = name + '_train'
    else:
        base_dataset = base_dataset + '_val'
        name = name + '_val'
    img_root, json_file = PREDEFINED_COCO_PATHS[base_dataset]
    img_root, json_file = os.path.join(OURPAPER_DATASET_FOLDER, img_root), os.path.join(OURPAPER_DATASET_FOLDER, json_file)
    # Lazy loader: annotations are parsed only when the dataset is first used.
    DatasetCatalog.register(name, lambda: custom_dataset_dict(json_file, img_root, name))
    MetadataCatalog.get(name).set(
        json_file=json_file, image_root=img_root, evaluator_type="coco", **metadata
    )
# Default dataset-dict builder exported by this module (COCO backend).
get_dataset_dict = get_dataset_dict_coco
# get_dataset_dict = get_dataset_dict_coco
|
{"hexsha": "1b311e79e5ef17e7779e5d45dc94a142bf0be922", "size": 6868, "ext": "py", "lang": "Python", "max_stars_repo_path": "OurPaper/dataset.py", "max_stars_repo_name": "superclass-FSIS/test", "max_stars_repo_head_hexsha": "9bb2844c77704a609291135b75e94a794f235aa0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 45, "max_stars_repo_stars_event_min_datetime": "2021-04-09T12:52:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:08:58.000Z", "max_issues_repo_path": "OurPaper/dataset.py", "max_issues_repo_name": "superclass-FSIS/test", "max_issues_repo_head_hexsha": "9bb2844c77704a609291135b75e94a794f235aa0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-06-29T07:47:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T04:21:00.000Z", "max_forks_repo_path": "OurPaper/dataset.py", "max_forks_repo_name": "superclass-FSIS/test", "max_forks_repo_head_hexsha": "9bb2844c77704a609291135b75e94a794f235aa0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-05-23T05:48:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T06:42:13.000Z", "avg_line_length": 45.1842105263, "max_line_length": 123, "alphanum_fraction": 0.729324403, "include": true, "reason": "import numpy", "num_tokens": 1676}
|
import torch
import torchvision
from torch import nn
import logging
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import time
import os
import copy
import logging
import sys
sys.path.append('../')
from Model.Res_Seg import Res_Seg,get_norm_layer,init_weights
from Data.get_segmentation_data import get_dataloader
from Model.loss import CrossEntropyLoss2d_unet as CrossEntropyLoss2d
from Train_unet_module import train_model
from Options.RSeg_options import *
#config = res_seg_v1
# SECURITY NOTE(review): eval() on a CLI argument executes arbitrary code; it is
# used here to pick a config object by name (e.g. "res_seg_v1") from RSeg_options.
config = eval(sys.argv[1])
print("----Config name: %s-----------batch_size: %d----------"%(config.name,config.batch_size))
gpu_ids = config.device_ids
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=config.norm)
# Build the residual segmentation network and initialize its weights.
model = Res_Seg(input_nc=config.input_nc, output_nc=config.output_nc, n_blocks=config.n_blocks,\
        gpu_ids=config.device_ids,use_dropout=config.use_dropout,n_downsampling=config.n_downsampling)
init_weights(model, init_type=config.init_type)
device_ids=config.device_ids
# Use the configured CUDA devices when any GPU is visible, otherwise the CPU.
device = torch.device('cuda:{}'.format(','.join([str(i) for i in device_ids])) \
                      if torch.cuda.device_count()>0 else torch.device('cpu'))
model_ft = nn.DataParallel(model, device_ids, dim=0)
model_ft.to(device)
criterion = CrossEntropyLoss2d()
# Observe that all parameters are being optimized
if config.optim == 'sgd':
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
else:
    # NOTE(review): the RMSprop branch is built over `model` (not the
    # DataParallel wrapper) and uses config.lr, while the SGD branch
    # hard-codes lr=0.01 — confirm both asymmetries are intentional.
    optimizer_ft = optim.RMSprop(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
# Step-wise learning-rate decay at the configured milestone epochs.
exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer_ft, milestones=config.milestones, gamma=0.1, )
# One dataloader per phase; paths and worker counts come from the config.
dataloaders = {'train':get_dataloader(batch_size=config.batch_size,\
                        root_dir = config.train_path_sample,\
                        mask_dir=config.train_path_mask,num_workers=config.num_workers),
                'val':get_dataloader(batch_size= config.batch_size,\
                        root_dir = config.test_path_sample,\
                        mask_dir=config.test_path_mask,num_workers=config.num_workers)}
model_ft = train_model(model_ft, dataloaders, criterion, optimizer_ft, exp_lr_scheduler,\
                        num_epochs=config.num_epochs,save_epoch=config.save_epoch,\
                        display_size=config.display_size,save_path= config.save_path)
|
{"hexsha": "41cdf250e495b8863bcc843916461e5c126a64db", "size": 2430, "ext": "py", "lang": "Python", "max_stars_repo_path": "Train/Train_res_seg.py", "max_stars_repo_name": "giussepi/cyto_CRLM", "max_stars_repo_head_hexsha": "4489d5d81c4270ec7b6048ceb2f2a02bfa699177", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Train/Train_res_seg.py", "max_issues_repo_name": "giussepi/cyto_CRLM", "max_issues_repo_head_hexsha": "4489d5d81c4270ec7b6048ceb2f2a02bfa699177", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-03-24T18:11:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:16:18.000Z", "max_forks_repo_path": "Train/Train_res_seg.py", "max_forks_repo_name": "giussepi/cyto_CRLM", "max_forks_repo_head_hexsha": "4489d5d81c4270ec7b6048ceb2f2a02bfa699177", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2876712329, "max_line_length": 111, "alphanum_fraction": 0.7139917695, "include": true, "reason": "import numpy", "num_tokens": 520}
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from tqdm import tqdm
import swalign
from multiprocessing import Process, Queue
from utils.color import getRandomColor
from utils.manager import PathManager
from utils.file import dumpJson, loadJson, dumpIterable
from preliminaries.embedding import aggregateApiSequences
from utils.magic import magicSeed, sample, nRandom
from utils.timer import StepTimer
from utils.stat import calBeliefeInterval
k = 10    # support examples sampled per class (shots)
qk = 5    # query examples sampled per class
n = 10    # classes sampled per episode (ways)
N = 20    # examples stored per class in the string matrix (matrix rows // N = #classes)
def apiCluster(dict_path, map_dump_path, cluster_num=26):
    """KMeans-cluster the API embedding matrix and persist the result.

    Loads the embedding matrix from `dict_path`, groups the vectors into
    `cluster_num` clusters and writes an {api_index: cluster_label} mapping
    to `map_dump_path` as JSON.
    """
    embedding_matrix = np.load(dict_path, allow_pickle=True)

    print("Clustering...")
    kmeans_model = KMeans(n_clusters=cluster_num).fit(embedding_matrix)

    index_to_cluster = {idx: int(label)
                        for idx, label in enumerate(kmeans_model.labels_)}
    dumpJson(index_to_cluster, map_dump_path)
###############################################################
# Using the API clustering result, the API-index mapping and the
# converted string sequences, turn every sample into an A-Z character
# string so that MSA can be run on it. The final output is a JSON file
# in which samples of the same class are adjacent.
###############################################################
def convertApiCategory(clst_path, word_map_path, json_path, str_dump_path, max_len=300):
    """Convert every API sequence into an A-Z string via the cluster map.

    Each API token is mapped token -> word index -> cluster label, and the
    label is encoded as the character chr(65 + label). Sequences are truncated
    to `max_len` tokens and the strings are dumped to `str_dump_path`.
    """
    word_map = loadJson(word_map_path)
    cluster_map = loadJson(clst_path)
    sequences = aggregateApiSequences(json_path, is_class_dir=True)

    strings = []
    for sequence in sequences:
        chars = []
        for token in sequence[:max_len]:
            cluster_idx = cluster_map[str(word_map[token])]
            chars.append(chr(65 + cluster_idx))
        strings.append("".join(chars))

    dumpIterable(strings, title="strings", path=str_dump_path)
def align(s1, s2, out):
    """Return the Smith-Waterman local-alignment identity of s1 vs s2.

    Scoring is +2 for a match and -1 for a mismatch. `out` is kept for
    signature compatibility with the callers but is not used here.
    """
    scoring_matrix = swalign.NucleotideScoringMatrix(2, -1)
    # The aligner object holds the scoring matrix (gap penalties are defaults).
    aligner = swalign.LocalAlignment(scoring_matrix)
    # Alignment of ref (first) against query (second).
    return aligner.align(s1, s2).identity
# def scoreEpisodeAlignment(str_path, epoch=1000, log_path=None, verbose=False,
# acc_dump_path=None):
def scoreEpisodeAlignment(matrix, acc_queue, process_id, epoch=1000, log_path=None, verbose=False):
    """Worker loop: evaluate `epoch` few-shot episodes by sequence alignment.

    Each epoch samples an n-way episode from `matrix` (which stores N strings
    per class, using the module-level k/qk/n/N), classifies every query by the
    summed alignment score against each class's k supports, and puts the
    episode accuracy on `acc_queue`.

    NOTE(review): `log_path` only sets `out`, which align() accepts but never
    uses, and the opened file is never closed here (closing code is commented
    out below) — confirm whether log output is still wanted.
    """
    # if acc_dump_path is not None:
    #     if not os.path.exists(acc_dump_path):
    #         dumpIterable([], "acc", acc_dump_path)
    #     acc_sum = loadJson(acc_dump_path)['acc']
    # else:
    #     acc_sum = []
    # matrix = loadJson(str_path)['strings']
    class_pool = list(range(len(matrix) // N))      # one class per N consecutive strings
    item_pool = set(range(N))                       # per-class item indices 0..N-1
    out = sys.stdout if log_path is None else open(log_path, "w")
    tm = StepTimer(epoch)
    tm.begin()
    for i in range(epoch):
        print("Process", process_id, ":", "Epoch", i)
        supports = []
        queries = []
        task_seed = magicSeed()
        sampling_seed = magicSeed()
        class_seeds = nRandom(n, sampling_seed)
        label_space = sample(class_pool, n, task_seed)
        for cls,cseed in zip(label_space, class_seeds):
            # k supports and qk queries per class, disjoint by construction.
            support_idxes = sample(item_pool, k, cseed, return_set=True)
            query_idxes = sample(item_pool.difference(support_idxes), qk, cseed, return_set=True)
            # Shift per-class indices into global row indices of `matrix`.
            support_idxes = np.array(list(support_idxes)) + N*cls
            query_idxes = np.array(list(query_idxes)) + N*cls
            supports += [matrix[i] for i in support_idxes]
            queries += [matrix[i] for i in query_idxes]
        correct_count = 0
        for qi,query in enumerate(queries):
            scores = []
            for si,support in enumerate(supports):
                if verbose:
                    print("Process", process_id, ":", qi*n*k+si, "/", n*qk*k*n)
                scores.append(align(support, query, out))
            # Sum the k support scores per class; predicted class = best sum.
            scores = np.array(scores).reshape(n,k).sum(-1)
            predict = np.argmax(scores)
            # Queries are laid out class-by-class, so true label is qi // qk.
            correct_count += (predict==(qi//qk))
        epoch_acc = correct_count / (n*qk)
        acc_queue.put(epoch_acc)
        # acc_sum.append(epoch_acc)
        # if acc_dump_path is not None:
        #     dumpIterable(acc_sum, "acc", acc_dump_path)
        # print("acc=", epoch_acc)
        tm.step()
    # print("\n*********************************************")
    # print("Avg acc: ", sum(acc_sum)/epoch)
    # print("Total time:", tm.step(prt=False,end=True))
    # print("95%% belief interval:", calBeliefeInterval(acc_sum))
    #
    # if log_path is not None:
    #     out.close()
def multi_process_align(str_path, epoch=1000, log_path=None, verbose=False,
                        acc_dump_path=None, process_num=3):
    """Run scoreEpisodeAlignment in `process_num` worker processes.

    Episode accuracies are collected from a shared Queue, appended to
    `acc_sum` (optionally resumed from / persisted to `acc_dump_path`)
    and summarised at the end.

    NOTE(review): `log_path` and `verbose` are accepted but not forwarded
    to the workers (same as the original code) — confirm intent.
    """
    if acc_dump_path is not None:
        if not os.path.exists(acc_dump_path):
            dumpIterable([], "acc", acc_dump_path)
        acc_sum = loadJson(acc_dump_path)['acc']
    else:
        acc_sum = []

    matrix = loadJson(str_path)['strings']
    queue = Queue()

    tm = StepTimer()
    tm.begin()

    # bugfix: the original gave every worker epoch//process_num episodes, so
    # whenever process_num did not divide epoch (e.g. the defaults: 1000 with
    # process_num=3) fewer than `epoch` results were ever produced and the
    # collector loop below blocked forever. Spread the remainder over the
    # first workers so the counts always add up to `epoch`.
    base_epochs, remainder = divmod(epoch, process_num)
    process_pool = []
    for i in range(process_num):
        worker_epochs = base_epochs + (1 if i < remainder else 0)
        p = Process(target=scoreEpisodeAlignment,
                    args=(matrix, queue, i + 1, worker_epochs))
        process_pool.append(p)
        p.start()

    count = 0
    while count < epoch:
        cur_acc = queue.get(block=True)
        count += 1
        print("#", count, "acc=", cur_acc)
        acc_sum.append(cur_acc)
        if acc_dump_path is not None:
            dumpIterable(acc_sum, "acc", acc_dump_path)

    for p in process_pool:
        p.join()

    print("\n*********************************************")
    # bugfix: average over the actual number of accumulated accuracies — when
    # resuming from acc_dump_path, len(acc_sum) > epoch and dividing by
    # `epoch` overstated the average.
    print("Avg acc: ", sum(acc_sum) / len(acc_sum))
    print("Total time:", tm.step(prt=False, end=True))
    print("95%% belief interval:", calBeliefeInterval(acc_sum))
# def genFamilyProtoByMSA(str_path, work_space, proto_dump_path):
# protos = {}
# strs = loadJson(str_path)['strings']
#
# for i in range(0,len(strs)-1,N):
# print(i,"->",i+N)
# fa_strs = strs[i:i+N]
# with open(work_space+"family_"+str(i//N+1)+"_input.txt", "w") as f:
# try:
# for j in range(N):
# f.write(f"> {j+1}\n")
# f.write(fa_strs[j]+"\n")
# except Exception as e:
# print(f"len={len(fa_strs)} i={i} j={j} msg={str(e)}")
# raise RuntimeError
if __name__ == '__main__':
    # Pipeline driver for the HKS-api dataset. The earlier stages
    # (clustering, category conversion, single-process scoring) are kept
    # commented out and were presumably run once to produce the inputs
    # consumed below — confirm before re-enabling.
    mng = PathManager("HKS-api")
    # apiCluster(mng.WordEmbedMatrix(), mng.DataRoot()+"CategoryMapping.json")
    # convertApiCategory(clst_path=mng.DataRoot()+"CategoryMapping.json",
    #                    word_map_path=mng.WordIndexMap(),
    #                    json_path=mng.DatasetBase()+'all-rmsub/',
    #                    str_dump_path=mng.DataRoot()+"CategorizedStringData(rmsub).json")
    # genFamilyProtoByMSA(str_path=mng.DataRoot()+"CategorizedStringData.json",
    #                     work_space="D:/datasets/virushare-20-original/data/family_protos/",
    #                     proto_dump_path=mng.DataRoot()+"FamilyProtos.txt")
    # scoreEpisodeAlignment(str_path=mng.DataRoot()+"CategorizedStringData(rmsub).json",
    #                       epoch=300,
    #                       log_path=mng.DataRoot()+'logs/runlog.txt',
    #                       acc_dump_path=mng.DataRoot()+"logs/Align-Virushare20-%dshot-%dway.json"%(k,n))
    # Run the multi-process alignment evaluation; accuracies are persisted
    # so interrupted runs can be resumed.
    multi_process_align(str_path=mng.DataRoot()+"CategorizedStringData(rmsub).json",
                        epoch=1000,
                        acc_dump_path=mng.DataRoot()+"logs/Align-HKS-%dshot-%dway.json"%(k,n),
                        process_num=4)
    # accs = loadJson(mng.DataRoot()+"logs/Align-HKS-%dshot-%dway.json"%(k,n))['acc']
    # print("Avg acc:", sum(accs)/len(accs))
    # print("Interval:", calBeliefeInterval(accs))
    # print("Len:", len(accs))
|
{"hexsha": "769593451cd9a85df84017573399ec6c04cd8a30", "size": 8213, "ext": "py", "lang": "Python", "max_stars_repo_path": "baselines/alignment.py", "max_stars_repo_name": "Asichurter/APISeqFewShot", "max_stars_repo_head_hexsha": "b4b7843da1f53cdc1d1711537c31305e7d5c6555", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-05-14T19:29:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T03:29:51.000Z", "max_issues_repo_path": "baselines/alignment.py", "max_issues_repo_name": "Asichurter/APISeqFewShot", "max_issues_repo_head_hexsha": "b4b7843da1f53cdc1d1711537c31305e7d5c6555", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "baselines/alignment.py", "max_forks_repo_name": "Asichurter/APISeqFewShot", "max_forks_repo_head_hexsha": "b4b7843da1f53cdc1d1711537c31305e7d5c6555", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8008474576, "max_line_length": 106, "alphanum_fraction": 0.6006331426, "include": true, "reason": "import numpy", "num_tokens": 2087}
|
[STATEMENT]
lemma dagger_slide_var1_eq: "x\<^sup>\<dagger> \<cdot> x = x \<cdot> x\<^sup>\<dagger>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x\<^sup>\<dagger> \<cdot> x = x \<cdot> x\<^sup>\<dagger>
[PROOF STEP]
by (metis local.dagger_unfoldl_distr local.dagger_unfoldr_eq local.distrib_left local.mult_1_right mult_assoc)
|
{"llama_tokens": 132, "file": "Kleene_Algebra_Conway", "length": 1}
|
import numpy as np
import xarray
def gaussian2d(pg: xarray.Dataset, Qpeak: float, Qbackground: float) -> np.ndarray:
    """Build a 2-D Gaussian precipitation map over the (mlon, mlat) grid of *pg*.

    The Gaussian is centred on the mean magnetic longitude/latitude; its
    widths come from the dataset attributes ``mlon_sigma`` / ``mlat_sigma``,
    and whichever attribute is present controls the corresponding dimension.

    Parameters
    ----------
    pg : xarray.Dataset
        Grid with ``mlon``/``mlat`` coordinates and at least one of the
        ``mlon_sigma`` / ``mlat_sigma`` attributes.
    Qpeak : float
        Peak value at the centre of the Gaussian.
    Qbackground : float
        Floor applied wherever the Gaussian falls below it.

    Returns
    -------
    np.ndarray
        Precipitation array, clipped from below at *Qbackground*.

    Raises
    ------
    LookupError
        If neither sigma attribute is present.
    """
    mlon_mean = pg.mlon.mean().item()
    mlat_mean = pg.mlat.mean().item()

    if "mlon_sigma" in pg.attrs and "mlat_sigma" in pg.attrs:
        Q = (
            Qpeak
            * np.exp(-((pg.mlon.data[:, None] - mlon_mean) ** 2) / (2 * pg.mlon_sigma ** 2))
            * np.exp(-((pg.mlat.data[None, :] - mlat_mean) ** 2) / (2 * pg.mlat_sigma ** 2))
        )
    elif "mlon_sigma" in pg.attrs:
        Q = Qpeak * np.exp(-((pg.mlon.data[:, None] - mlon_mean) ** 2) / (2 * pg.mlon_sigma ** 2))
    elif "mlat_sigma" in pg.attrs:
        Q = Qpeak * np.exp(-((pg.mlat.data[None, :] - mlat_mean) ** 2) / (2 * pg.mlat_sigma ** 2))
    else:
        # bugfix: error message previously misspelled "precipation"
        raise LookupError("precipitation must be defined in latitude, longitude or both")

    Q[Q < Qbackground] = Qbackground

    return Q
|
{"hexsha": "57d517cc796b064c28fb50a0f335cf3d4ccf2431", "size": 905, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/gemini3d/particles/gaussian2d.py", "max_stars_repo_name": "qwsae10/pygemini", "max_stars_repo_head_hexsha": "adc6b2401ac9fc8b7cb1fc8870322f730a3383a3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-12-08T15:59:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-22T02:49:11.000Z", "max_issues_repo_path": "src/gemini3d/particles/gaussian2d.py", "max_issues_repo_name": "qwsae10/pygemini", "max_issues_repo_head_hexsha": "adc6b2401ac9fc8b7cb1fc8870322f730a3383a3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-02-11T19:32:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-13T19:55:29.000Z", "max_forks_repo_path": "src/gemini3d/particles/gaussian2d.py", "max_forks_repo_name": "qwsae10/pygemini", "max_forks_repo_head_hexsha": "adc6b2401ac9fc8b7cb1fc8870322f730a3383a3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-19T23:31:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-19T23:31:47.000Z", "avg_line_length": 34.8076923077, "max_line_length": 98, "alphanum_fraction": 0.5690607735, "include": true, "reason": "import numpy", "num_tokens": 291}
|
import numpy as np
import theano
import theano.tensor as T
import q_learning
class SGDRegressor:
    """Linear regressor trained by plain SGD, implemented as a Theano graph.

    Exposes the same interface as the default approximator in q_learning:
    `partial_fit(X, Y)` performs one gradient step, `predict(X)` returns
    the linear predictions X.dot(w).
    """

    def __init__(self, D):
        print("Hello Theano!")
        # Random initial weights, scaled by 1/sqrt(D).
        initial_w = np.random.randn(D) / np.sqrt(D)
        self.w = theano.shared(initial_w)
        self.lr = 0.1

        # Symbolic graph: squared-error cost and its gradient w.r.t. w.
        inputs_mat = T.matrix('X')
        targets = T.vector('Y')
        prediction = inputs_mat.dot(self.w)
        residual = targets - prediction
        sq_error = residual.dot(residual)
        gradient = T.grad(sq_error, self.w)

        # Compiled ops: one SGD update step, and a pure prediction function.
        self.train_op = theano.function(
            inputs=[inputs_mat, targets],
            updates=[(self.w, self.w - self.lr * gradient)],
        )
        self.predict_op = theano.function(
            inputs=[inputs_mat],
            outputs=prediction,
        )

    def partial_fit(self, X, Y):
        """Take a single gradient step on the batch (X, Y)."""
        self.train_op(X, Y)

    def predict(self, X):
        """Return predictions for the rows of X."""
        return self.predict_op(X)
if __name__ == '__main__':
    # Inject the Theano-backed regressor into q_learning, replacing its
    # default function approximator, then run the experiment unchanged.
    q_learning.SGDRegressor = SGDRegressor
    q_learning.main()
|
{"hexsha": "96e048ef04a69f5a8b1914013d318bba45a54881", "size": 813, "ext": "py", "lang": "Python", "max_stars_repo_path": "udemy/lazyprogrammer/deep-reinforcement-learning-python/cartpole/theano_warmup.py", "max_stars_repo_name": "balazssimon/ml-playground", "max_stars_repo_head_hexsha": "c2eba497bebc53e5a03807bdd8873c55f0ec73e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "udemy/lazyprogrammer/deep-reinforcement-learning-python/cartpole/theano_warmup.py", "max_issues_repo_name": "balazssimon/ml-playground", "max_issues_repo_head_hexsha": "c2eba497bebc53e5a03807bdd8873c55f0ec73e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "udemy/lazyprogrammer/deep-reinforcement-learning-python/cartpole/theano_warmup.py", "max_forks_repo_name": "balazssimon/ml-playground", "max_forks_repo_head_hexsha": "c2eba497bebc53e5a03807bdd8873c55f0ec73e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.8292682927, "max_line_length": 47, "alphanum_fraction": 0.6113161132, "include": true, "reason": "import numpy,import theano", "num_tokens": 230}
|
# code pour parser in
import numpy as np
# Directory that holds the Hash Code input files.
INSTANCES_DIR = "instances"
# The five problem instances solved by LANCE_LE_PROGRAMME_BORDEL().
instances = [
    "{}/a_example.in".format(INSTANCES_DIR),
    "{}/b_should_be_easy.in".format(INSTANCES_DIR),
    "{}/c_no_hurry.in".format(INSTANCES_DIR),
    "{}/d_metropolis.in".format(INSTANCES_DIR),
    "{}/e_high_bonus.in".format(INSTANCES_DIR)]
def manhattan(a, b):
    """Return the L1 (taxicab) distance between 2-D points *a* and *b*."""
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    return dx + dy
class Ride:
    """One ride request of the Hash Code self-driving-rides problem.

    Attributes: start/finish coordinates, earliest start, latest finish,
    and the precomputed ride length (Manhattan distance).
    """

    def __init__(self, id_ride, init, end, earliest, latests):
        self.id_ride = id_ride
        self.start = init
        self.finish = end
        self.earliest = earliest
        self.latest = latests
        # Ride length, cached once.
        self.distance_trajet = manhattan(init, end)

    def __repr__(self):
        return str(self.__dict__)

    def distance_totale(self, pos_vehicule):
        """Distance for a vehicle at *pos_vehicule* to reach the start and complete the ride."""
        return manhattan(pos_vehicule, self.start) + self.distance_trajet

    def gains_possibles(self, t_debut, t_fin, B):
        """Points earned if the ride starts at t_debut and ends at t_fin.

        The ride distance counts only when it finishes on time; bonus B is
        added when the ride starts exactly at its earliest time.
        """
        total = 0
        # bugfix: was `self.latests`, an attribute that is never set
        # (__init__ stores the parameter as `self.latest`) -> AttributeError.
        if t_fin <= self.latest:
            total += manhattan(self.start, self.finish)
        if t_debut == self.earliest:
            total += B
        return total
def get_score_solution(solution):
    # NOTE(review): `solution_valide` is not defined anywhere in this file,
    # so this call raises NameError; there is also no return value for the
    # valid case (it falls through to None). Function looks unfinished —
    # confirm before use.
    if not solution_valide(solution):
        return -np.inf
def read(file_in):
    """Parse a Hash Code input file.

    Returns (F, rides, T, B): number of vehicles, list of Ride objects,
    number of time steps, and the per-ride on-time bonus.
    """
    # Close the file deterministically (the original leaked the handle on
    # any exception between open() and close()).
    with open(file_in, 'r') as fh:
        lines = fh.read().split(chr(10))

    # Header: rows, columns, vehicles, rides, bonus, steps.
    R, C, F, N, B, T = map(int, lines[0].split())

    rides = []
    for i, t in enumerate(lines[1:]):
        try:
            a, b, x, y, s, f = map(int, t.split())
            rides.append(Ride(i, (a, b), (x, y), s, f))
        except ValueError:
            # Malformed / blank trailing lines are skipped, as before —
            # but only ValueError is swallowed now (was a bare except).
            pass
    return F, rides, T, B
def write(file_out, L):
    """Write a solution file: one line per vehicle, sorted by id.

    `L` is a list of (id, [ride indices]); each output line is the ride
    count followed by the ride indices, newline-terminated.
    """
    # Close the file deterministically (the original leaked the handle on
    # any exception between open() and close()).
    with open(file_out, 'w') as fh:
        for i, trajets in sorted(L):
            fh.write(str(len(trajets)) + ' ' + ' '.join(map(str, trajets)) + chr(10))
def parse_machines(file="machines_list.txt"):
    """Return the non-empty machine names listed one per line in *file*."""
    with open(file) as handle:
        content = handle.read()
    return [name for name in content.split("\n") if name]
def compute_score(file):
    # TODO: unimplemented stub — presumably meant to score a solution file.
    pass
def LANCE_LE_PROGRAMME_BORDEL():
    """Run the greedy solver on every instance and write the matching .out files."""
    # Imported lazily so the parser module can be used without the solver.
    import glouton

    for instance_path in instances:
        F, rides, T, B = read(instance_path)
        solution = glouton.v_first(F, rides, T, B)
        # Replace the ".in" extension with ".out".
        write(instance_path[:-3] + ".out", solution)
if __name__ == '__main__':
    # Entry point: solve all bundled instances and write the .out files.
    LANCE_LE_PROGRAMME_BORDEL()
|
{"hexsha": "eb921a4a93e3abbe507b47b1e8e63331f43b95b1", "size": 2370, "ext": "py", "lang": "Python", "max_stars_repo_path": "Contests/2018-03-01 - Google Hash Code 2018/parser.py", "max_stars_repo_name": "NicolasBizzozzero/Competitive-Programming", "max_stars_repo_head_hexsha": "53ece89623b4aca542d096b2aff18be35b19da6d", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-11T17:19:59.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-23T12:07:38.000Z", "max_issues_repo_path": "Contests/2018-03-01 - Google Hash Code 2018/parser.py", "max_issues_repo_name": "NicolasBi/Competitive-Programming", "max_issues_repo_head_hexsha": "53ece89623b4aca542d096b2aff18be35b19da6d", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Contests/2018-03-01 - Google Hash Code 2018/parser.py", "max_forks_repo_name": "NicolasBi/Competitive-Programming", "max_forks_repo_head_hexsha": "53ece89623b4aca542d096b2aff18be35b19da6d", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9473684211, "max_line_length": 76, "alphanum_fraction": 0.55907173, "include": true, "reason": "import numpy", "num_tokens": 675}
|
import cv2 as cv
import dlib
import numpy as np
import scipy.io as sio
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from config import device
from misc import ensure_folder
from utils.ddfa import ToTensorGjz, NormalizeGjz, _parse_param
from utils.estimate_pose import parse_pose
from utils.inference import predict_68pts, predict_dense, dump_to_ply, get_suffix, get_colors, \
write_obj_with_colors, crop_img, parse_roi_box_from_landmark, parse_roi_box_from_bbox
if __name__ == '__main__':
    # Load a TorchScript-exported 3DDFA model (raw-checkpoint loading kept
    # below for reference).
    # checkpoint = 'BEST_checkpoint.tar'
    # print('loading {}...'.format(checkpoint))
    # checkpoint = torch.load(checkpoint)
    # model = checkpoint['model'].module
    filename_scripted = '3ddfa_scripted.pt'
    model = torch.jit.load(filename_scripted)
    cudnn.benchmark = True
    model = model.to(device)
    model.eval()
    # dlib frontal face detector + 68-point landmark regressor.
    face_detector = dlib.get_frontal_face_detector()
    dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
    face_regressor = dlib.shape_predictor(dlib_landmark_model)
    transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    # filename = 'images/0008.png'
    filename = 'images/jinguanzhang.jpg'
    img_ori = cv.imread(filename)
    # Detect faces; only the first detection is processed.
    rects = face_detector(img_ori, 1)
    rect = rects[0]
    # bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
    # print('bbox: ' + str(bbox))
    # roi_box = parse_roi_box_from_bbox(bbox)
    # print('roi_box: ' + str(roi_box))
    # - use landmark for cropping
    pts = face_regressor(img_ori, rect).parts()
    pts = np.array([[pt.x, pt.y] for pt in pts]).T
    roi_box = parse_roi_box_from_landmark(pts)
    # Crop to the ROI and resize to the network input size (120x120).
    img = crop_img(img_ori, roi_box)
    img = cv.resize(img, (120, 120), interpolation=cv.INTER_LINEAR)
    input = transform(img).unsqueeze(0)
    input = input.to(device)
    # Single forward pass; `param` is the flattened 3DMM parameter vector.
    with torch.no_grad():
        param = model(input)
        param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
    print('param: ' + str(param))
    # Split into pose (p, offset), shape (alpha_shp) and expression (alpha_exp).
    p, offset, alpha_shp, alpha_exp = _parse_param(param)
    print('alpha_exp: ' + str(alpha_exp))
    # 68 pts
    # bbox = [0, 0, 120, 120]
    # roi_box = parse_roi_box_from_bbox(bbox)
    pts68 = predict_68pts(param, roi_box)
    # print('pts68: ' + str(pts68))
    print('pts68.shape: ' + str(pts68.shape))
    P, pose = parse_pose(param)
    # print('P: ' + str(P))
    print('P.shape: ' + str(P.shape))
    print('pose: ' + str(pose))
    # Dense vertex reconstruction from the same parameters.
    vertices = predict_dense(param, roi_box)
    # print('vertices: ' + str(vertices))
    print('vertices.shape: ' + str(vertices.shape))
    # Export a PLY mesh and a colour-textured OBJ named after the input image.
    ensure_folder('result')
    suffix = get_suffix(filename)
    print('suffix: ' + suffix)
    tri = sio.loadmat('visualize/tri.mat')['tri']
    dump_to_ply(vertices, tri, '{}.ply'.format(filename.replace(suffix, '')))
    wfp = '{}.obj'.format(filename.replace(suffix, ''))
    colors = get_colors(img_ori, vertices)
    write_obj_with_colors(wfp, vertices, tri, colors)
    print('Dump obj with sampled texture to {}'.format(wfp))
|
{"hexsha": "7c4557b4774d2ce66aa1457db6a7c7c79a60c6a4", "size": 3060, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo.py", "max_stars_repo_name": "foamliu/3DDFA-v2", "max_stars_repo_head_hexsha": "addadf3fdbf39d1d6ad2d8913fc9f87c7c66b53f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-20T06:13:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-31T13:05:23.000Z", "max_issues_repo_path": "demo.py", "max_issues_repo_name": "foamliu/3DDFA-v2", "max_issues_repo_head_hexsha": "addadf3fdbf39d1d6ad2d8913fc9f87c7c66b53f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-02-20T06:33:37.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-25T10:35:07.000Z", "max_forks_repo_path": "demo.py", "max_forks_repo_name": "foamliu/3DDFA-v2", "max_forks_repo_head_hexsha": "addadf3fdbf39d1d6ad2d8913fc9f87c7c66b53f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-19T11:45:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-04T09:23:53.000Z", "avg_line_length": 34.3820224719, "max_line_length": 96, "alphanum_fraction": 0.6826797386, "include": true, "reason": "import numpy,import scipy", "num_tokens": 823}
|
import os.path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import pyplot
from pandas.api.types import is_string_dtype
from sklearn import metrics
from sklearn.decomposition import PCA
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier, RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from dataset_creation import read_data
# Options for pandas: show every row/column when printing frames.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Per-model score accumulators: appended by print_scores() and rendered
# by plot_results(); index i of each list belongs to model_names[i].
accuracy = []
precision = []
recall = []
f1 = []
model_names = []
def read_data_from_questionnaire():
    """Load and clean final_questionnaire.csv.

    Binarizes the follow ("Ναι"/"Όχι") and gender ("Άντρας"/"Γυναίκα")
    columns, drops the identifier/free-text columns, coerces everything
    numeric and keeps only plausible ages (0-100). Returns the cleaned
    DataFrame.
    """
    df = pd.read_csv("final_questionnaire.csv",
                     sep=',',
                     header=0,
                     skiprows=0)
    print('Dataset size is: ', len(df))
    print(df.head(5))

    # 0 when the value equals `zero_label`, 1 otherwise.
    def _binarize(series, zero_label):
        return series.apply(lambda value: 0 if value == zero_label else 1)

    df['follow'] = _binarize(df['follow'], 'Όχι')
    df['gender'] = _binarize(df['gender'], 'Άντρας')
    print(df.head(5))
    print(df.columns)

    # Identifier / free-text columns carry no numeric signal.
    df.drop(['name', 'reasons', 'category'], axis='columns', inplace=True)
    print(df.shape)

    # Everything left should be numeric; unparsable cells become NaN.
    for column in df.columns:
        df[column] = pd.to_numeric(df[column], errors='coerce')

    # Keep only plausible ages (0..100 inclusive).
    df = df[df['age'].between(0, 100)]
    print(df.shape)

    # Discard rows with any remaining NaN feature.
    df = df.dropna()
    print(df.shape)
    return df
def preprocess_data_from_mongo(df):
    """Clean the crowdtangle frame in place and return it.

    String columns get '%' stripped, '--' replaced by 0 and thousand
    separators removed, then (except 'category') are cast to numbers.
    follow_probability is binarized at 0.5 and NaN rows are dropped.
    """
    for column in df.columns:
        if is_string_dtype(df[column]):
            cleaned = df[column].map(lambda v: str(v).replace('%', ''))
            cleaned = cleaned.map(lambda v: str(v).replace('--', '0'))
            cleaned = cleaned.map(lambda v: str(v).replace(',', ''))
            df[column] = cleaned
            # The category label stays textual; everything else becomes numeric.
            if column != 'category':
                df[column] = pd.to_numeric(df[column], errors='coerce')

    # Threshold the continuous follow probability into a 0/1 target.
    df['follow_probability'] = df['follow_probability'].apply(lambda x: 1 if x >= 0.5 else 0)

    # Discard rows with any NaN feature.
    df.dropna(inplace=True)
    return df
def print_scores(y_true, y_pred, model_name):
    """Print accuracy/precision/recall/F1 for a model and append the scores
    to the module-level accumulator lists used by plot_results()."""
    acc = metrics.accuracy_score(y_true, y_pred)
    prec = metrics.precision_score(y_true, y_pred)
    rec = metrics.recall_score(y_true, y_pred)
    f1_value = metrics.f1_score(y_true, y_pred)

    print("Results with ", model_name)
    print("Accuracy: {:.5f}".format(acc))
    print("Precision: {:.5f}".format(prec))
    print("Recall: {:.5f}".format(rec))
    print("F1: {:.5f}".format(f1_value))

    accuracy.append(acc)
    precision.append(prec)
    recall.append(rec)
    f1.append(f1_value)
    model_names.append(model_name)
def plot_results(name):
    """Grouped bar chart of the accumulated model scores, saved as <name>.png."""
    width = 0.2  # the width of the bars
    positions = np.arange(len(model_names))

    plt.bar(positions - 0.3, accuracy, width, label='Accuracy', color='red')
    plt.bar(positions - 0.1, precision, width, label='Precision', color='purple')
    plt.bar(positions + 0.1, recall, width, label='Recall')
    plt.bar(positions + 0.3, f1, width, label='F1')
    plt.xticks(positions, model_names, rotation=45)

    # Pad margins so that markers don't get clipped by the axes.
    plt.margins(0.2)
    # Tweak spacing to prevent clipping of tick-labels.
    plt.subplots_adjust(bottom=0.25)

    plt.xlabel("Models")
    plt.ylabel("Scores")
    plt.title("Machine Learning Model Scores")
    plt.legend()
    plt.savefig(name + '.png')
    plt.show()
def fit_predict(x_train, x_test, model):
    """Fit `model` on x_train (unsupervised fit signature) and return its
    predictions for x_test."""
    model.fit(x_train)
    return model.predict(x_test)
def make_prediction(dataset, prediction_type):
    """Train and evaluate a battery of classifiers on *dataset*.

    The last column of *dataset* is the binary target; all other columns are
    features. Data is split 70/30 (stratified), MinMax-scaled, and — for
    ``prediction_type == 'mongo_info'`` — PCA-reduced to 95% variance.
    Each model's scores are printed and accumulated via print_scores().

    Parameters
    ----------
    dataset : pandas.DataFrame
        Feature columns followed by the target column.
    prediction_type : str
        'questionnaire_info' (adds Naive Bayes + Voting classifiers) or
        'mongo_info' (adds PCA preprocessing).
    """
    print(dataset.head(2))
    X = dataset.iloc[:, :-1].values
    y = dataset.iloc[:, -1].values

    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=2)

    # Scale all the data with MinMaxScaler
    # ---------------------------------------------------------------
    scaler = MinMaxScaler()
    x_train_s = scaler.fit_transform(x_train)
    x_test_s = scaler.transform(x_test)

    if prediction_type == 'mongo_info':
        # Reduce the wide crowdtangle feature space, keeping 95% variance.
        pca = PCA(0.95)
        pca.fit(x_train_s)
        x_train_final = pca.transform(x_train_s)
        x_test_final = pca.transform(x_test_s)
    else:
        x_train_final = x_train_s
        x_test_final = x_test_s

    if prediction_type == 'questionnaire_info':
        # Naive Bayes
        # ---------------------------------------------------------------
        # bugfix: the original fit on the raw, unscaled x_train but predicted
        # and scored on the scaled x_train_final/x_test_final — a train/test
        # representation mismatch. Fit on the same features that are scored
        # (MinMax-scaled values are non-negative, as MultinomialNB requires).
        mnb = MultinomialNB().fit(x_train_final, y_train)
        y_predicted = mnb.predict(x_test_final)
        print_scores(y_test, y_predicted, "MultinomialNB")
        print("score on train: ", str(mnb.score(x_train_final, y_train)), "\n")
        # ---------------------------------------------------------------

    # Logistic Regression (grid-searched over C / tol / penalty)
    # ---------------------------------------------------------------
    grid = {"C": np.logspace(-3, 3, 7), "tol": [1e-2, 1e-3, 1e-4, 1e-5], "penalty": ["l1", "l2"], "solver": ["saga"], "max_iter": [5000]}
    lr = GridSearchCV(LogisticRegression(), param_grid=grid)
    lr.fit(x_train_final, y_train)
    y_predicted = lr.predict(x_test_final)
    print_scores(y_test, y_predicted, "Logistic Regression")
    print("score on train: ", str(lr.score(x_train_final, y_train)), "\n")
    # ---------------------------------------------------------------

    # K Neighbors Classifier
    # ---------------------------------------------------------------
    knn = KNeighborsClassifier(algorithm='brute', n_jobs=-1)
    knn.fit(x_train_final, y_train)
    y_predicted = knn.predict(x_test_final)
    print_scores(y_test, y_predicted, "K Neighbors Classifier")
    print("score on train: ", str(knn.score(x_train_final, y_train)), "\n")
    # ---------------------------------------------------------------

    # Support Vector Machine (RBF kernel, grid-searched over C / gamma)
    # ---------------------------------------------------------------
    param_grid = {'C': [0.1, 1, 10, 100, 1000],
                  'gamma': [1, 0.1, 0.01, 0.001, 0.0001],
                  'kernel': ['rbf']}
    svm = GridSearchCV(SVC(), param_grid)
    svm.fit(x_train_final, y_train)
    y_predicted = svm.predict(x_test_final)
    print_scores(y_test, y_predicted, "Support Vector Machine")
    print("score on train: " + str(svm.score(x_train_final, y_train)), "\n")
    # ---------------------------------------------------------------

    # Decision Tree Classifier
    # ---------------------------------------------------------------
    clf = DecisionTreeClassifier()
    clf.fit(x_train_final, y_train)
    y_predicted = clf.predict(x_test_final)
    print_scores(y_test, y_predicted, "Decision Tree Classifier")
    print("score on train: " + str(clf.score(x_train_final, y_train)), "\n")
    print(clf.feature_importances_)
    # ---------------------------------------------------------------

    # Bagging Decision Tree
    # ---------------------------------------------------------------
    # max_samples: each tree sees at most 50% of the training set
    # max_features: each tree sees 100% of the features
    # n_estimators: number of decision trees
    bg = BaggingClassifier(DecisionTreeClassifier(), max_samples=0.5, max_features=1.0, n_estimators=10)
    bg.fit(x_train_final, y_train)
    y_predicted = bg.predict(x_test_final)
    print_scores(y_test, y_predicted, "Bagging Decision Tree")
    print("score on train: " + str(bg.score(x_train_final, y_train)), "\n")
    # ---------------------------------------------------------------

    # Boosting Decision Tree
    # ---------------------------------------------------------------
    adb = AdaBoostClassifier(DecisionTreeClassifier(min_samples_split=10, max_depth=4), n_estimators=10,
                             learning_rate=0.6)
    adb.fit(x_train_final, y_train)
    y_predicted = adb.predict(x_test_final)
    print_scores(y_test, y_predicted, "Boosting Decision Tree")
    print("score on train: " + str(adb.score(x_train_final, y_train)), "\n")
    # ---------------------------------------------------------------

    # Random Forest Classifier
    # ---------------------------------------------------------------
    rf = RandomForestClassifier(n_estimators=30, max_depth=9)
    rf.fit(x_train_final, y_train)
    y_predicted = rf.predict(x_test_final)
    print_scores(y_test, y_predicted, "Random Forest Classifier")
    print("score on train: " + str(rf.score(x_train_final, y_train)), "\n")

    # Random Forest built-in feature importance
    importance = rf.feature_importances_
    importanceDict = {}
    listImp = []
    print('Random Forest Build-in importance')
    for i, v in enumerate(importance):
        print('Feature: %0d, Score: %.5f' % (i, v))
        listImp.append(v)
        importanceDict[v] = i
    print()
    # plot feature importance
    pyplot.bar([x for x in range(len(importance))], importance)
    pyplot.show()
    # ---------------------------------------------------------------

    if prediction_type == 'questionnaire_info':
        # Voting Classifier: hard majority vote over
        # MultinomialNB, LogisticRegression, RandomForest and SVM.
        evc = VotingClassifier(estimators=[('mnb', mnb), ('lr', lr), ('rf', rf), ('svm', svm)], voting='hard')
        evc.fit(x_train_final, y_train)
        y_predicted = evc.predict(x_test_final)
        print_scores(y_test, y_predicted, "Voting Classifier")
        print("score on train: " + str(evc.score(x_train_final, y_train)), "\n")
        # ---------------------------------------------------------------
if __name__ == '__main__':
    #Make prediction on the questionnaire
    print("Machine learning methods: Questionnaire")
    dataset_quest = read_data_from_questionnaire()
    make_prediction(dataset_quest,'questionnaire_info')
    plot_results("questionnaire")
    # Reset the module-level score accumulators before the second run
    # (these rebind the globals read by print_scores/plot_results).
    accuracy = []
    precision = []
    recall = []
    f1 = []
    model_names = []
    # Make prediction on the crowdtangle's info
    print("Machine learning methods: Crowdtangle")
    # Use the cached CSV when present, otherwise rebuild it from Mongo.
    # NOTE(review): if neither prediction_info.csv nor questionnaire.csv
    # exists, dataset_mongo_updated is never bound and the call below
    # raises NameError — confirm intended preconditions.
    if os.path.isfile('prediction_info.csv'):
        dataset_mongo_updated = pd.read_csv("prediction_info.csv")
        pass
    elif os.path.isfile('questionnaire.csv'):
        dataset_mongo = read_data.get_influencer_info()
        dataset_mongo_updated = preprocess_data_from_mongo(dataset_mongo)
        dataset_mongo_updated.to_csv('prediction_info.csv', index=False)
    make_prediction(dataset_mongo_updated, 'mongo_info')
    plot_results("crowdtangle")
|
{"hexsha": "d480a7c5bfb641ab06cfea4b9f1888c1ce5e65f5", "size": 11758, "ext": "py", "lang": "Python", "max_stars_repo_path": "predictions/prediction.py", "max_stars_repo_name": "alfagama/INstagramINfluencers", "max_stars_repo_head_hexsha": "cd455f5e0d2c80629fe3a29229f9308646252dfe", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predictions/prediction.py", "max_issues_repo_name": "alfagama/INstagramINfluencers", "max_issues_repo_head_hexsha": "cd455f5e0d2c80629fe3a29229f9308646252dfe", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-22T07:20:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-22T07:20:06.000Z", "max_forks_repo_path": "predictions/prediction.py", "max_forks_repo_name": "alfagama/INstagramINfluencers", "max_forks_repo_head_hexsha": "cd455f5e0d2c80629fe3a29229f9308646252dfe", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-29T13:38:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-10T13:19:01.000Z", "avg_line_length": 39.3244147157, "max_line_length": 137, "alphanum_fraction": 0.5895560469, "include": true, "reason": "import numpy", "num_tokens": 2727}
|
[STATEMENT]
lemma param_foldli[param]: "(foldli, foldli)
\<in> \<langle>Re\<rangle>list_rel \<rightarrow> (Rs\<rightarrow>Id) \<rightarrow> (Re\<rightarrow>Rs\<rightarrow>Rs) \<rightarrow> Rs \<rightarrow> Rs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (foldli, foldli) \<in> \<langle>Re\<rangle>list_rel \<rightarrow> (Rs \<rightarrow> bool_rel) \<rightarrow> (Re \<rightarrow> Rs \<rightarrow> Rs) \<rightarrow> Rs \<rightarrow> Rs
[PROOF STEP]
unfolding foldli_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (rec_list (\<lambda>c f \<sigma>. \<sigma>) (\<lambda>x xs xsa c f \<sigma>. if c \<sigma> then xsa c f (f x \<sigma>) else \<sigma>), rec_list (\<lambda>c f \<sigma>. \<sigma>) (\<lambda>x xs xsa c f \<sigma>. if c \<sigma> then xsa c f (f x \<sigma>) else \<sigma>)) \<in> \<langle>Re\<rangle>list_rel \<rightarrow> (Rs \<rightarrow> bool_rel) \<rightarrow> (Re \<rightarrow> Rs \<rightarrow> Rs) \<rightarrow> Rs \<rightarrow> Rs
[PROOF STEP]
by parametricity
|
{"llama_tokens": 360, "file": "Automatic_Refinement_Parametricity_Param_HOL", "length": 2}
|
import sys
import math
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
import numpy as np
## default settings
MAX_ITERATIONS = 10000   # gradient-descent iterations per restart
MAX_INITS = 2            # number of random restarts
ABORT_EARLY = True       # if we can't improve anymore, abort early
LEARNING_RATE = 1e-2     # larger values converge faster to less accurate results
TARGETED = True          # should we target one specific class? or just be wrong?
CONFIDENCE = 0           # margin added to the hinge logit loss
INITIAL_CONST = 1e-3     # fixed trade-off constant c between the two loss terms
POSITIVE_MEAN = 0        # 0: free mean; 1: biased penalty; 2: softplus; 3: clipping
VARIANCE = .1            # std-dev (scale) of the Gaussian noise around each cell mean
NUM_ROWS = 1             # noise-grid rows (must divide model.image_size)
NUM_COLUMNS = 1          # noise-grid columns (must divide model.image_size)
class ManRGB:
    """C&W-style adversarial attack (TF1 graph) that optimizes a coarse
    grid of per-cell RGB noise means; the noise is passed through an
    (optional) analog channel emulation and added to the input image so
    that the classifier's prediction is flipped (targeted or untargeted).

    The whole graph is built in __init__; attack() only runs the
    already-built optimizer on one image.
    """
    def __init__(self, sess, model, batch_size=1, confidence=CONFIDENCE,
                 targeted=TARGETED, learning_rate=LEARNING_RATE,
                 max_iterations=MAX_ITERATIONS, abort_early=ABORT_EARLY,
                 initial_const=INITIAL_CONST, max_inits = MAX_INITS,
                 positive_mean=POSITIVE_MEAN, variance=VARIANCE, num_rows=NUM_ROWS,
                 num_columns=NUM_COLUMNS, channel=None):
        """Build the attack graph.

        Parameters
        ----------
        sess : TF1 session used for every run.
        model : classifier exposing `image_size`, `num_channels`,
            `num_labels` and `predict(images) -> logits`.
        batch_size : number of noise realizations sampled per step.
        confidence : margin added inside the hinge logit loss.
        targeted : if True push toward the target class, else away from it.
        learning_rate : Adam step size.
        max_iterations, abort_early, max_inits : optimization / restart
            controls consumed by `attack`.
        initial_const : fixed weight of the logit loss relative to the
            mean-norm penalty (no binary search is performed here).
        positive_mean : accepted but never read in this implementation;
            softplus is always applied to the mean -- TODO confirm intent.
        variance : std-dev (scale) of the Gaussian noise per grid cell.
        num_rows, num_columns : resolution of the noise grid; each must
            divide `model.image_size` evenly.
        channel : optional dict of analog-channel constants
            ('color_matrix', 'const_dig', 'const_ana', 'ana_intensity',
            'dig_intensity', 'const_ill', 'const_rho', 'env_ill');
            None emulates an identity channel.
        """
        self.sess = sess
        self.ABORT_EARLY = abort_early
        self.MAX_ITERATIONS = max_iterations
        self.MAX_INITS = max_inits
        self.TARGETED = targeted
        image_size = model.image_size
        num_channels = model.num_channels
        num_labels = model.num_labels
        # the noise grid must tile the image exactly
        assert image_size % num_rows == 0
        assert image_size % num_columns == 0
        cell_width = int(image_size / num_columns)
        cell_height = int(image_size / num_rows)
        # to make the following code shorter...
        sample_shape = (batch_size, image_size, image_size, num_channels)
        # as ndarray so it can be fancy-indexed below (sample_shape[[0, 3, 1, 2]])
        sample_shape = np.asarray(sample_shape)
        variable_shape = (num_rows, num_columns, num_channels)
        cell_shape = (batch_size, cell_width, cell_height)
        # this way is more efficient in sending data to tf every iteration:
        # assign once per attack via placeholders instead of re-feeding
        self.assign_tlab = tf.placeholder(tf.float32, shape=[num_labels])
        self.assign_timg = tf.placeholder(tf.float32, shape=sample_shape[1:])
        self.tlab = tf.Variable(np.zeros(num_labels), dtype=tf.float32)
        self.timg = tf.Variable(np.zeros(sample_shape[1:]), dtype=tf.float32)
        # one trainable mean per grid cell and channel, started uniform in [0, 1)
        init_noi_mean = tf.random.uniform(dtype=tf.float32,
            shape=variable_shape, minval=0., maxval=1.)
        noi_mean = tf.Variable(init_noi_mean, validate_shape=False)
        # softplus keeps the effective mean strictly positive
        self.noi_mean_nonneg = tf.math.softplus(noi_mean)
        def channel_model(channel, x, delta):
            """Map the raw noise means `delta` (rows, cols, channels) onto
            an image-sized noisy modifier and combine it with the clean
            image `x`, emulating the analog channel when one is given."""
            ## channel emulation
            if channel:
                color_matrix = channel['color_matrix']
                const_dig = channel['const_dig']
                const_ana = channel['const_ana']
                ana_intensity = channel['ana_intensity']
                digital_intensity = channel['dig_intensity']
                const_ill = channel['const_ill']
                const_rho = channel['const_rho']
                env_ill = channel['env_ill']
            else: # y = (x_f/|x_f| + x_o)
                # identity channel: no color mixing, saturated illumination.
                # NOTE(review): with const_ill = env_ill = inf, ill saturates
                # to 1 but gamma = env_ill / (ill + env_ill) becomes inf/inf
                # = NaN -- confirm whether the channel=None path is ever used.
                color_matrix = np.identity(3).astype(np.float32)
                const_dig, const_ana, ana_intensity = 0., 0., 0.
                const_ill, env_ill, const_rho = np.inf, np.inf, 1.
                digital_intensity = 1.
            # we will look at only the ratio among three channels
            noi_mean_max = tf.reduce_max(delta, axis=[2])[:, :, None]
            noi_mean_normalized = delta / noi_mean_max * digital_intensity
            # color mapping: tensor -> matrix -> tensor
            shape_2d = [num_channels, num_rows * num_columns]
            shape_3d = [num_channels, num_rows, num_columns]
            noi_mean_mapped = tf.transpose(noi_mean_normalized, perm=[2, 0, 1])
            noi_mean_mapped = tf.reshape(noi_mean_mapped, shape=shape_2d)
            noi_mean_mapped = tf.matmul(color_matrix, noi_mean_mapped)
            noi_mean_mapped = tf.reshape(noi_mean_mapped, shape=shape_3d)
            noi_mean_mapped = tf.transpose(noi_mean_mapped, perm=[1, 2, 0])
            # illumination response driven by digital and analog intensities
            dig_intensity = tf.reduce_mean(noi_mean_normalized)
            ill = tf.sigmoid(const_dig * dig_intensity
                + const_ana * ana_intensity + const_ill)
            noi_mean_boosted = noi_mean_mapped * ill * const_rho
            # setup noise distributions
            tfd = tfp.distributions
            dis = tfd.Normal(loc=noi_mean_boosted, scale=variance)
            # draw (batch, cell_w, cell_h) samples per grid cell, then
            # rearrange into full-image layout.  NOTE(review): the exact
            # transpose/reshape interleaving of cells vs. pixels is assumed
            # correct here -- confirm against the original repo/paper.
            noi = dis.sample(cell_shape)
            noi = tf.transpose(noi, perm=[0, 5, 3, 2, 4, 1])
            noi = tf.reshape(noi, shape=sample_shape[[0, 3, 1, 2]])
            modifier = tf.transpose(noi, perm=[0, 2, 3, 1])
            # mix the noisy modifier with the clean image, weighted by the
            # ambient illumination
            gamma = env_ill / (ill + env_ill)
            y = gamma * (modifier + x)
            return y
        self.newimg = channel_model(channel, self.timg, self.noi_mean_nonneg)
        self.l2dist = tf.norm(self.newimg - self.timg) # l2 distance
        self.output = model.predict(self.newimg) # logits
        # compute the logit of the label class versus the maximum other
        self.real = tf.reduce_sum((self.tlab)*self.output, 1) # target-label logit
        self.other = tf.reduce_max((1-self.tlab)*self.output -
                                   (self.tlab*10000), 1) # largest non-target logit
        if self.TARGETED: # optimize for making the other class most likely
            self.logit_loss = tf.maximum(0.0, self.other-self.real+confidence)
        else: # optimize for making this class least likely.
            self.logit_loss = tf.maximum(0.0, self.real-self.other+confidence)
        ## losses: keep the (pre-softplus) mean small + hinge logit loss
        self.loss_mean = tf.norm(noi_mean)
        self.loss_logits = tf.reduce_sum(self.logit_loss) / sample_shape[0]
        self.loss = self.loss_mean + initial_const * self.loss_logits
        # Setup the adam optimizer; only the noise mean is trainable
        var_list = [noi_mean]
        optimizer = tf.train.AdamOptimizer(learning_rate)
        self.train = optimizer.minimize(self.loss, var_list=var_list)
        ## Initialization op: re-run per attack/restart to reset the image,
        ## label, noise mean, and Adam slots
        init_tlab = self.tlab.assign(self.assign_tlab)
        init_timg = self.timg.assign(self.assign_timg)
        init_noi = tf.variables_initializer(var_list)
        init_opt = tf.variables_initializer(optimizer.variables())
        self.initializer = tf.group([init_tlab, init_timg, init_noi, init_opt])
    def attack(self, img, target):
        """Run the attack on one image.

        img : clean image of shape (image_size, image_size, num_channels).
        target : one-hot label vector of length num_labels.

        Returns (success_ratio, best_image, best_noise_mean, logit_loss)
        for the iterate with the lowest logit loss seen over all restarts.

        NOTE(review): if logit_loss ever evaluates to NaN (see the
        channel=None note in __init__), the best-iterate branch is never
        taken and best_img/best_mean are unbound at return -- confirm.
        """
        max_succ_ratio = 0.
        # best-so-far tracking spans all restarts (initialized only once)
        prev_logit_loss = np.inf
        # begin searching from randomly chosen initial points for
        # self.MAX_INITS times
        for init_index in range(self.MAX_INITS):
            print('No.{} init'.format(init_index))
            self.sess.run(self.initializer,
                          feed_dict={self.assign_timg: img,
                                     self.assign_tlab: target})
            prev_loss = math.inf
            for iteration in range(self.MAX_ITERATIONS):
                ## one iteration of the optimization
                self.sess.run(self.train)
                rslts = self.sess.run([
                    self.loss, self.l2dist,
                    self.output, self.newimg,
                    self.noi_mean_nonneg,
                    self.loss_mean, self.loss_logits])
                ## attack evaluation for each iteration
                logit_loss = rslts[-1]
                # keep the best (lowest logit-loss) iterate; this branch is
                # always taken on the very first iteration because
                # prev_logit_loss starts at inf
                if logit_loss <= prev_logit_loss:
                    prev_logit_loss = logit_loss
                    best_img = rslts[3][0]
                    best_mean = rslts[4]
                    # count the hits over the sampled noise batch
                    preds = np.argmax(rslts[2], axis=1)
                    origs = np.argmax(target)
                    if self.TARGETED:
                        succ_ratio = np.sum(preds == origs) / len(preds)
                    else:
                        succ_ratio = np.sum(preds != origs) / len(preds)
                    max_succ_ratio = succ_ratio
                ## DONE! hinge loss reached zero -> attack succeeded
                if np.isclose(logit_loss, 0):
                    print(iteration, 'logits_loss = 0. Done!')
                    #print('avg logits', np.mean(rslts[2], axis=0))
                    return max_succ_ratio, best_img, best_mean, logit_loss
                ## verbose: report (and check progress) ~10 times per restart
                if iteration % (self.MAX_ITERATIONS // 10) == 0:
                    curr_loss, lm, ll = rslts[0], rslts[-2], logit_loss
                    print(iteration, succ_ratio, curr_loss, lm, ll)
                    # abort early if we're going nowhere
                    if self.ABORT_EARLY and curr_loss > prev_loss:
                        print('going nowhere... abort')
                        break
                    prev_loss = curr_loss
        ## return something even it is not 100% successful
        return max_succ_ratio, best_img, best_mean, logit_loss
|
{"hexsha": "37fe02b56836ddaf1494f57faa5c3aedbaa17590", "size": 8970, "ext": "py", "lang": "Python", "max_stars_repo_path": "rgb_attack.py", "max_stars_repo_name": "Harry1993/GhostImage", "max_stars_repo_head_hexsha": "7388ab2f48d7ab8a99c15bb65ccdbee47db7de97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-12T17:54:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-12T17:54:55.000Z", "max_issues_repo_path": "rgb_attack.py", "max_issues_repo_name": "Harry1993/GhostImage", "max_issues_repo_head_hexsha": "7388ab2f48d7ab8a99c15bb65ccdbee47db7de97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rgb_attack.py", "max_forks_repo_name": "Harry1993/GhostImage", "max_forks_repo_head_hexsha": "7388ab2f48d7ab8a99c15bb65ccdbee47db7de97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.125, "max_line_length": 84, "alphanum_fraction": 0.5917502787, "include": true, "reason": "import numpy", "num_tokens": 2090}
|
export AbstractNetwork, update!, update, statespace, neighbors
"""
AbstractNetwork
A supertype for all network types.
To implement the `AbstractNetwork` interface, a concrete subtype must provide
the following methods:
* [`update!(net, dest, state)`](@ref)
* [`statespace(net)`](@ref)
* [`neighbors(net, node, dir)`](@ref)
The following methods have default definitions which are usually sufficient.
* [`Base.size(net)`](@ref) - if `net` has as `size` property
* [`update!(net, state)`](@ref) - if `update!(net, state, state)` is well-behaved
* [`neighbors(net, nodes, dir)`](@ref)
* [`neighbors(net, dir)`](@ref)
"""
abstract type AbstractNetwork end
"""
size(net::AbstractNetwork)
Return the number of nodes in `net`.
**Note**: Concrete network subtypes need not define this method if they have a
`size` property.
"""
Base.size(net::AbstractNetwork) = net.size
"""
update!(net::AbstractNetwork, dest::AbstractVector, state::AbstractVector)
update!(net::AbstractNetwork, state::AbstractVector)
Update a `state` according to the network rules specified by `net`, and store
the result in `dest`. If `dest` is not provided, the state is updated in place.
The updated state is returned.
Typically `dest` must be at least as large as `state`; however, that may vary
depending on the concrete network subtype.
**Note**: Concrete network subtypes _must_ define `update!(net, dest, state)`.
If it is safe to call `update!(net, state, state)`, then `update!(net, state)`
need not be overloaded (as that is the default definition).
"""
@unimpl update!(net::AbstractNetwork, dest::AbstractVector, state::AbstractVector)
update!(net::AbstractNetwork, state::AbstractVector) = update!(net, state, state)
"""
update(net::AbstractNetwork, state)
Update a `state` according to the network rules specified by `net`, returning
the result and leaving `state` unmodified.
**Note**: Concrete network subtypes rarely need to overload this method as a
default implementation is provided based on [`update!(net, dest,
state)`](@ref). The exception to this rule is if the size or type of `dest` is
not the same as `state` for the particular concrete network subtype.
"""
update(net::AbstractNetwork, state::AbstractVector) = update!(net, deepcopy(state), state)
"""
statespace(net::AbstractNetwork)
Return an `AbstractStateSpace` representing the state space of `net`.
**Note**: Concrete network subtypes _must_ define this method.
"""
@unimpl statespace(net::AbstractNetwork)
"""
neighbors(net::AbstractNetwork, node::Int[, dir::Symbol=:inout])
Return an array of the indicies of nodes of `net` which neighbor `node`. If
`dir` is provided it must be one of
* `:in` - only return sources of edges incoming to `node`
* `:out` - only return targets of edges outgoing from `node`
* `:inout` - return all neighbors (`:in` and/or `:out`) of `node`
**Note**: Concrete network subtypes _must_ define this method.
"""
@unimpl neighbors(net::AbstractNetwork, node::Int, dir::Symbol=:inout)
"""
neighbors(net::AbstractNetwork, nodes[, dir::Symbol=:inout])
Return a collection of arrays of neighboring nodes, one for each node in the
`nodes` collection. If `dir` is provided it must be one of `:in:`, `:out`, or
`:inout`.
**Note**: Concrete network subtypes should rarely need define this method.
"""
function neighbors(net::AbstractNetwork, nodes, dir::Symbol=:inout)
neighbors.(net, nodes, dir)
end
"""
neighbors(net::AbstractNetwork[, dir::Symbol=:inout])
Return a vector of arrays of neighboring nodes, one for each node in `net`. If
`dir` is provided it must be one of `:in:`, `:out`, or `:inout`.
**Note**: Concrete network subtypes should rarely need define this method.
"""
neighbors(net::AbstractNetwork, dir::Symbol=:inout) = neighbors.(net, 1:size(net), dir)
|
{"hexsha": "4faf7216fd17fe52a0680aeb88ba57700a4d9e35", "size": 3806, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/core.jl", "max_stars_repo_name": "hbsmith/Neet.jl", "max_stars_repo_head_hexsha": "72add45845a9fe3b330f79663af3d391fae689f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/core.jl", "max_issues_repo_name": "hbsmith/Neet.jl", "max_issues_repo_head_hexsha": "72add45845a9fe3b330f79663af3d391fae689f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-12-05T20:03:02.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-05T21:14:09.000Z", "max_forks_repo_path": "src/core.jl", "max_forks_repo_name": "hbsmith/Neet.jl", "max_forks_repo_head_hexsha": "72add45845a9fe3b330f79663af3d391fae689f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-14T07:40:45.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-14T07:40:45.000Z", "avg_line_length": 33.9821428571, "max_line_length": 90, "alphanum_fraction": 0.7207041513, "num_tokens": 950}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.