arxiv_id
stringlengths
0
16
text
stringlengths
10
1.65M
import random
import math
import numpy as np


def del_artifical_vars(table, B, W):
    # Remove every artificial variable from a simplex tableau (end of phase one).
    #
    # table: list of tableau rows; last entry of each row is the RHS column.
    # B: np.array of column indices currently in the basis (one per tableau row).
    # W: list of column indices of the artificial variables.
    # Returns the reduced (table, B) with all artificial columns dropped.
    #
    # NOTE(review): assumes B is a numpy array (see np.where below) and W a
    # plain list — confirm against callers.
    use_bland = True      # pick the first eligible pivot column (Bland's rule)
    print_steps = True    # verbose tracing of every transformation
    while len(W) != 0:
        # Artificial variables that are NOT in the basis can simply be
        # crossed out (their columns deleted).
        non_basis = [i for i in W if i not in B]
        if print_steps:
            print()
            print('Table before removing base variables')
            for r in table:
                print([round(i, 2) for i in r])
            print()
            print('Removing aritificial, non basis, vars')
            print('non_basis = {}'.format(non_basis))
        # Drop the non-basis artificial columns from every row.
        for i in range(len(table)):
            table[i] = [value for index, value in enumerate(table[i]) if index not in non_basis]
        # Re-index the surviving artificial variables: every removed column
        # with a smaller index shifts the remaining indices left by one, so
        # they keep matching the (now narrower) table.
        W = [i for i in W if i not in non_basis]
        for i in range(len(W)):
            smaller_num = len([j for j in non_basis if j < W[i]])
            W[i] -= smaller_num
        # Same shift for the basis indices.
        for i in range(len(B)):
            smaller_num = len([j for j in non_basis if j < B[i]])
            B[i] -= smaller_num
        if print_steps:
            print('Table after removing non base variables')
            for r in table:
                print([round(i, 2) for i in r])
            print()
            print('Removing artificial base variables')
            print('Artificial base variables = {}'.format(W))
        # Removing all artificial base variables.
        # If the whole row is 0 except the artificial column itself, the row
        # is redundant: delete both the row and the column.
        # Otherwise pivot on any nonzero element in the row so the artificial
        # variable leaves the basis; the next while-iteration then deletes its
        # column via the non-basis path above.
        removed = []
        for w in W:
            # row = B.index(w)
            row = np.where(B == w)[0].item()
            # All column indices in this row except w itself (and the RHS).
            indexes = [i for i, j in enumerate(table[row][:-1]) if i != w]
            nonzero = any(table[row][i] != 0 for i in indexes)
            if nonzero:
                # Pivot on a nonzero element so that column enters the basis.
                candidate = [i for i in indexes if table[row][i] != 0]
                ind = candidate[0]
                if not use_bland:
                    ind = random.choice(candidate)
                # Normalize the pivot row, then eliminate the pivot column
                # from every other row (standard Gauss-Jordan pivot).
                table[row] = [i / table[row][ind] for i in table[row]]
                for i in range(len(table)):
                    if i != row:
                        mult = -table[i][ind]
                        table[i] = [i + mult * j for i, j in zip(table[i], table[row])]
                B[row] = ind
                if print_steps:
                    print('Since artificial variable x{} has an el!=0 in the same row, we pivot around A[{}][{}]'.format(w, row, ind))
                    print('New base is: {}'.format(B))
                    print()
            else:
                # Entire row is zero apart from the artificial column:
                # delete the row and the column w.
                table.pop(row)
                for i in range(len(table)):
                    table[i].pop(w)
                # Shift basis/artificial indices past the deleted column.
                B = B.tolist()
                el = B.pop(row)
                B = [i if el > i else i - 1 for i in B]
                W.remove(w)
                W = [i if el > i else i - 1 for i in W]
                B = np.array(B)
                if print_steps:
                    print('Since artificial variable x{} is in a row where the base is and all are = 0 we delete the row and the column'.format(w))
        if print_steps:
            print()
            print('Table after removing base variables')
            for r in table:
                print([round(i, 2) for i in r])
    return table, B
from __future__ import print_function

import sys

import numpy as np
import netCDF4 as nc

from .base_grid import BaseGrid


class DaitrenRunoffGrid(BaseGrid):
    """Arakawa A grid loaded from a Daitren runoff netCDF definition file."""

    def __init__(self, h_grid_def, description='Daitren runoff regular grid'):
        # h_grid_def: path to the netCDF horizontal grid definition file.
        self.type = 'Arakawa A'
        self.full_name = 'Daitren_runoff'
        try:
            with nc.Dataset(h_grid_def) as f:
                # Cell centres (xc/yc), cell corners (xv/yv),
                # land-sea mask and cell areas.
                x_t = f.variables['xc'][:]
                y_t = f.variables['yc'][:]
                clon_t = f.variables['xv'][:]
                clat_t = f.variables['yv'][:]
                mask_t = f.variables['mask'][:]
                area_t = f.variables['area'][:]
        except IOError:
            # Unreadable grid file is fatal for this tool.
            print('Error opening {}'.format(h_grid_def), file=sys.stderr)
            sys.exit(1)
        super(DaitrenRunoffGrid, self).__init__(
            x_t=x_t, y_t=y_t, clon_t=clon_t, clat_t=clat_t,
            mask_t=mask_t, area_t=area_t, description=description)
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense
import sys
import numpy as np
import matplotlib.pyplot as plt
import cv2


def testContours(img):
    """Debug helper: draw a bounding box around every external contour of img
    and display the annotated image."""
    plt.figure(figsize=[10, 5])
    ret, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    # OpenCV 3.x signature: returns (image, contours, hierarchy).
    img2, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    print('Contour numbers' + str(len(contours)))
    for c in contours:
        [x, y, w, h] = cv2.boundingRect(c)
        cv2.rectangle(img, (x, y), (x + w, y + h), (150, 150, 55), 1)
    # Display the first image in training data
    plt.subplot(121)
    plt.imshow(img, cmap='gray')
    plt.show()


def getAreaOfIntrest(imgToResolve):
    """Crop the image to the bounding box of its largest external contour
    and resize the crop to 28x28 (MNIST input size).

    Returns the original image unchanged when no contour is found."""
    ret, thresh = cv2.threshold(imgToResolve, 127, 255, cv2.THRESH_BINARY)
    img2, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contourArr = [cv2.contourArea(c) for c in contours]
    # Look for the largest contour.
    if len(contourArr) == 0:
        return imgToResolve
    contourIndex = np.argmax(contourArr)
    [x, y, w, h] = cv2.boundingRect(contours[contourIndex])
    resolvedImg = imgToResolve[y:y + h + 1, x:x + w + 1]
    resolvedImg = cv2.resize(resolvedImg, (28, 28), interpolation=cv2.INTER_AREA)
    return resolvedImg


def prepareImages(images):
    """Crop/resize every image in-place to its area of interest; returns the
    (mutated) input array for convenience."""
    for i in range(len(images)):
        print('\rPreparing ' + str(i) + '/' + str(len(images) - 1), end=' ')
        images[i] = getAreaOfIntrest(images[i])
    return images


def trainNM():
    """Train a 512-512-softmax dense network on (cropped) MNIST and save it
    as 'model.h5'."""
    (trainImages, trainLabels), (testImages, testLabels) = mnist.load_data()
    print('Preparing training data \n')
    trainImages = prepareImages(trainImages)
    print('Preparing test data \n')
    testImages = prepareImages(testImages)
    # Flatten the 28x28 images into feature vectors.
    dimData = np.prod(trainImages.shape[1:])
    trainData = trainImages.reshape(trainImages.shape[0], dimData)
    testData = testImages.reshape(testImages.shape[0], dimData)
    nClasses = len(np.unique(trainLabels))
    # Scale pixel values to [0, 1].
    trainData = np.array(trainData, 'float32') / 255
    testData = np.array(testData, 'float32') / 255
    trainLabelsOneHot = to_categorical(trainLabels)
    testLabelsOneHot = to_categorical(testLabels)
    model = Sequential()
    model.add(Dense(512, activation='relu', input_shape=(dimData,)))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(nClasses, activation='softmax'))
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(trainData, trainLabelsOneHot, batch_size=256, epochs=20, verbose=1,
              validation_data=(testData, testLabelsOneHot))
    [testLoss, testAcc] = model.evaluate(testData, testLabelsOneHot)
    print('Evaulation result on Test Data : Loss = {}, accuracy = {}'.format(testLoss, testAcc))
    print('Training data shape : ', trainImages.shape, trainLabels.shape)
    # BUG FIX: the second value used to be testImages.shape (printed twice);
    # print the label shape, mirroring the training-data line above.
    print('Testing data shape : ', testImages.shape, testLabels.shape)
    # Find the unique numbers from the train labels
    classes = np.unique(trainLabels)
    nClasses = len(classes)
    print('Total number of outputs : ', nClasses)
    print('Output classes : ', classes)
    model.save('model.h5')
    del model
'''
Short script to run on quest to create and submit a request for forced
photometry for every source in the 2018 sample
'''
import pandas as pd
import numpy as np
import glob
import subprocess

# Directory containing one force_phot_<source>_*.fits file per source.
info_path = "/projects/p30796/ZTF/early_Ia/2018/info/"
source_files = glob.glob(info_path + 'force_phot*.fits')

for source_file in source_files:
    # Source name is the third underscore-separated token of the file name.
    source = source_file.split('/')[-1].split('_')[2]
    # Write a Moab (MSUB) batch script for this source, then submit it.
    with open('{}_force_phot.sh'.format(source), 'w') as fw:
        print('''#!/bin/bash
#MSUB -A p30796
#MSUB -q short
#MSUB -l walltime=04:00:00
#MSUB -M my_email_address
#MSUB -j oe
''', file=fw)
        print('#MSUB -N sys_{}'.format(source), file=fw)
        print('''#MSUB -l mem=90gb
#MSUB -l nodes=1:ppn=28
#MSUB -l partition=quest8

# add a project directory to your PATH (if needed)
export PATH=$PATH:/projects/p30796/tools/

# load modules you need to use
module load python/anaconda
source activate emcee3

# Run the actual commands to be executed
cd /home/aam3503/software/adamamiller_git_clones/ForcePhotZTF
python parallel_force_lc.py {}
'''.format(source), file=fw)
    # Make the script executable and queue it.
    subprocess.call(['chmod', 'ugo+x', '{}_force_phot.sh'.format(source)])
    subprocess.call(['msub', '{}_force_phot.sh'.format(source)])
import cv2
import numpy as np

# Dense optical-flow (Farneback) visualization demo.
# Flow direction is mapped to HSV hue and flow magnitude to saturation.

# add your video absolute path
pathv = 'C:\\your path\\v_GolfSwing_g17_c05.avi'
#or you can just ./v_GolfSwing_g17_c05.avi,but may report an error when you run the program
cap = cv2.VideoCapture(pathv)
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)

# Optical flow visualization.
# frame dtype is np.uint8 (unsigned char): values saturate/wrap at 255,
# so large assignments get truncated.
# Saturation (s) and value (v) max out at 255: s = 255 is fully saturated,
# v = 255 is brightest.
# For hue, OpenCV 3 uses the range (0, 180) to cover the whole color wheel,
# which is why the angle below is divided by 2.
# np.zeros_like(): Return an array of zeros with the same shape and type as a given array.
hsv = np.zeros_like(frame1)
# hsv[...,1] = 255

while cap.isOpened():
    ret, frame2 = cap.read()
    if not ret:
        # BUG FIX: at end of stream (or on read failure) frame2 is None and
        # cvtColor would raise; stop the loop cleanly instead.
        break
    next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)  # need to try
    # Convert the (dx, dy) flow field to polar magnitude/angle.
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    # Normalizes the value range of the magnitude into (0, 255)
    # (norm_type = cv2.NORM_MINMAX).
    hsv[..., 1] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    # Full brightness.
    hsv[..., 2] = 255
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    cv2.imshow('frame2', rgb)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits
        # if k == ord('q'):
        break
    elif k == ord('s'):
        # Save the current raw frame and its flow visualization.
        cv2.imwrite('optical_ori.png', frame2)
        cv2.imwrite('optical_hsv.png', rgb)
    prvs = next

cap.release()
cv2.destroyAllWindows()
# For plotting
# from environment.custom.knapsack.heuristic import solver
import matplotlib.pyplot as plt
import os
import numpy as np

# Import Google OR Tools Solver
# from agents.optimum_solver import solver


def plotter(data, env, agent, agent_config, opt_solver, print_details=False):
    # Plot per-episode average/min/max rewards against the optimal value and
    # save the figure under ./media/plots/<env>/<agent>/.
    #
    # data: tuple of (avg, min, max, value-loss) buffers, one entry per episode.
    # opt_solver: optional callable producing the optimum value; flat-lined
    # on the plot for reference. None disables it.

    # Destructure the tuple
    average_rewards_buffer, min_rewards_buffer, max_rewards_buffer, value_loss_buffer = data

    # Compute optimum solution
    optimum_value = 0
    if opt_solver is not None:
        input_solver = env.convert_to_ortools_input()
        optimum_value = opt_solver(input_solver, print_details)
    else:
        optimum_value = 0

    # Fill the array with the opt values
    # This will create a flat line
    opt_values = [optimum_value for i in range(len(average_rewards_buffer))]

    # CVRP rewards are negated costs; flip sign so the plot shows costs.
    if env.name == 'CVRP':
        average_rewards_buffer = -1 * np.array(average_rewards_buffer)
        min_rewards_buffer = -1 * np.array(min_rewards_buffer)
        max_rewards_buffer = -1 * np.array(max_rewards_buffer)

    agent_name = agent.name
    env_name = env.name

    x_values = [i for i in range(len(average_rewards_buffer))]

    plt.plot(x_values, average_rewards_buffer, label="Average (in batch) Double Pointer Critic")
    plt.plot(x_values, min_rewards_buffer, label="Minimum (in Batch) Double Pointer Critic")
    plt.plot(x_values, max_rewards_buffer, label="Maximum (in batch) Double Pointer Critic")
    plt.plot(x_values, opt_values, label="Optimal")

    plt.ylabel('Collected Reward')
    plt.xlabel('Episode')

    # Encode the hyperparameters into the file name / title.
    gamma = agent_config['gamma']
    mha_mask = agent_config['use_mha_mask']
    entropy = agent_config['entropy_coefficient']
    td = agent_config['actor']['encoder_embedding_time_distributed']
    actor_lr = agent_config['actor']['learning_rate']
    critic_lr = agent_config['critic']['learning_rate']

    file_name = f"g:{gamma}|e:{entropy}|td:{td}|ac_lr:{actor_lr}|cr_lr:{critic_lr}|mha_mask:{mha_mask}"

    plot_title = f"{agent_name.upper()}\n|" + file_name

    plt.title(plot_title)

    saveDir = f"./media/plots/{env_name}/{agent_name}"
    # Check if dir exists. If not, create it
    if not os.path.isdir(saveDir):
        os.makedirs(saveDir)

    # Show legend info
    plt.legend()

    # plt.show(block=blockPlot)
    plt.savefig(
        f"{saveDir}/{file_name.replace(' ', '')}.png",
        dpi=200,
        bbox_inches="tight"
    )

    plt.close()


def plot_attentions(attentions, num_items, num_backpacks):
    # Visualize, for each decoding step, the attention over items (left
    # column) and over backpacks (right column), labelled with the raw
    # item/backpack state values.
    fig, axs = plt.subplots(num_items, 2)

    for index, attention in enumerate(attentions):
        # Only show the attention over the items
        axs[index, 0].matshow(attention['resource_attention'][:, num_backpacks:])
        # axs[index, 0].set_title('Item Attention')

        # Only show the attention over the backpacks
        axs[index, 1].matshow(attention['bin_attention'][:, :num_backpacks])
        # axs[index, 1].set_title('Backpack Attention')

    for index in range(num_items):
        # Select the plot by index for the Items
        plt.sca(axs[index, 0])

        # Add the ticks and the labels
        item_input = attentions[index]["resource_net_input"]
        item_ylabel = f'w:{int(item_input[0,0,0])} v:{int(item_input[0,0,1])}'
        plt.yticks([0], [item_ylabel])

        item_states = attentions[index]['current_state'][0, num_backpacks:]
        item_xlabel = []
        for itm in item_states:
            item_xlabel.append(
                f'w:{int(itm[0])} v:{int(itm[1])}'
            )
        plt.xticks(range(len(item_xlabel)), item_xlabel)

        # Select the plot by index for the Backpacks
        plt.sca(axs[index, 1])

        # Add the ticks and the labels
        item_input = attentions[index]["bin_net_input"]
        backpack_ylabel = f'w:{int(item_input[0,0,0])} v:{int(item_input[0,0,1])}'
        plt.yticks([0], [backpack_ylabel])

        backpack_states = attentions[index]['current_state'][0, :num_backpacks]
        backpack_xlabel = []
        for bp in backpack_states:
            backpack_xlabel.append(
                f'c:{int(bp[0])} l:{int(bp[1])}'
            )
        plt.xticks(range(len(backpack_xlabel)), backpack_xlabel)

    # plt.subplots_adjust(wspace=0.3, hspace = 0.3)
    plt.show(block=True)


if __name__ == "__main__":  # pragma: no cover
    # NOTE(review): plot_attentions requires (attentions, num_items,
    # num_backpacks) — this bare call raises TypeError; confirm intended
    # arguments or remove the call.
    plot_attentions()
    # tuner()
import os
import shutil
import urllib.request

from qtpy.QtWidgets import QMessageBox
from qtpy.QtCore import Qt

from specviz.plugins.loader_wizard.loader_wizard import (ASCIIImportWizard,
                                                         parse_ascii,
                                                         simplify_arrays)


def test_loader_wizard(tmpdir, qtbot, monkeypatch):
    # End-to-end test of the ASCII loader wizard: download a sample table,
    # drive the wizard widgets, save a generated loader module, and compare
    # the generated source against the expected text verbatim.

    # Monkeypatch the QMessageBox widget so that it doesn't block the test
    # progression. In this case, accept the information dialog indicating that
    # a loader has been saved.
    monkeypatch.setattr(QMessageBox, "information", lambda *args: QMessageBox.Ok)

    tmpfile = str(tmpdir.join('example.txt'))

    # Sample ASCII spectrum hosted on Box (network access required).
    data_url = 'https://stsci.app.box.com/index.php?rm=box_download_shared_file' \
               '&shared_name=zz2vgbreuzhjtel0d5u96r30oofolod7&file_id=f_345743002081'

    with urllib.request.urlopen(data_url) as response:
        with open(tmpfile, 'wb') as handle:
            shutil.copyfileobj(response, handle)

    # Read in table from temp file and load wizard widget
    arrays = simplify_arrays(parse_ascii(tmpfile))
    widget = ASCIIImportWizard(tmpfile, arrays)
    qtbot.addWidget(widget)

    widget.line_table_read.setText('format="ascii"')
    widget._update_data()

    # set units and column choices
    widget.combo_dispersion_units.setCurrentIndex(0)
    widget.combo_data_component.setCurrentIndex(1)
    widget.combo_data_units.setCurrentIndex(2)

    # Set loader name and run save check
    widget.loader_name.setText("loadertest")
    assert widget.save_loader_check()

    filename = os.path.join(tmpdir, "loader_temp.py")
    widget.save_register_new_loader(filename)

    with open(filename) as f:
        created_out = f.read()

    # Not the nicest text block, but better then putting the comparison
    # file in git.
    expected_out = """import os

from astropy.table import Table
from astropy.units import Unit
from specutils.io.registers import data_loader
from specutils import Spectrum1D


@data_loader(label="loadertest")
def simple_generic_loader(file_name):
    # Use name of the file for the spectra object that's created
    # when the data is loaded.
    name = os.path.basename(file_name.rstrip(os.sep)).rsplit('.', 1)[0]
    ast_table = Table.read(file_name, format="ascii")

    flux = ast_table["flux"].flatten()
    wavelength = ast_table["wavelength"].flatten()

    # Set units
    unit = Unit("Jy")
    disp_unit = Unit("Angstrom")

    # A new spectrum object is returned, which specviz understands
    return Spectrum1D(spectral_axis=wavelength*disp_unit, flux=flux*unit)
"""

    assert created_out == expected_out
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de

import sys
sys.path.append('.')

import glob
import torch
import joblib
import argparse
from tqdm import tqdm
import os.path as osp
from skimage import io
from scipy.io import loadmat

from lib.models import spin
from lib.data_utils.kp_utils import *
from lib.core.config import VIBE_DB_DIR
from lib.data_utils.img_utils import get_bbox_from_kp2d
from lib.data_utils.feature_extractor import extract_features


def calc_kpt_bound(kp_2d):
    # Compute the tight bounding box (up, down, left, right) of the visible
    # joints in one frame. kp_2d rows are (x, y, visibility); joints with
    # visibility == 0 are ignored.
    MAX_COORD = 10000
    x = kp_2d[:, 0]
    y = kp_2d[:, 1]
    z = kp_2d[:, 2]
    u = MAX_COORD
    d = -1
    l = MAX_COORD
    r = -1
    for idx, vis in enumerate(z):
        if vis == 0:  # skip invisible joint
            continue
        u = min(u, y[idx])
        d = max(d, y[idx])
        l = min(l, x[idx])
        r = max(r, x[idx])
    return u, d, l, r


def load_mat(path):
    # Load a PennAction label .mat file, stripping fields this pipeline does
    # not use, and unwrap the scalar frame count.
    mat = loadmat(path)
    del mat['pose'], mat['__header__'], mat['__globals__'], mat['__version__'], mat['train'], mat['action']
    mat['nframes'] = mat['nframes'][0][0]
    return mat


def read_data(folder):
    # Build the PennAction training database: per-frame 2D joints, bounding
    # boxes, image paths, video names, and SPIN image features.
    dataset = {
        'img_name': [],
        'joints2D': [],
        'bbox': [],
        'vid_name': [],
        'features': [],
    }

    model = spin.get_pretrained_hmr()

    file_names = sorted(glob.glob(folder + '/labels/' + '*.mat'))

    for fname in tqdm(file_names):
        vid_dict = load_mat(fname)
        imgs = sorted(glob.glob(folder + '/frames/' + fname.strip().split('/')[-1].split('.')[0] + '/*.jpg'))
        # (nframes, 13 joints, [x, y, visibility])
        kp_2d = np.zeros((vid_dict['nframes'], 13, 3))
        perm_idxs = get_perm_idxs('pennaction', 'common')
        kp_2d[:, :, 0] = vid_dict['x']
        kp_2d[:, :, 1] = vid_dict['y']
        kp_2d[:, :, 2] = vid_dict['visibility']
        # Reorder joints into the common keypoint convention.
        kp_2d = kp_2d[:, perm_idxs, :]

        # fix inconsistency
        # NOTE(review): joint slot 12 of n_kp_2d is left all-zero here
        # (invisible), while the last source joint is moved to slot 13 —
        # confirm this matches the 14-joint common format expected downstream.
        n_kp_2d = np.zeros((kp_2d.shape[0], 14, 3))
        n_kp_2d[:, :12, :] = kp_2d[:, :-1, :]
        n_kp_2d[:, 13, :] = kp_2d[:, 12, :]
        kp_2d = n_kp_2d

        bbox = np.zeros((vid_dict['nframes'], 4))

        for fr_id, fr in enumerate(kp_2d):
            u, d, l, r = calc_kpt_bound(fr)
            center = np.array([(l + r) * 0.5, (u + d) * 0.5], dtype=np.float32)
            c_x, c_y = center[0], center[1]
            w, h = r - l, d - u
            # Make the box square by taking the larger side.
            w = h = np.where(w / h > 1, w, h)

            bbox[fr_id, :] = np.array([c_x, c_y, w, h])

        dataset['vid_name'].append(np.array([f'{fname}'] * vid_dict['nframes']))
        dataset['img_name'].append(np.array(imgs))
        dataset['joints2D'].append(kp_2d)
        dataset['bbox'].append(bbox)

        features = extract_features(model, np.array(imgs), bbox, dataset='pennaction', debug=False)
        dataset['features'].append(features)

    # Stack the per-video lists, then flatten over the frame axis.
    for k in dataset.keys():
        dataset[k] = np.array(dataset[k])
    for k in dataset.keys():
        dataset[k] = np.concatenate(dataset[k])

    return dataset


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir', type=str, help='dataset directory', default='data/pennaction')
    args = parser.parse_args()
    dataset = read_data(args.dir)
    joblib.dump(dataset, osp.join(VIBE_DB_DIR, 'pennaction_train_db.pt'))
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 14:01:21 2020

@author: Jakob

Agent-based simulation of strategic network formation: agents repeatedly
revise directed links to maximize a utility with direct, mutual and indirect
components, preferring links to agents with similar characteristics.
"""

###########################################################
### Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import networkx as nx
import time
import pickle

t1 = time.time()
fig = plt.figure()

# Print options
np.set_printoptions(precision=3, floatmode='fixed', suppress=True)

# Gloabal parameter inputs
n = 50        # Number of agents
delta = 0.3   # weight placed on indirect links
gamma = 0.5   # weight placed on additional utility derived from a mutual link
c = 0.2       # cost of forming and maintaining links
b = 0.3       # strength of preference for links to similar agents
sigma = 0.01  # standard deviation of the shocks to utility
alpha = 2.1   # convexity of costs

share_red = 1/3
share_blue = 1/3
share_green = 1 - share_red - share_blue
possible_X = [[1, 0], [0, 1], [1, 1]]

# Randomly generate the matrix of characteristics (Generating this randomly is NOT a good idea)
# Note that this way of generating guarantees that X_i=[0,0] does not occur
#X_ind = np.random.choice(len(possible_X), size=n, p=[share_red,share_blue,share_green])
#X = np.array([possible_X[X_ind[i]] for i in range(len(X_ind))])

# Generate proportional green blue and reds for sure (makes simulation more stable)
share_red = np.round(share_red, decimals=1)
share_blue = np.round(share_blue, decimals=1)
X = np.array([possible_X[0] for i in range(int(share_red*n))]
             + [possible_X[1] for i in range(int(share_blue*n))]
             + [possible_X[2] for i in range(n - int(share_red*n) - int(share_blue*n))])

# Randomly generate the initial network configuration
p_link_0 = 0.1  # Uniform initial link probability
g_0 = np.random.choice([0, 1], size=(n, n), p=[1 - p_link_0, p_link_0])
np.fill_diagonal(g_0, 0)  # The diagonal elements of the adjacency matrix are 0 by convention

# Sequence of adjacency matrices


def u(i, j, X):
    """ Returns the partial utility given X_i and X_j using the exp(-b*L1-norm of their difference)"""
    return math.exp(-b * np.linalg.norm((X[i] - X[j]), ord=1))


def make_pre_cal_u(n, X):
    """ Make the U matrix for the entire system """
    pre_cal_u = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            pre_cal_u[i, j] = u(i, j, X)
    return pre_cal_u


def U(g, i):
    """ Returns the full utility of agent i given the current network structure g
    and the matrix of characteristics X """
    d_i = g[i].sum()  # out-degree of i (drives the convex cost term)
    direct_u = np.sum(g[i] * pre_U[i])
    mutual_u = np.sum(g[i] * g.T[i] * pre_U[i])
    # Indirect: utility from friends-of-friends (i excluded below).
    a = (g.T.dot(g[i, :]) * pre_U)[i]
    a[i] = 0
    indirect_u = np.sum(a)
    return direct_u + gamma * mutual_u + delta * indirect_u - d_i ** alpha * c


def step(g, indexes):
    """ Randomly selects an agent i to revise their link with another random agent j.
    Returns the updated adjacency matrix

    NOTE(review): mutates g in place (the return value is the same object).
    """
    eps = np.random.normal(scale=sigma, size=n * 2)
    for i in indexes:
        # Draw a revision partner r1 != i.
        r1 = i
        while r1 == i:
            r1 = np.random.choice(indexes)
        # Compare utility with and without the link i -> r1 (plus noise)
        # and keep whichever is better.
        g[i, r1] = 0
        U_without = U(g, i) + eps[i]
        g[i, r1] = 1
        # NOTE(review): eps[-i] equals eps[i] when i == 0 — presumably
        # eps[n + i] was intended for an independent shock; confirm.
        U_with = U(g, i) + eps[-i]
        if U_without > U_with:
            g[i, r1] = 0
        else:
            g[i, r1] = 1
    return g


def plot_network(g):
    """ Uses networkX to plot the directed network g """
    rows, cols = np.where(g == 1)
    edges = zip(rows.tolist(), cols.tolist())
    gr = nx.DiGraph()  # Calling the DIRECTED graph method
    gr.add_nodes_from(range(n))
    gr.add_edges_from(edges)
    # Add node colors according to X
    color_map = []
    for i in range(n):
        if np.all(X[i] == possible_X[0]):
            color_map.append('red')
        if np.all(X[i] == possible_X[1]):
            color_map.append('blue')
        if np.all(X[i] == possible_X[2]):
            color_map.append('green')
    fig.clear()
    nx.draw(gr, node_color=color_map, with_labels=True, node_size=500)
    plt.pause(0.5)


# Run the simulation for T total steps or until convergence is reached
T = 10000
T = int(T / n)  # each step() revises n links, so scale down the step count
t_plot = 20
pre_U = make_pre_cal_u(n, X)
indexes = list(range(n))
g_sequence = np.zeros((T, n, n))
zero_sequence = np.zeros(T)
g_sequence[0] = g_0
g_old = g_0
n_zeros = 3
begin_conv = 20
zeros = np.zeros(n_zeros)
zero_sequence[0] = 1.0

for t in range(1, T):
    print(t, end='\r')
    np.random.shuffle(indexes)
    # Perform a step and attach the new network
    # NOTE(review): step() mutates g_sequence[t-1] in place (it is a view),
    # so the difference below is always 0 and the convergence test fires as
    # soon as t > begin_conv — confirm whether a copy was intended.
    g_sequence[t] = step(g_sequence[t - 1], indexes)
    zero_sequence[t] = (np.sum(g_sequence[t] - g_sequence[t - 1]))
    try:
        # NOTE(review): .any() == zeros.any() compares two booleans
        # (zeros.any() is always False) rather than element-wise equality —
        # confirm the intended convergence criterion.
        if t > begin_conv and zero_sequence[t - n_zeros:t].any() == zeros.any():
            print(zero_sequence[t - n_zeros:t])
            print(t, 'STOP')
            break
    except:
        pass
    # Produce a plot and diagnostics every t_plot steps
    if (t + 1) % t_plot == 0:
        plot_network(g_sequence[t])

pickle.dump(g_sequence, open('gsec.p', 'wb'))

print(time.time() - t1)
plot_network(g_sequence[t])
print("done")
plt.show()
import click as ck
import pandas as pd
from ont import Ontology
import dgl
from dgl import nn as dglnn
import torch as th
import numpy as np
from torch import nn
from torch.nn import functional as F
from torch import optim
from sklearn.metrics import roc_curve, auc, matthews_corrcoef
import copy
from torch.utils.data import DataLoader
from RelAtt.relGraphConv import RelGraphConv
from RelAtt.baseRGCN import BaseRGCN
from dgl.nn import GraphConv, AvgPooling, MaxPooling
import random

# Fix all RNG seeds for reproducibility.
th.manual_seed(0)
np.random.seed(0)
random.seed(0)


@ck.command()
@ck.option(
    '--train-inter-file', '-trif', default='data/4932.train_interactions.pkl',
    help='Interactions file (deepint_data.py)')
@ck.option(
    '--test-inter-file', '-tsif', default='data/4932.test_interactions.pkl',
    help='Interactions file (deepint_data.py)')
@ck.option(
    '--data-file', '-df', default='data/swissprot.pkl',
    help='Data file with protein sequences')
@ck.option(
    '--deepgo-model', '-dm', default='data/deepgoplus.h5',
    help='DeepGOPlus prediction model')
@ck.option(
    '--model-file', '-mf', default='data/9606.model.h5',
    help='DeepGOPlus prediction model')
@ck.option(
    '--batch-size', '-bs', default=32,
    help='Batch size for training')
@ck.option(
    '--epochs', '-ep', default=32,
    help='Training epochs')
@ck.option(
    '--load', '-ld', is_flag=True,
    help='Load Model?')
def main(train_inter_file, test_inter_file, data_file, deepgo_model, model_file,
         batch_size, epochs, load):
    # Train an RGCN-based protein-protein interaction classifier over the GO
    # graph, where each protein pair contributes node features (its GO
    # annotations) to a copy of the graph.
    device = 'cuda'
    # with_ic adds an information-content column to the node features.
    with_ic = False
    if with_ic:
        feat_dim = 3
    else:
        feat_dim = 2
    with_disjoint = False
    with_intersection = False
    inverse = False
    rels = ['part_of', 'regulates']
    g, annots, prot_idx = load_graph_data(data_file, rels=rels, with_ic=with_ic,
                                          with_disjoint=with_disjoint,
                                          with_intersection=with_intersection,
                                          inverse=inverse)
    num_rels = len(g.canonical_etypes)
    g = dgl.to_homogeneous(g)
    #print("HOMOGENOUS GRAPH: " + str(g.number_of_edges()))
    num_nodes = g.number_of_nodes()
    print(f"Num nodes: {num_nodes}")
    annots = th.FloatTensor(annots)
    train_df, test_df = load_ppi_data(train_inter_file, test_inter_file)
    model = PPIModel(feat_dim, num_rels, num_rels, num_nodes)
    model.to(device)
    loss_func = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    train_labels = th.FloatTensor(train_df['labels'].values).to(device)
    test_labels = th.FloatTensor(test_df['labels'].values).to(device)
    train_set_batches = get_batches(g, annots, prot_idx, train_df, train_labels,
                                    batch_size, with_ic=with_ic)
    test_set_batches = get_batches(g, annots, prot_idx, test_df, test_labels,
                                   batch_size, with_ic=with_ic)
    for epoch in range(epochs):
        epoch_loss = 0
        model.train()
        for iter, (batch, labels) in enumerate(train_set_batches):
            logits = model(batch.to(device))
            labels = labels.unsqueeze(1).to(device)
            loss = loss_func(logits, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
        epoch_loss /= (iter + 1)
        model.eval()
        test_loss = 0
        preds = []
        with th.no_grad():
            for iter, (batch, labels) in enumerate(test_set_batches):
                logits = model(batch.to(device))
                labels = labels.unsqueeze(1).to(device)
                loss = loss_func(logits, labels)
                test_loss += loss.detach().item()
                preds = np.append(preds, logits.cpu())
            test_loss /= (iter + 1)
        labels = test_df['labels'].values
        roc_auc = compute_roc(labels, preds)
        print(f'Epoch {epoch}: Loss - {epoch_loss}, \tTest loss - {test_loss}, \tAUC - {roc_auc}')


def compute_roc(labels, preds):
    # Compute ROC curve and ROC area for each class
    fpr, tpr, _ = roc_curve(labels.flatten(), preds.flatten())
    roc_auc = auc(fpr, tpr)
    return roc_auc


def get_batches(graph, annots, prot_idx, df, labels, batch_size, with_ic=False):
    # Build a DataLoader of (graph copy with per-pair node features, label)
    # samples; pairs with unknown proteins are skipped.
    dataset = []
    with ck.progressbar(df.itertuples(), length=len(df)) as bar:
        for row in bar:
            i = bar.pos
            p1, p2 = row.interactions
            label = labels[i].view(1, 1)
            if p1 not in prot_idx or p2 not in prot_idx:
                continue
            pi1, pi2 = prot_idx[p1], prot_idx[p2]
            if with_ic:
                # Column 0 of annots is the IC score; +1 offsets protein cols.
                feat = annots[:, [0, pi1 + 1, pi2 + 1]]
            else:
                feat = annots[:, [pi1, pi2]]
            # Deep-copy so each sample carries its own node features.
            graph_cp = copy.deepcopy(graph)
            graph_cp.ndata['feat'] = feat
            dataset.append((graph_cp, label))
    return DataLoader(dataset, batch_size=batch_size, shuffle=False, collate_fn=collate)


def collate(samples):
    # The input `samples` is a list of pairs
    # (graph, label).
    graphs, labels = map(list, zip(*samples))
    batched_graph = dgl.batch(graphs)
    return batched_graph, th.tensor(labels)


class EmbeddingLayer(nn.Module):
    # Projects raw node features and relation ids into a common hidden space.
    def __init__(self, num_nodes, num_rels, h_dim):
        super(EmbeddingLayer, self).__init__()
        self.n_embedding = th.nn.Linear(num_nodes, h_dim)
        self.e_embedding = th.nn.Embedding(num_rels, h_dim)
        self.num_nodes = num_nodes
        self.num_rels = num_rels
        self.h_dim = h_dim

    def forward(self, g, hn, r, he, norm):
        return self.n_embedding(hn), self.e_embedding(he.squeeze())


class RGCN(BaseRGCN):
    # Relational GCN: embedding input layer + stacked RelGraphConv layers.
    def build_input_layer(self):
        return EmbeddingLayer(2, self.num_rels, self.h_dim)

    def build_hidden_layer(self, idx):
        # No activation on the final layer.
        act = F.relu if idx < self.num_hidden_layers - 1 else None
        return RelGraphConv(self.h_dim, self.h_dim, self.num_rels, "basis",
                            self.num_bases, activation=act, self_loop=True,
                            dropout=self.dropout, low_mem=True)


class PPIModel(nn.Module):
    # RGCN over the GO graph followed by a linear head producing a single
    # sigmoid interaction probability per graph.
    def __init__(self, h_dim, num_rels, num_bases, num_nodes):
        super().__init__()
        self.h_dim = h_dim
        self.num_rels = num_rels
        self.num_bases = None if num_bases < 0 else num_bases
        self.num_nodes = num_nodes
        print(f"Num rels: {self.num_rels}")
        print(f"Num bases: {self.num_bases}")

        self.rgcn = RGCN(self.h_dim,
                         self.h_dim,
                         self.h_dim,
                         self.num_rels,
                         self.num_bases,
                         num_hidden_layers=2,
                         dropout=0.3,
                         use_self_loop=False,
                         use_cuda=True)

        # self.avgpool = AvgPooling()
        # self.maxpool = MaxPooling()
        # self.fc = nn.Linear(4, 1)
        self.fc = nn.Linear(self.num_nodes * self.h_dim, 1)

    def forward(self, g):
        features = g.ndata['feat']
        edge_type = g.edata[dgl.ETYPE].long()
        edge_feat = th.arange(self.num_rels).view(-1, 1).long().cuda()
        x, _ = self.rgcn(g, features, edge_type, edge_feat, None)
        #x = th.cat([self.avgpool(g, x), self.maxpool(g, x)], dim=-1)
        # Flatten all node embeddings of each graph into one feature vector.
        x = th.flatten(x).view(-1, self.num_nodes * self.h_dim)
        return th.sigmoid(self.fc(x))


def load_ppi_data(train_inter_file, test_inter_file):
    # Load and deterministically subsample the interaction dataframes
    # (10k train / 1k test pairs).
    train_df = pd.read_pickle(train_inter_file)
    index = np.arange(len(train_df))
    np.random.seed(seed=0)
    np.random.shuffle(index)
    train_df = train_df.iloc[index[:10000]]

    test_df = pd.read_pickle(test_inter_file)
    index = np.arange(len(test_df))
    np.random.seed(seed=0)
    np.random.shuffle(index)
    test_df = test_df.iloc[index[:1000]]

    return train_df, test_df


def load_graph_data(data_file, rels=[], with_ic=False, with_disjoint=False,
                    with_intersection=False, inverse=True):
    # Build the GO DGL graph plus a (go_term x protein) binary annotation
    # matrix for yeast (taxon 559292) proteins, and a protein -> column map.
    # NOTE(review): mutable default rels=[] is shared across calls — safe
    # only as long as callers never mutate it.
    go = Ontology('data/go.obo', rels, with_disjoint, with_intersection, inverse)
    nodes = list(go.ont.keys())
    node_idx = {v: k for k, v in enumerate(nodes)}
    g = go.toDGLGraph()
    g = dgl.add_self_loop(g, 'is_a')

    df = pd.read_pickle(data_file)
    df = df[df['orgs'] == '559292']

    if with_ic:
        go.calculate_ic(df['prop_annotations'])
        # Extra first column holds the per-term information content.
        annotations = np.zeros((len(nodes), len(df) + 1), dtype=np.float32)
        for i, go_id in enumerate(go.ont.keys()):
            annotations[i, 0] = go.get_ic(go_id)
    else:
        annotations = np.zeros((len(nodes), len(df)), dtype=np.float32)

    prot_idx = {}
    for i, row in enumerate(df.itertuples()):
        prot_id = row.accessions.split(';')[0]
        prot_idx[prot_id] = i
        for go_id in row.prop_annotations:
            if go_id in node_idx:
                if with_ic:
                    annotations[node_idx[go_id], i + 1] = 1
                else:
                    annotations[node_idx[go_id], i] = 1
    return g, annotations, prot_idx


if __name__ == '__main__':
    main()
"""A2C / DQN agents and helpers for ViZDoom scenarios.

Provides frame preprocessing/stacking, environment construction, greedy
evaluation loops for trained DQN/DDDQN and A2C policies, and the A2C
training agent itself.
"""

from models import Actor, Critic
from memory import ReplayMemory
import torch
import torch.nn as nn
import numpy as np
from utils import *
from vizdoom import *
from collections import deque
from torch.autograd import Variable
import random
import skimage
import skimage.transform
# FIX: skimage.color is used by screen_process() but was never imported;
# importing skimage / skimage.transform does not guarantee the color
# submodule is bound.
import skimage.color
from models import *
import time
import itertools as it
import os
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from a2c_agent import *  # NOTE(review): also exports an A2CAgent that the local class below shadows


def setup_experiment(title, logdir):
    """Create a timestamped TensorBoard writer under ``logdir``.

    Returns
    -------
    (SummaryWriter, str) : the writer and the experiment name.
    """
    experiment_name = "{}@{}".format(
        title, datetime.now().strftime("%d.%m.%Y-%H:%M:%S"))
    writer = SummaryWriter(log_dir=os.path.join(logdir, experiment_name))
    return writer, experiment_name


def stack_frames(stacked_frames, state, is_new_episode, maxlen=4, resize=(120, 160)):
    """Preprocess ``state`` and push it onto the frame stack.

    Parameters
    ----------
    stacked_frames : collections.deque or None
        Deque of maximum length ``maxlen`` holding the recent frames.
    state : np.ndarray
        Raw screen as returned by :func:`get_state`.
    is_new_episode : bool
        If True, the (re)initialised deque is filled with ``maxlen`` copies
        of the current frame.
    maxlen : int, optional
        Number of frames to stack (default 4).
    resize : tuple, optional
        Target (H, W) of each preprocessed frame (default (120, 160)).

    Returns
    -------
    (torch.Tensor, collections.deque)
        The stacked state with a leading batch dimension, and the updated deque.
    """
    # FIX: forward ``resize`` to screen_process(); it was previously ignored,
    # so callers passing a non-default resolution (e.g. A2CAgent.train) were
    # silently resized to the 120x160 default.
    frame = screen_process(state, resize)
    frame = torch.tensor(frame, dtype=torch.float)

    if is_new_episode:
        # New episode: seed the deque with maxlen copies of the first frame.
        # frame[None] adds a leading dimension so frames concatenate along dim 0.
        stacked_frames = deque([frame[None] for _ in range(maxlen)], maxlen=maxlen)
    else:
        # Appending automatically evicts the oldest frame.
        stacked_frames.append(frame[None])

    # First dimension indexes stacked frames; unsqueeze adds the batch dim.
    stacked_state = torch.cat(tuple(stacked_frames), dim=0).unsqueeze(0)
    return Variable(stacked_state), stacked_frames


def predict_action(explore_start, explore_stop, decay_rate, decay_step,
                   state, model, device, actions):
    """Epsilon-greedy action selection with exponentially decaying epsilon.

    Returns
    -------
    (action, explore_probability)
        The chosen one-hot action and the exploration probability used.
    """
    exp_exp_tradeoff = np.random.rand()
    explore_probability = explore_stop + \
        (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)

    if explore_probability > exp_exp_tradeoff:
        # Explore: uniformly random action.
        action = random.choice(actions)
    else:
        # Exploit: greedy action w.r.t. the Q-network.
        Qs = model.forward(state.to(device))
        action = actions[int(torch.max(Qs, 1)[1][0])]
    return action, explore_probability


def create_environment(scenario='basic', window=False):
    """Create the ViZDoom game instance plus the one-hot action set.

    NOTE(review): ``scenario`` is currently ignored — the configuration is
    hard-coded to defend_the_center. Kept as-is to preserve behaviour;
    confirm intent before generalising the paths.

    Returns
    -------
    (game, actions) : the DoomGame instance and the list of one-hot actions.
    """
    game = DoomGame()
    game.load_config("ViZDoom/scenarios/defend_the_center.cfg")
    game.set_doom_scenario_path("ViZDoom/scenarios/defend_the_center.wad")
    game.set_window_visible(window)
    game.init()

    # Enumerate every button combination as the action set.
    n_actions = game.get_available_buttons_size()
    actions = [list(a) for a in it.product([0, 1], repeat=n_actions)]
    return game, actions


def screen_process(screen, size=(120, 160)):
    """Resize the screen to ``size`` and convert it to grayscale (2-D float array)."""
    screen = skimage.transform.resize(screen, size)
    screen = skimage.color.rgb2gray(screen)
    return screen


def get_state(game):
    """Return the current screen buffer transposed to (H, W, C)."""
    state = game.get_state().screen_buffer  # shape = (3, 480, 640)
    return np.transpose(state, [1, 2, 0])


def test_environment(weights, scenario='defend_the_center', model_type='DQN',
                     window=False, total_episodes=3, frame_skip=2, stack_size=4):
    """Run a trained DQN/DDDQN greedily for a few episodes.

    Parameters
    ----------
    weights : str, path to the .pth state dict to load.
    scenario : str, Doom scenario name (default 'defend_the_center').
    model_type : str, 'DQN' or anything else for DDDQN (default 'DQN').
    window : bool, render the game window (default False).
    total_episodes : int, number of evaluation episodes (default 3).
    frame_skip : int, frames to repeat each action on (default 2).
    stack_size : int, number of stacked frames fed to the network (default 4).
    """
    game, actions = create_environment(scenario=scenario, window=window)
    n_actions = len(actions)

    if model_type == 'DQN':
        model = DQN(stack_size=stack_size, n_actions=n_actions)
    else:
        model = DDDQN(stack_size=stack_size, n_actions=n_actions)
    state_dict = torch.load(weights)
    model.load_state_dict(state_dict)

    for _ in range(total_episodes):
        game.new_episode()
        is_finished = game.is_episode_finished()
        state = get_state(game)
        state, stacked_frames = stack_frames(None, state, True, stack_size)

        while not is_finished:
            q = model.forward(state)
            action = actions[int(torch.max(q, 1)[1][0])]  # greedy action
            reward = game.make_action(action, frame_skip)
            is_finished = game.is_episode_finished()
            if not is_finished:
                state = get_state(game)
                state, stacked_frames = stack_frames(
                    stacked_frames, state, False, stack_size)
            time.sleep(0.02)  # slow down rendering for a watchable replay

        print("Total reward:", game.get_total_reward())
        time.sleep(0.1)
    game.close()


def test_a2c(weights, scenario='defend_the_center', window=False,
             total_episodes=5, frame_skip=2, stack_size=4, resolution=(120, 160)):
    """Run a trained A2C actor greedily (argmax of the policy) for a few episodes."""
    game, actions = create_environment(scenario=scenario, window=window)

    # FIX: the original call omitted ``scenario`` and raised a TypeError
    # (A2CAgent.__init__ takes actions, scenario, stack_size, resolution).
    agent = A2CAgent(actions, scenario, stack_size, resolution)
    state_dict = torch.load(weights)
    agent.actor.load_state_dict(state_dict)

    for _ in range(total_episodes):
        game.new_episode()
        is_finished = game.is_episode_finished()
        state = get_state(game)
        state, stacked_frames = stack_frames(None, state, True, stack_size)

        while not is_finished:
            # NOTE(review): agent.actor lives on agent.device (possibly CUDA)
            # while ``state`` is on CPU — confirm device handling before use.
            policy = agent.actor.forward(state)
            action_id = torch.argmax(policy.probs)
            action = actions[action_id]
            reward = game.make_action(action, frame_skip)
            is_finished = game.is_episode_finished()
            if not is_finished:
                state = get_state(game)
                state, stacked_frames = stack_frames(
                    stacked_frames, state, False, stack_size)
            time.sleep(0.02)

        print("Total reward:", game.get_total_reward())
        time.sleep(0.1)
    game.close()


class A2CAgent:
    """Advantage Actor-Critic agent with per-episode Monte-Carlo updates."""

    def __init__(self, actions, scenario, stack_size, resolution):
        # possible one-hot actions and the scenario used for reward shaping
        self.actions = actions
        self.scenario = scenario

        # sizes
        self.n_actions = len(actions)
        self.stack_size = stack_size
        self.resolution = resolution

        # per-episode memory, cleared after each training step
        self.rewards = []
        self.log_probs = []
        self.values = []

        # device
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(self.device)

        # networks
        self.actor = Actor(self.n_actions, stack_size).to(self.device)
        self.critic = Critic(1, stack_size).to(self.device)

    def _reshape_reward(self, reward, obs_cur, obs_prev):
        """Scenario-specific reward shaping.

        Parameters
        ----------
        obs_cur, obs_prev : dict with 'kills', 'health' and 'ammo' keys for
            the current and previous observation.

        Returns None for an unknown scenario (implicit fall-through).
        """
        if self.scenario == 'basic':
            return reward
        elif self.scenario == 'defend_the_center':
            # Penalise losing health and wasting ammo without a kill.
            health_decrease = -0.2 if obs_cur['health'] < obs_prev['health'] else 0
            if obs_cur['ammo'] < obs_prev['ammo'] and reward <= 0:
                miss_penalty = -0.2  # shot fired, nothing gained
            else:
                miss_penalty = 0
            return reward + miss_penalty + health_decrease
        elif self.scenario == 'deadly_corridor':
            ammo_decrease = int(obs_cur['ammo'] < obs_prev['ammo'])
            health_decrease = int(obs_cur['health'] < obs_prev['health'])
            # NOTE(review): ``penalty`` is negative and then *subtracted*
            # (net +0.5 per resource drop) and ``kill_reward`` is positive
            # when the kill count *decreases*. Both signs look inverted —
            # kept as-is to preserve behaviour; confirm the intended shaping.
            penalty = -0.5 * (ammo_decrease + health_decrease)
            kill_reward = (obs_prev['kills'] - obs_cur['kills']) * 40
            return reward / 2.0 - penalty + kill_reward

    def train(self, environment, writer=None, total_episodes=20, frame_skip=4,
              actor_lr=1e-4, critic_lr=1e-4, freq=3):
        """Train actor and critic for ``total_episodes`` episodes.

        ``environment`` is a wrapper exposing game/new_episode, get_state,
        init_observations, make_action, get_observation_cur,
        is_episode_finished and a ``scenario`` attribute. The actor state
        dict is checkpointed every ``freq`` episodes.
        """
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        torch.set_default_dtype(torch.float)

        for episode in range(total_episodes):
            # init new episode
            environment.game.new_episode()
            state = environment.get_state()
            obs_cur, obs_prev = environment.init_observations()

            # prepare stack of frames
            state, stacked_frames = stack_frames(
                None, state, True, self.stack_size, self.resolution)
            state = Variable(state, requires_grad=True)

            is_finished = environment.is_episode_finished()
            while not is_finished:
                # sample action from the stochastic softmax policy
                policy = self.actor(state.to(self.device))
                value = self.critic(state.to(self.device))
                action_id = policy.sample()
                log_prob = policy.log_prob(action_id).unsqueeze(0)
                action = self.actions[action_id]

                # make action and get the shaped reward
                reward = environment.make_action(action, frame_skip)
                obs_cur = environment.get_observation_cur()
                reward = self._reshape_reward(reward, obs_cur, obs_prev)
                obs_prev = obs_cur.copy()

                # fill per-episode memory
                self.log_probs.append(log_prob)
                self.rewards.append(reward)
                self.values.append(value)

                is_finished = environment.is_episode_finished()
                if not is_finished:
                    next_state = environment.get_state()
                    state, stacked_frames = stack_frames(
                        stacked_frames, next_state, False,
                        self.stack_size, self.resolution)
                    state = Variable(state, requires_grad=True)
                else:
                    # Episode over: one Monte-Carlo update, then clear memory.
                    print('Episode finished, training...')
                    actor_loss, critic_loss = self.train_on_episode()
                    episode_reward = sum(self.rewards)
                    kill_count = obs_cur['kills']
                    print(
                        "Episode: %d, Total reward: %.2f, Kill Count: %.1f, Actor loss: %.4f, Critic loss: %.4f" % (
                            episode, episode_reward, kill_count, actor_loss, critic_loss))
                    self.log_probs, self.rewards, self.values = [], [], []

            # checkpoint the actor every ``freq`` episodes
            if (episode % freq) == 0:
                model_file = 'models/' + environment.scenario + '/' + 'A2C' + '_' + str(episode) + '.pth'
                torch.save(self.actor.state_dict(), model_file)
                print('Saved model to ' + model_file)

    def discount_rewards(self, gamma=0.99):
        """Compute discounted returns over the stored episode rewards.

        The running sum is reset at every non-zero raw reward (Pong-style
        segmentation of the episode).
        """
        # FIX: force a float array — np.zeros_like() on a list of ints would
        # silently truncate the discounted values to integers.
        discounted_rewards = np.zeros_like(self.rewards, dtype=np.float64)
        running_add = 0
        for t in reversed(range(len(self.rewards))):
            if self.rewards[t] != 0:
                running_add = 0
            running_add = running_add * gamma + self.rewards[t]
            discounted_rewards[t] = running_add
        return discounted_rewards

    def train_on_episode(self):
        """One actor/critic gradient step from the episode memory.

        Returns (actor_loss, critic_loss) as Python floats.
        """
        discounted_rewards = torch.tensor(
            self.discount_rewards(), dtype=torch.float).reshape(-1, 1).to(self.device)
        log_probs = torch.cat(self.log_probs).to(self.device)
        values = torch.cat(self.values).to(self.device)

        # Advantage is detached for the actor so only the policy gradient flows.
        advantage = discounted_rewards - values
        actor_loss = -(log_probs * advantage.detach()).mean()
        critic_loss = advantage.pow(2).mean()

        self.actor_optimizer.zero_grad()
        self.critic_optimizer.zero_grad()
        actor_loss.backward()
        critic_loss.backward()
        self.actor_optimizer.step()
        self.critic_optimizer.step()
        return actor_loss.cpu().item(), critic_loss.cpu().item()
"""Fine-tune a saved Keras model on the oil-class dataset and report accuracy.

Usage: python oil_class.py EPOCHS
"""

import os
import sys
import time
import random

from models import cnn
from save_plot import save_plot
from dataset import load_dataset
from model_callbacks import model_callbacks
from pycm import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.optimizers import Adam
from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import tensorflow as tf
from keras import backend as K
from keras.utils import plot_model
from keras.models import load_model
from keras.constraints import max_norm
from keras import metrics, regularizers, initializers

np.random.seed(14)

MODEL_PATH = "model-save/"
MODEL_SAVED = MODEL_PATH + "model.h5"


def main(epoch):
    """Load the saved model, fine-tune it for ``epoch`` epochs on a fresh
    70/30 split, evaluate on the held-out part and save the training plot.
    """
    img_array, labels_array, class_weight = load_dataset()

    # NOTE(review): random_state=None means a different split on every run.
    trainData, testData, trainLabels, testLabels = train_test_split(
        img_array, labels_array, test_size=0.3, shuffle=True, random_state=None)

    print("\n[INFO] trainData shape: {}\n testData shape: {}\n\n\
trainLabels shape: {}\n testLabels shape: {}\n".format(trainData.shape,
                                                       testData.shape,
                                                       trainLabels.shape,
                                                       testLabels.shape))

    # Resume from the previously saved model (weights + optimizer state).
    model = load_model(MODEL_SAVED, compile=True)
    print("Loaded model from disk")
    print(model.summary())
    time.sleep(2)

    # Training settings.
    BATCH_SIZE = 10
    EPOCHS = epoch
    calls = model_callbacks(EPOCHS, BATCH_SIZE)

    history = model.fit(trainData, trainLabels,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS,
                        verbose=1,
                        validation_data=(testData, testLabels),
                        class_weight=class_weight,
                        shuffle=True,
                        callbacks=calls)

    # Evaluate on the held-out split.
    print("Loaded model from disk to evaluate")
    scores = model.evaluate(testData, testLabels)
    print("\nAccuracy: %.2f%%" % (scores[1] * 100))

    save_plot(history)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("[INFO] Uso: python oil_class.py EPOCHS")
        # FIX: exit after printing usage; previously execution fell through
        # to sys.argv[1] and crashed with IndexError/ValueError.
        sys.exit(1)
    epoch = int(sys.argv[1])
    main(epoch)

# last improved epoch:
"""Prepare and submit qsub batches that run LSST survey-strategy metrics."""

import os
import numpy as np
from optparse import OptionParser
import glob
import pandas as pd


def go_for_batch(toproc, splitSky, dbDir, dbExtens, outDir, metricName,
                 nodither, nside, fieldType, band, pixelmap_dir, npixels,
                 proxy_level):
    """Prepare and start batches for one observing strategy.

    Parameters
    ----------
    toproc: numpy array / pandas row
        data (dbName, ...) to process
    splitSky: bool
        split the batches in sky (RA) patches
    dbDir: str
        dir where observing strategy files are located
    dbExtens: str
        extension of obs. strategy files (npy or db)
    outDir: str
        output directory for the produced data
    metricName: str
        name of the metric to run
    nodither: bool
        remove the dithering (useful for dedicated DD studies)
    nside: int
        healpix nside parameter
    fieldType: str
        type of field to process (DD, WFD, Fakes)
    band: str
        band to consider (for some metrics like SNR)
    pixelmap_dir: str
        directory with pixel maps (matched pixel positions/observations)
    npixels: int
        number of pixels to process (-1 means all)
    proxy_level: int
        proxy level - for NSN metric only (0, 1 or 2)
    """
    dbName = toproc['dbName']

    if pixelmap_dir == '':
        # First case: no pixel map — run on all pixels, with the option of
        # splitting the sky into RA slices.
        RAs = [0., 360.]
        if splitSky:
            RAs = np.linspace(0., 360., 11)

        for ira in range(len(RAs) - 1):
            RA_min = RAs[ira]
            RA_max = RAs[ira + 1]
            # FIX: the original positional call passed
            # (..., npixels, proxy_level, npixels), which bound
            # npixels_tot=proxy_level and proxy_level=npixels (the signature
            # order is npixels, npixels_tot, proxy_level). Keyword arguments
            # make the binding explicit and match the pixel-map branch below.
            batchclass(dbName, dbDir, dbExtens,
                       'run_scripts/metrics/run_metrics', outDir, 8, 1,
                       metricName, toproc, nodither, nside, fieldType,
                       RA_min, RA_max, -1.0, -1.0,
                       band=band, pixelmap_dir=pixelmap_dir,
                       npixels=npixels, npixels_tot=npixels,
                       proxy_level=proxy_level)
    else:
        # Second case: pixel maps are available — run on them.
        # First: load the sky map.
        fileSky = glob.glob('{}/skypatch*.npy'.format(pixelmap_dir))
        skyMap = np.load(fileSky[0])
        print(skyMap)

        # Total number of pixels over the sky map; only needed when a
        # positive npixels is requested (npixels == -1 means all pixels).
        npixels_tot = 0
        if npixels > 0:
            for val in skyMap:
                search_path = '{}/{}/{}_{}_nside_{}_{}_{}_{}_{}.npy'.format(
                    pixelmap_dir, dbName, dbName, fieldType, nside,
                    val['RAmin'], val['RAmax'], val['Decmin'], val['Decmax'])
                ffi = glob.glob(search_path)
                if len(ffi) == 0:
                    # NOTE(review): execution still indexes ffi[0] below and
                    # will raise IndexError — kept to preserve behaviour.
                    print('potential problem here', search_path)
                tab = np.load(ffi[0])
                npixels_tot += len(np.unique(tab['healpixID']))

        # Second pass: submit one batch per sky patch, scaling the number of
        # pixels to process proportionally to the patch size.
        for val in skyMap:
            search_path = '{}/{}/{}_{}_nside_{}_{}_{}_{}_{}.npy'.format(
                pixelmap_dir, dbName, dbName, fieldType, nside,
                val['RAmin'], val['RAmax'], val['Decmin'], val['Decmax'])
            ffi = glob.glob(search_path)
            if len(ffi) == 0:
                print('potential problem here', search_path)
            tab = np.load(ffi[0])
            npixels_map = len(np.unique(tab['healpixID']))

            npixel_proc = npixels
            if npixels > 0:
                num = float(npixels * npixels_map) / float(npixels_tot)
                npixel_proc = int(round(num))

            batchclass(dbName, dbDir, dbExtens,
                       'run_scripts/metrics/run_metrics', outDir, 8, 1,
                       metricName, toproc, nodither, nside, fieldType,
                       val['RAmin'], val['RAmax'],
                       val['Decmin'], val['Decmax'],
                       band=band, pixelmap_dir=pixelmap_dir,
                       npixels=npixel_proc, proxy_level=proxy_level,
                       npixels_tot=npixels)


class batchclass:
    def __init__(self, dbName, dbDir, dbExtens, scriptref, outDir, nproccomp,
                 saveData, metric, toprocess, nodither, nside,
                 fieldType='WFD', RA_min=0.0, RA_max=360.0,
                 Dec_min=-1.0, Dec_max=-1.0, band='',
                 pixelmap_dir='', npixels=0, npixels_tot=0, proxy_level=-1):
        """Prepare and launch one qsub batch.

        Parameters
        ----------
        dbName: str
            observing strategy name
        dbDir: str
            location dir of obs. strat. file
        dbExtens: str
            obs. strat. file extension (db or npy)
        scriptref: str
            reference script to use in the batch
        outDir: str
            output directory location
        nproccomp: int
            number of multiproc used
        saveData: bool
            save the data on disk or not
        metric: str
            name of the metric to run
        toprocess: numpy array
            array of data to process
        nodither: bool
            remove dither (can be useful for DD studies)
        nside: int
            healpix nside parameter
        fieldType: str, opt
            type of field to process - DD, WFD, Fakes (default: WFD)
        RA_min, RA_max: float, opt
            RA range of the area to process (default: 0.0, 360.0)
        Dec_min, Dec_max: float, opt
            Dec range of the area to process (default: -1.0, -1.0)
        band: str, opt
            band to process (default: '')
        pixelmap_dir: str, opt
            location directory of pixelmaps (default: '')
        npixels: int, opt
            number of pixels to process (default: 0)
        npixels_tot: int, opt
            number of pixels initially to process (default: 0)
        proxy_level: int, opt
            proxy level for NSN metric (default: -1)
        """
        self.dbName = dbName
        self.dbDir = dbDir
        self.dbExtens = dbExtens
        self.scriptref = scriptref
        self.outDir = outDir
        self.nproccomp = nproccomp
        self.saveData = saveData
        self.metric = metric
        self.toprocess = toprocess
        self.nodither = nodither
        self.RA_min = RA_min
        self.RA_max = RA_max
        self.Dec_min = Dec_min
        self.Dec_max = Dec_max
        self.band = band
        self.nside = nside
        self.fieldType = fieldType
        self.pixelmap_dir = pixelmap_dir
        self.npixels = npixels
        self.npixels_tot = npixels_tot
        self.proxy_level = proxy_level

        # Construction immediately prepares and submits the batch.
        dirScript, name_id, log = self.prepareOut()
        self.script(dirScript, name_id, log, toprocess)

    def prepareOut(self):
        """Prepare the batch: create the script/log directories and build a
        unique id for this run.

        Returns (dirScript, name_id, log).
        """
        self.cwd = os.getcwd()
        dirScript = self.cwd + "/scripts"
        if not os.path.isdir(dirScript):
            os.makedirs(dirScript)

        dirLog = self.cwd + "/logs"
        if not os.path.isdir(dirLog):
            os.makedirs(dirLog)

        # Unique id encoding the processing parameters
        # (renamed from ``id`` — that shadowed the builtin).
        batch_id = '{}_{}_{}_{}{}_{}_{}_{}_{}'.format(
            self.dbName, self.nside, self.fieldType, self.metric,
            self.nodither, self.RA_min, self.RA_max,
            self.Dec_min, self.Dec_max)
        if self.proxy_level > -1:
            batch_id += '_proxy_level_{}'.format(self.proxy_level)
        if self.pixelmap_dir != '':
            batch_id += '_frompixels_{}_{}'.format(self.npixels, self.npixels_tot)

        name_id = 'metric_{}'.format(batch_id)
        log = dirLog + '/' + name_id + '.log'
        return dirScript, name_id, log

    def script(self, dirScript, name_id, log, proc):
        """Generate the qsub shell script and submit it with os.system.

        Parameters
        ----------
        dirScript: str
            location directory of the script
        name_id: str
            id for the script
        log: str
            log file path
        proc: numpy array
            data to process
        """
        # qsub command (heredoc wrapper around the actual python call)
        qsub = 'qsub -P P_lsst -l sps=1,ct=12:00:00,h_vmem=16G -j y -o {} -pe multicores {} <<EOF'.format(
            log, self.nproccomp)

        scriptName = dirScript + '/' + name_id + '.sh'

        # Fill the script.
        script = open(scriptName, "w")
        script.write(qsub + "\n")
        script.write("#!/bin/env bash\n")
        script.write(" cd " + self.cwd + "\n")
        script.write(" echo 'sourcing setups' \n")
        script.write(" source setup_release.sh Linux\n")
        script.write("echo 'sourcing done' \n")
        cmd_ = self.batch_cmd(proc)
        script.write(cmd_ + " \n")
        script.write("EOF" + "\n")
        script.close()

        os.system("sh " + scriptName)

    def batch_cmd(self, proc):
        """Build the python command line executed inside the batch.

        Parameters
        ----------
        proc: numpy array
            data to process
        """
        cmd = 'python {}.py --dbDir {} --dbName {} --dbExtens {}'.format(
            self.scriptref, self.dbDir, proc['dbName'], self.dbExtens)
        cmd += ' --nproc {} --nside {} --simuType {}'.format(
            proc['nproc'], proc['nside'], proc['simuType'])
        cmd += ' --outDir {}'.format(self.outDir)
        cmd += ' --fieldType {}'.format(self.fieldType)
        cmd += ' --saveData {}'.format(self.saveData)
        cmd += ' --metric {}'.format(self.metric)
        cmd += ' --coadd {}'.format(proc['coadd'])
        if self.nodither != '':
            cmd += ' --nodither {}'.format(self.nodither)
        cmd += ' --RAmin {}'.format(self.RA_min)
        cmd += ' --RAmax {}'.format(self.RA_max)
        cmd += ' --Decmin {}'.format(self.Dec_min)
        cmd += ' --Decmax {}'.format(self.Dec_max)
        if self.band != '':
            cmd += ' --band {}'.format(self.band)
        if self.pixelmap_dir != '':
            cmd += ' --pixelmap_dir {}'.format(self.pixelmap_dir)
            cmd += ' --npixels {}'.format(self.npixels)
        if self.proxy_level > -1:
            cmd += ' --proxy_level {}'.format(self.proxy_level)
        return cmd


# NOTE(review): the driver below runs at import time; guarding it with
# ``if __name__ == '__main__'`` would change import behaviour — left as-is.
parser = OptionParser()
parser.add_option("--dbList", type="str", default='WFD.txt',
                  help="dbList to process [%default]")
parser.add_option("--metricName", type="str", default='SNR',
                  help="metric to process [%default]")
parser.add_option("--dbDir", type="str",
                  default='/sps/lsst/cadence/LSST_SN_PhG/cadence_db/fbs_1.4/db',
                  help="db dir [%default]")
parser.add_option("--dbExtens", type="str", default='npy',
                  help="db extension [%default]")
# FIX: the help strings below were copy-pasted as "db extension".
parser.add_option("--nodither", type="str", default='',
                  help="remove the dithering [%default]")
parser.add_option("--splitSky", type="int", default=0,
                  help="split the sky in RA patches [%default]")
parser.add_option("--band", type="str", default='',
                  help="band to process [%default]")
parser.add_option("--nside", type="int", default=64,
                  help="nside healpix parameter[%default]")
parser.add_option("--fieldType", type=str, default='WFD',
                  help="field type[%default]")
parser.add_option("--pixelmap_dir", type=str, default='',
                  help="dir where to find pixel maps[%default]")
parser.add_option("--npixels", type=int, default=0,
                  help="number of pixels to process[%default]")
parser.add_option("--outDir", type=str,
                  default='/sps/lsst/users/gris/MetricOutput_pixels',
                  help="output directory[%default]")
parser.add_option("--proxy_level", type=int, default=-1,
                  help="proxy level - For NSN metric only[%default]")

opts, args = parser.parse_args()

print('Start processing...')

dbList = opts.dbList
metricName = opts.metricName
dbDir = opts.dbDir
band = opts.band
dbExtens = opts.dbExtens
outDir = opts.outDir
nodither = opts.nodither
splitSky = opts.splitSky
nside = opts.nside
fieldType = opts.fieldType
pixelmap_dir = opts.pixelmap_dir
npixels = opts.npixels
proxy_level = opts.proxy_level

toprocess = pd.read_csv(dbList, comment='#')
print('there', toprocess, type(toprocess), toprocess.size)

# One batch submission per observing strategy listed in the csv.
for index, proc in toprocess.iterrows():
    myproc = go_for_batch(proc, splitSky, dbDir, dbExtens, outDir,
                          metricName, nodither, nside, fieldType, band,
                          pixelmap_dir, npixels, proxy_level)
#!/usr/bin/env python # Copyright (c) 2009-2019 Quan Xu <qxuchn@gmail.com> # # This module is free software. You can redistribute it and/or modify it under # the terms of the MIT License, see the file COPYING included with this # distribution. """Build gene regulatory network""" # Python imports import os import math import re import warnings import numpy as np import pandas as pd from scipy.stats import rankdata from sklearn.preprocessing import minmax_scale import dask.dataframe as dd from dask.diagnostics import ProgressBar from loguru import logger from genomepy import Genome import pyranges as pr import ananse from ananse.utils import region_gene_overlap warnings.filterwarnings("ignore") class Network(object): def __init__( self, ncore=1, genome="hg38", gene_bed=None, include_promoter=False, include_enhancer=True, ): """[infer cell type-specific gene regulatory network] Arguments: object {[type]} -- [description] Keyword Arguments: ncore {int} -- [Specifies the number of threads to use during analysis.] (default: {1}) genome {str} -- [The genome that is used for the gene annotation and the enhancer location.] (default: {"hg38"}) gene_bed {[type]} -- [Gene annotation for the genome specified with -g as a 12 column BED file.] (default: {None}) include_promoter {bool} -- [Include or exclude promoter peaks (<= TSS +/- 2kb) in network inference.] (default: {False}) include_enhancer {bool} -- [Include or exclude enhancer peaks (> TSS +/- 2kb) in network inference.] 
(default: {True}) Raises: TypeError: [description] """ self.ncore = ncore self.genome = genome g = Genome(self.genome) self.gsize = g.sizes_file # # Motif information file # if pfmfile is None: # self.pfmfile = "../data/gimme.vertebrate.v5.1.pfm" # else: # self.pfmfile = pfmfile # self.motifs2factors = self.pfmfile.replace(".pfm", ".motif2factors.txt") # self.factortable = self.pfmfile.replace(".pfm", ".factortable.txt") package_dir = os.path.dirname(ananse.__file__) # Gene information file if self.genome == "hg38": if gene_bed is None: self.gene_bed = os.path.join(package_dir, "db", "hg38.genes.bed") else: self.gene_bed = gene_bed elif self.genome == "hg19": if gene_bed is None: self.gene_bed = os.path.join(package_dir, "db", "hg19.genes.bed") else: self.gene_bed = gene_bed else: if gene_bed is None: raise TypeError("Please provide a gene bed file with -a argument.") else: self.gene_bed = gene_bed # self.promoter = promoter self.include_promoter = include_promoter self.include_enhancer = include_enhancer def unique_enhancers(self, fname, chrom=None): """Extract a list of unique enhancers. Parameters ---------- fname : str File name of a tab-separated file that contains an 'enhancer' column. chrom : str, optional Only return enhancers on this chromosome. Returns ------- PyRanges object with enhancers """ p = re.compile("[:-]") logger.info("reading enhancers") # Read enhancers from binding file # This is relatively slow for a large file. May need some optimization. 
enhancers = pd.read_table(fname, usecols=["enhancer"])["enhancer"] enhancers = enhancers.unique() # Split into columns and create PyRanges object p = re.compile("[:-]") enhancers = pr.PyRanges( pd.DataFrame( [re.split(p, e) for e in enhancers], columns=["Chromosome", "Start", "End"], ) ) return enhancers def distance_weight( self, include_promoter=False, include_enhancer=True, alpha=1e4, maximum_distance=100_000, full_weight_region=5000, promoter_region=2000, ): """Build weight distribution based on distance to TSS. The basic idea is similar to Wang et al. [1], with some modifications. The resulting weight ranges from 0 (far from the TSS) to 1 (near the TSS) and is based on several different variables. If `include_promoter` is `True`, then distances smaller than `promoter_region` are included, otherwise they are excluded, the weight is set to 0. The `full_weight_region` parameters determines the region where the weight will be 1, regardless of distance. The `maximum_distance` parameter sets the maximum distance to consider. The weight decays with an increasing distance, starting from 1 at `full_weight_region` to 0 at `maximum_distance`. The `alpha` parameters controls the decay. Parameters ---------- include_promoer : bool, optional Include promoter regions. Default is False. include_enhancer : bool, optional Include enhancer regions, ie. regions that are distal to the promoter. alpha : float, optional Controls weight decay, default is 1e4. maximum_distance : int, optional Maximum distance from TSS to consider. Default is 100kb. full_weight_region : int, optional Distance where regions will receive the full weight. Default is 5kb. promoter_region : int, optional Promoter region, default is 2kb. Returns ------- DataFrame with two columns: distance and weight. References ---------- ..[1] Wang S, Zang C, Xiao T, Fan J, Mei S, Qin Q, Wu Q, Li X, Xu K, He HH, Brown M, Meyer CA, Liu XS. "Modeling cis-regulation with a compendium of genome-wide histone H3K27ac profiles." 
        Genome Res. 2016 Oct;26(10):1417-1429.
        doi: 10.1101/gr.201574.115. PMID: 27466232
        """
        # Decay rate: chosen so the sigmoid weight below reaches 1/3 at
        # distance `alpha` (scaled relative to the 100 kb unit of the formula).
        u = -math.log(1.0 / 3.0) * 1e5 / alpha

        # Promoter / enhancer contributions are either fully on (1) or off (0).
        promoter_weight = int(include_promoter)
        enhancer_weight = int(include_enhancer)

        # Constant weight inside the promoter region.
        weight1 = pd.DataFrame(
            {
                "weight": [promoter_weight for z in range(0, promoter_region + 1)],
                "dist": range(0, promoter_region + 1),
            }
        )

        # Constant weight between the promoter edge and the full-weight region.
        weight2 = pd.DataFrame(
            {
                "weight": [
                    enhancer_weight
                    for z in range(promoter_region + 1, full_weight_region + 1)
                ],
                "dist": range(promoter_region + 1, full_weight_region + 1),
            }
        )

        # Sigmoid-shaped decay beyond the full-weight region, out to
        # `maximum_distance` (parameter of this method, defined above).
        weight3 = pd.DataFrame(
            {
                "weight": [
                    enhancer_weight
                    * 2.0
                    * math.exp(-u * math.fabs(z) / 1e5)
                    / (1.0 + math.exp(-u * math.fabs(z) / 1e5))
                    for z in range(1, maximum_distance - full_weight_region + 1)
                ],
                "dist": range(full_weight_region + 1, maximum_distance + 1),
            }
        )

        # One row per distance 0..maximum_distance with its weight.
        weight = pd.concat([weight1, weight2, weight3])
        return weight

    def enhancer2gene(
        self,
        peak_pr,
        up=100_000,
        down=100_000,
        alpha=1e4,
        promoter=2000,
        full_weight_region=5000,
    ):
        """Couple enhancers to genes.

        Parameters
        ----------
        peak_pr : PyRanges object
            PyRanges object with enhancer regions.
        up : int, optional
            Upstream maximum distance, by default 100kb.
        down : int, optional
            Downstream maximum distance, by default 100kb.
        alpha : float, optional
            Parameter to control weight decay, by default 1e4.
        promoter : int, optional
            Promoter region, by default 2000.
        full_weight_region : int, optional
            Region that will receive full weight, by default 5000.

        Returns
        -------
        pandas.DataFrame
            DataFrame with enhancer regions, gene names, distance and weight.
        """
        genes = region_gene_overlap(peak_pr, self.gene_bed)

        # Get the distance from center of enhancer to TSS
        # Correct for extension
        genes["dist"] = (
            (genes["Start_b"] + genes["End_b"]) / 2 - genes["Start"]
        ).astype(int)
        genes.loc[genes["Strand"] == "+", "dist"] -= up
        genes.loc[genes["Strand"] == "-", "dist"] -= down
        genes["dist"] = np.abs(genes["dist"])

        # Create region in chr:start:end format
        genes["loc"] = (
            genes["Chromosome"].astype(str)
            + ":"
            + genes["Start_b"].astype(str)
            + "-"
            + genes["End_b"].astype(str)
        )

        # Keep the gene-enhancer combination with the smallest distance
        genes = genes.sort_values("dist").drop_duplicates(
            subset=["loc", "Name"], keep="first"
        )

        # Keep only region, gene name and distance
        genes = genes.set_index("loc")[["Name", "dist"]].rename(
            columns={"Name": "gene"}
        )

        # Get distance-based weight
        weight = self.distance_weight(
            include_promoter=self.include_promoter,
            include_enhancer=self.include_enhancer,
            alpha=alpha,
            promoter_region=promoter,
            full_weight_region=full_weight_region,
        ).set_index("dist")
        genes = genes.join(weight, on="dist")

        return genes

    def aggregate_binding(
        self,
        binding_fname,
        tfs=None,
        up=1e5,
        down=1e5,
        alpha=None,
        promoter=2000,
        full_weight_region=5000,
        combine_function="sum",
    ):
        """Summarize all binding signal per gene per TF.

        Return a dask delayed computation object.

        Parameters
        ----------
        binding_fname : str
            Filename of binding network.
        tfs : list, optional
            List of transcription factor names, by default None, which means
            that all TFs will be used.
        up : int, optional
            Maximum upstream region to include, by default 1e5
        down : [type], optional
            Maximum downstream region to include, by default 1e5
        alpha : float, optional
            Distance at which the weight will be half, by default None
        promoter : int, optional
            Promoter region, by default 2000
        full_weight_region : int, optional
            Region that will receive full weight, regardless of distance, by
            default 5000.
        combine_function : str, optional
            How to combine signal of weighted enhancers, by default "sum".
            Valid options are "sum", "mean" or "max".

        Returns
        -------
        dask.DataFrame
            DataFrame with delayed computations.
        """
        if combine_function not in ["mean", "max", "sum"]:
            raise ValueError(
                "Unknown combine function, valid options are: mean, max, sum"
            )

        maximum_distance = max(up, down)
        if alpha is None:
            # Default: weight halves at one tenth of the maximum distance.
            alpha = maximum_distance / 10

        if promoter > maximum_distance:
            raise ValueError(
                "promoter region is larger than the maximum distance to use"
            )

        # Get list of unique enhancers from the binding file
        enhancer_pr = self.unique_enhancers(binding_fname)

        # Link enhancers to genes on basis of distance to annotated TSS
        gene_df = self.enhancer2gene(
            enhancer_pr,
            up=up,
            down=down,
            alpha=alpha,
            promoter=promoter,
            full_weight_region=full_weight_region,
        )
        # print(gene_df)
        logger.info("Reading binding file...")
        ddf = dd.read_csv(
            binding_fname,
            sep="\t",
            usecols=["factor", "enhancer", "binding"],
        )
        if tfs is not None:
            ddf = ddf[ddf["factor"].isin(tfs)]

        # Merge binding information with gene information.
        # This may be faster than creating index on enhancer first, but need to check!
        tmp = ddf.merge(gene_df, left_on="enhancer", right_index=True)

        # Remove everything with weight 0
        tmp = tmp[tmp["weight"] > 0]

        # Modify the binding by the weight, which is based on distance to TSS
        tmp["weighted_binding"] = tmp["weight"] * tmp["binding"]

        logger.info("Grouping by tf and target gene...")
        # Using one column that combines TF and target as dask cannot handle MultiIndex
        tmp["tf_target"] = tmp["factor"] + "_" + tmp["gene"]
        tmp = tmp.groupby("tf_target")[["weighted_binding"]]
        if combine_function == "mean":
            tmp = tmp.mean()
        elif combine_function == "max":
            tmp = tmp.max()
        elif combine_function == "sum":
            tmp = tmp.sum()

        logger.info("Done grouping...")

        return tmp

    def create_expression_network(
        self, fin_expression, column="tpm", tfs=None, rank=True, bindingfile=None
    ):
        """Create a gene expression based network.

        Based on a file with gene expression levels (a TPM column), a dask
        DataFrame is generated with the combined expression levels of the tf
        and the target gene. By default, the expression levels are ranked and
        subsequently scaled between 0 and 1.

        Parameters
        ----------
        fin_expression : str or list
            Filename of file that contains gene expression data (TPM), or a
            list of filenames. First column should contain the gene name.
        column : str, optional
            Column name that contains gene expression, 'tpm' by default.
        tfs : list, optional
            List of TF gene names. All TFs will be used by default.
        rank : bool, optional
            Rank expression levels before scaling.
        bindingfile : str, optional
            Filename with binding information.

        Returns
        -------
            Dask DataFrame with gene expression based values.
        """
        # Convert it to a list if it's not a list of files, but a single file name
        if isinstance(fin_expression, str):
            fin_expression = [fin_expression]

        # Read all expression input files and take the mean expression per gene
        expression = pd.DataFrame(
            pd.concat(
                [pd.read_table(f, index_col=0)[[column]] for f in fin_expression],
                axis=1,
            ).mean(1),
            columns=[column],
        )
        # Log-transform; the pseudocount avoids log2(0).
        expression[column] = np.log2(expression[column] + 1e-5)

        # Create the target gene list, based on all genes
        expression.index.rename("target", inplace=True)
        expression = expression.reset_index()
        expression = expression.rename(columns={"tpm": "target_expression"})

        # Create the TF list, based on valid transcription factors
        if tfs is None:
            # Prefer the factor activity file next to the binding file;
            # fall back to the package's bundled TF list.
            activity_fname = bindingfile.replace("binding.tsv", "factor_activity.tsv")
            if os.path.exists(activity_fname):
                tfs = list(
                    set(pd.read_table(activity_fname, index_col=0).index.tolist())
                )
            else:
                package_dir = os.path.dirname(ananse.__file__)
                tffile = os.path.join(package_dir, "db", "tfs.txt")
                tfs = pd.read_csv(tffile, header=None)[0].tolist()

        # Keep only TFs that are present in the expression table.
        tfs = expression[expression.target.isin(tfs)]
        tfs = tfs.reset_index()
        tfs = tfs.drop(columns=["index"])
        tfs.rename(
            columns={"target": "tf", "target_expression": "tf_expression"},
            inplace=True,
        )

        # Dummy key on both sides produces a full cross join on merge.
        expression["key"] = 0
        tfs["key"] = 0

        # Merge TF and target gene expression information
        network = expression.merge(tfs, how="outer")
        network = network[["tf", "target", "tf_expression", "target_expression"]]

        # Rank and scale
        for col in ["tf_expression", "target_expression"]:
            if rank:
                network[col] = rankdata(network[col])
            network[col] = minmax_scale(network[col])

        # Use one-column index that contains TF and target genes.
        # This is necessary for dask, as dask cannot merge on a MultiIndex.
        # Otherwise this would be an inefficient and unnecessary step.
        network["tf_target"] = network["tf"] + "_" + network["target"]
        network = network.set_index("tf_target").drop(columns=["target"])

        # Convert to a dask DataFrame.
        logger.info("creating expression dataframe")
        network = dd.from_pandas(network, npartitions=30)

        return network

    def run_network(
        self,
        binding,
        fin_expression=None,
        tfs=None,
        corrfiles=None,
        outfile=None,
        up=1e5,
        down=1e5,
        alpha=None,
        promoter=2000,
        full_weight_region=5000,
    ):
        """Create network.

        Parameters
        ----------
        binding : str
            Filename with binding information. Should contain at least three
            columns: "factor", "enhancer" and "binding".
        fin_expression : str or list, optional
            Filename of list of filenames with expression information.
        tfs : list, optional
            List of transcription factors to use, by default None, which
            means all TFs will be used.
        corrfiles : [type], optional
            Correlation files by default None. CURRENTLY UNUSED.
        outfile : str, optional
            Output file.
        up : int, optional
            Upstream maximum distance, by default 100kb.
        down : int, optional
            Downstream maximum distance, by default 100kb.
        alpha : float, optional
            Parameter to control weight decay, by default 1e4.
        promoter : int, optional
            Promoter region, by default 2000.
        full_weight_region : int, optional
            Region that will receive full weight, by default 5000.
        """
        # Expression base network
        logger.info("Loading expression")
        df_expression = self.create_expression_network(
            fin_expression, tfs=tfs, rank=True, bindingfile=binding
        )

        # Use a version of the binding network, either promoter-based, enhancer-based
        # or both.
        if self.include_promoter or self.include_enhancer:
            logger.info("Aggregate binding")
            df_binding = self.aggregate_binding(
                binding,
                tfs=tfs,
                up=up,
                down=down,
                alpha=alpha,
                promoter=promoter,
                full_weight_region=full_weight_region,
                combine_function="sum",
            )

            # Optionally mix in per-TF activity (rank-scaled to [0, 1];
            # missing TFs default to a neutral 0.5).
            activity_fname = binding.replace("binding.tsv", "factor_activity.tsv")
            if os.path.exists(activity_fname):
                logger.info("Reading factor activity")
                act = pd.read_table(activity_fname, index_col=0)
                act.index.name = "tf"
                act["activity"] = minmax_scale(rankdata(act["activity"], method="min"))
                df_expression = df_expression.merge(
                    act, right_index=True, left_on="tf", how="left"
                ).fillna(0.5)

            df_expression = df_expression.drop(columns=["tf"])

            # This is where the heavy lifting of all delayed computations gets done
            logger.info("Computing network")
            if fin_expression is not None:
                with ProgressBar():
                    result = df_expression.join(df_binding)
                    result = result.compute()
                result = result.fillna(0)
            else:
                # Binding only: rank-scale the aggregated binding signal.
                result = df_binding
                result["weighted_binding"] = minmax_scale(
                    rankdata(result["weighted_binding"], method="min")
                )

            columns = [
                "tf_expression",
                "target_expression",
                "weighted_binding",
                "activity",
            ]
            columns = [col for col in columns if col in result]
            logger.info(f"Using {', '.join(columns)}")
            # Combine the individual scores
            result["prob"] = result[columns].mean(1)
        else:
            # No binding requested: probability from expression only.
            result = df_expression
            result["prob"] = result[["tf_expression", "target_expression"]].mean(1)
            result = result.compute()

        logger.info("Saving file")
        result[["prob"]].to_csv(outfile, sep="\t")
import time

import torch
import numpy as np


def get_model(dataset, centroids, dataset_type, n_features, n_cluster):
    """Factory returning the k-means implementation matching `dataset_type`.

    Returns None (implicitly) for any other dataset type.
    """
    if dataset_type == "dense_libsvm":
        return KMeans(dataset, centroids)
    elif dataset_type == "sparse_libsvm":
        return SparseKMeans(dataset, centroids, n_features, n_cluster)


class KMeans(object):
    """Dense k-means; each find_nearest_cluster() call runs one Lloyd step."""

    def __init__(self, data, centroids, centroid_type='numpy'):
        # X: data matrix, one sample per row.
        self.X = data
        if centroid_type == 'numpy':
            self.centroids = centroids
        elif centroid_type == 'tensor':
            # input centroids is a pytorch tensor
            self.centroids = centroids.numpy()
        # Start with a huge error so the first iteration always "improves".
        self.error = np.finfo(np.float32).max

    @staticmethod
    def euclidean_dist(a, b, axis=1):
        # Mean of row-wise L2 norms; used as the centroid-shift error metric.
        return np.mean(np.linalg.norm(a - b, axis=axis))

    def closest_centroid(self):
        """returns an array containing the index to the nearest centroid for each point"""
        # Process the data in fixed-size batches to bound peak memory of the
        # (batch, n_clusters) distance matrix.
        batch_size = 5000
        remaining = self.X.shape[0]
        current_batch = 0
        argmin_dist = []
        while True:
            if remaining <= batch_size:
                # Final (possibly smaller) batch.
                dist = np.full((remaining, self.centroids.shape[0]), np.inf)
                for i in range(self.centroids.shape[0]):
                    dist[:, i] = np.sum(
                        np.square(self.X[current_batch:current_batch + remaining] - self.centroids[i, :]),
                        axis=1)
                argmin_dist.append(np.argmin(dist, axis=1))
                break
            else:
                dist = np.full((batch_size, self.centroids.shape[0]), np.inf)
                for i in range(self.centroids.shape[0]):
                    dist[:, i] = np.sum(
                        np.square(self.X[current_batch:current_batch + batch_size] - self.centroids[i, :]),
                        axis=1)
                argmin_dist.append(np.argmin(dist, axis=1))
                current_batch += batch_size
                remaining -= batch_size
        # Stitch per-batch results back into one flat index array.
        res = argmin_dist[0]
        for i in range(1, len(argmin_dist)):
            res = np.concatenate((res, argmin_dist[i]), axis=None)
        return res

    def update_centroids(self, closest):
        """returns the new centroids assigned from the points closest to them"""
        # An empty cluster yields NaN from mean(); nan_to_num maps it to 0.
        x = np.array([self.X[closest == k].mean(axis=0) for k in range(self.centroids.shape[0])])
        return np.nan_to_num(x)

    def find_nearest_cluster(self):
        # centroids.shape: nr_clusters, data_dim
        closest = self.closest_centroid()
        new_centroids = self.update_centroids(closest)
        # Error is the mean shift between old and new centroids.
        self.error = self.euclidean_dist(self.centroids, new_centroids)
        self.centroids = new_centroids
        return

    def get_centroids(self, centroids_type):
        # Export centroids either as the stored numpy array or as a tensor.
        if centroids_type == "numpy":
            return self.centroids
        if centroids_type == "tensor":
            return torch.tensor(self.centroids)


class SparseKMeans(object):
    """K-means over a list of sparse torch tensors (one row vector each)."""

    def __init__(self, _data, _centroids, _n_feature, _n_cluster):
        self.data = _data
        self.n_feature = _n_feature
        # Detach initial centroids and store them as 1 x n_feature sparse rows.
        self.centroids = [c.clone().detach().reshape(1, self.n_feature).to_sparse() for c in _centroids]
        self.n_cluster = _n_cluster
        self.error = np.finfo(np.float32).max
        self.model = torch.zeros(self.n_feature, 1)

    @staticmethod
    def euclidean_dist(x1, x2):
        # Squared Euclidean distance computed entirely in sparse ops.
        # NOTE(review): torch.sparse.FloatTensor is a deprecated constructor
        # alias in recent PyTorch — verify against the pinned torch version.
        diff = torch.sparse.FloatTensor.sub(x1, x2)
        sq_diff = torch.sparse.FloatTensor.mul(diff, diff)
        dist_sum = torch.sparse.sum(sq_diff)
        # diff = torch.sub(x1.to_dense(), x2.to_dense())
        # sq_diff = torch.mul(diff, diff)
        # dist_sum = torch.sum(sq_diff)
        return dist_sum

    def closest_centroid(self):
        """Return, for every sample, the index of its nearest centroid."""
        start = time.time()
        argmin_dist = np.zeros(len(self.data))
        for i in range(len(self.data)):
            min_sum = np.inf
            idx = 0
            for j in range(len(self.centroids)):
                tmp = self.euclidean_dist(self.data[i], self.centroids[j])
                if tmp < min_sum:
                    idx = j
                    min_sum = tmp
            argmin_dist[i] = idx
        print(f"Find closest centroids takes {time.time() - start}s")
        # NOTE(review): uint8 caps the representable cluster index at 255.
        return np.array(argmin_dist, np.uint8)

    def move_centroids(self, closest):
        """Recompute centroids as the mean of their assigned samples."""
        start = time.time()
        # Start from empty sparse accumulators, one per cluster.
        c_mean = [torch.sparse.FloatTensor(self.centroids[0].size()[0], self.centroids[0].size()[1])
                  for i in range(self.n_cluster)]
        c_count = [0 for i in range(self.n_cluster)]
        for i in range(len(self.data)):
            c_mean[closest[i]] = torch.sparse.FloatTensor.add(self.data[i], c_mean[closest[i]])
            c_count[closest[i]] += 1
        for i in range(self.n_cluster):
            # NOTE(review): divides by zero if a cluster received no samples
            # (the dense KMeans handles this via nan_to_num) — confirm intent.
            c_mean[i] = torch.sparse.FloatTensor.div(c_mean[i], c_count[i])
        print(f"Allocate data to new centroids takes {time.time() - start}s")
        return c_mean

    def get_error(self, new_cent):
        """Mean squared centroid shift between the old and new centroids."""
        start = time.time()
        tmp = self.euclidean_dist(new_cent[0], self.centroids[0])
        for i in range(1, self.n_cluster):
            tmp = torch.sparse.FloatTensor.add(self.euclidean_dist(new_cent[i], self.centroids[i]), tmp)
        print(f"Compute error takes {time.time() - start}s")
        return torch.sparse.FloatTensor.div(tmp, self.n_cluster)

    def find_nearest_cluster(self):
        # One full Lloyd iteration: assign, move, score.
        print("Start computing kmeans...")
        closest = self.closest_centroid()
        new_centroids = self.move_centroids(closest)
        self.error = self.get_error(new_centroids).item()
        self.centroids = new_centroids
        return

    def get_centroids(self, centroids_type):
        # Export centroids in one of three layouts.
        if centroids_type == "sparse_tensor":
            return self.centroids
        elif centroids_type == "numpy":
            cent_lst = [self.centroids[i].to_dense().numpy() for i in range(self.n_cluster)]
            centroid_np = np.array(cent_lst).reshape(self.n_cluster, self.n_feature)
            return centroid_np
        elif centroids_type == "dense_tensor":
            cent_tensor_lst = [self.centroids[i].to_dense() for i in range(self.n_cluster)]
            return torch.stack(cent_tensor_lst)
        else:
            raise Exception("centroid type can only be sparse_tensor, dense_tensor, or numpy")
#!/usr/bin/env python
#
# Copyright (c) 2019 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
This module provides a ROS autonomous agent interface to control the ego vehicle via a ROS stack
"""
import math
import os
import subprocess
import signal
import threading
import time

import numpy
import carla
import tf
import rospy
from cv_bridge import CvBridge
from geometry_msgs.msg import PoseStamped, TwistWithCovariance, TwistStamped
from nav_msgs.msg import Odometry, Path
from rosgraph_msgs.msg import Clock
from sensor_msgs.msg import Image, PointCloud2, NavSatFix, NavSatStatus, CameraInfo, Range, PointField, Imu
from sensor_msgs.point_cloud2 import create_cloud_xyz32, create_cloud
from std_msgs.msg import Header, String
from srunner.scenariomanager.carla_data_provider import *

from leaderboard.autoagents.autonomous_agent import AutonomousAgent, Track


def get_entry_point():
    """Name of the agent class the leaderboard framework should instantiate."""
    return 'RosAgent'


class RosAgent(AutonomousAgent):

    """
    Base class for ROS-based stacks.

    Derive from it and implement the sensors() method.

    Please define TEAM_CODE_ROOT in your environment.
    The stack is started by executing $TEAM_CODE_ROOT/start.sh

    The sensor data is published on similar topics as with the carla-ros-bridge. You can find details about
    the utilized datatypes there.

    This agent expects a roscore to be running.
    """

    # Class-level defaults; re-initialized per instance in setup().
    speed = None
    current_control = None
    stack_process = None                 # Popen handle of the launched stack
    current_map_name = None
    step_mode_possible = None
    vehicle_info_publisher = None
    global_plan_published_time = None
    start_script = None
    manual_data_debug = False
    counter = 0
    open_drive_map_data = None           # raw OpenDRIVE XML string
    open_drive_map_name = None

    def setup(self, path_to_conf_file):
        """
        setup agent
        """
        self.track = Track.MAP
        self.agent_role_name = os.environ['AGENT_ROLE_NAME']
        self.bridge_mode = os.environ['OP_BRIDGE_MODE']
        self.topic_base = "/carla/{}".format(self.agent_role_name)
        self.topic_waypoints = self.topic_base + "/waypoints"
        self.stack_thread = None
        self.counter = 0
        self.open_drive_map_name = None
        self.open_drive_map_data = None

        # get start_script from environment
        team_code_path = os.environ['TEAM_CODE_ROOT']
        if not team_code_path or not os.path.exists(team_code_path):
            raise IOError("Path '{}' defined by TEAM_CODE_ROOT invalid".format(team_code_path))
        self.start_script = "{}/start.sh".format(team_code_path)
        if not os.path.exists(self.start_script):
            raise IOError("File '{}' defined by TEAM_CODE_ROOT invalid".format(self.start_script))

        # set use_sim_time via commandline before init-node
        process = subprocess.Popen(
            "rosparam set use_sim_time true", shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        process.wait()
        if process.returncode:
            raise RuntimeError("Could not set use_sim_time")

        # initialize ros node
        rospy.init_node('op_ros_agent', anonymous=True)

        # publish first clock value '0'
        self.clock_publisher = rospy.Publisher('clock', Clock, queue_size=10, latch=True)
        self.clock_publisher.publish(Clock(rospy.Time.from_sec(0)))

        self.timestamp = None
        self.speed = 0

        # publish global path every 2 seconds
        self.global_plan_published_time = 0

        self.vehicle_status_publisher = None
        self.vehicle_imu_publisher = None
        self.map_file_publisher = None
        self.current_map_name = None
        self.step_mode_possible = False

        # External controller publishes commands on this topic.
        self.vehicle_control_subscriber = rospy.Subscriber(
            '/carla_op_controller_cmd', TwistStamped, self.on_vehicle_control)

        self.current_control = carla.VehicleControl()

        self.waypoint_publisher = rospy.Publisher(
            self.topic_waypoints, Path, queue_size=1, latch=True)

        self.publisher_map = {}
        self.id_to_sensor_type_map = {}
        self.id_to_camera_info_map = {}
        self.cv_bridge = CvBridge()

        # setup ros publishers for sensors
        # pylint: disable=line-too-long
        for sensor in self.sensors():
            self.id_to_sensor_type_map[sensor['id']] = sensor['type']
            if sensor['type'] == 'sensor.camera.rgb':
                self.publisher_map[sensor['id']] = rospy.Publisher(
                    self.topic_base + '/camera/rgb/' + sensor['id'] + "/image_color", Image, queue_size=1, latch=True)
                self.id_to_camera_info_map[sensor['id']] = self.build_camera_info(sensor)
                self.publisher_map[sensor['id'] + '_info'] = rospy.Publisher(
                    self.topic_base + '/camera/rgb/' + sensor['id'] + "/camera_info", CameraInfo, queue_size=1, latch=True)
            elif sensor['type'] == 'sensor.lidar.ray_cast':
                self.publisher_map[sensor['id']] = rospy.Publisher(
                    self.topic_base + '/lidar/' + sensor['id'] + "/point_cloud", PointCloud2, queue_size=1, latch=True)
            elif sensor['type'] == 'sensor.other.gnss':
                self.publisher_map[sensor['id']] = rospy.Publisher(
                    self.topic_base + '/gnss/' + sensor['id'] + "/fix", NavSatFix, queue_size=1, latch=True)
            elif sensor['type'] == 'sensor.speedometer':
                # Single shared odometry publisher regardless of sensor id.
                if not self.vehicle_status_publisher:
                    self.vehicle_status_publisher = rospy.Publisher(
                        self.topic_base + '/odometry', Odometry, queue_size=1, latch=True)
            elif sensor['type'] == 'sensor.other.imu':
                if not self.vehicle_imu_publisher:
                    self.vehicle_imu_publisher = rospy.Publisher(
                        self.topic_base + '/imu', Imu, queue_size=1, latch=True)
            elif sensor['type'] == 'sensor.opendrive_map':
                if not self.map_file_publisher:
                    self.map_file_publisher = rospy.Publisher('/carla/map_file', String, queue_size=1, latch=True)
            else:
                raise TypeError("Invalid sensor type: {}".format(sensor['type']))
        # pylint: enable=line-too-long

    def init_local_agent(self, role_name, map_name, waypoints_topic_name, enable_explore):
        """Launch the team stack (start.sh) in its own process group."""
        rospy.loginfo("Executing stack...")
        print("Executing stack...", role_name, map_name)
        local_start_script = self.start_script + ' ' + role_name + ' ' + map_name + ' ' + enable_explore + ' ' + waypoints_topic_name
        # os.setpgrp lets destroy() signal the whole process group at once.
        self.stack_process = subprocess.Popen(local_start_script, shell=True, preexec_fn=os.setpgrp)
        # self.vehicle_control_event = threading.Event()

    def write_opendrive_map_file(self, map_name, map_data):
        """Write the OpenDRIVE XML to $TEAM_CODE_ROOT/hdmaps/<map_name>.xodr."""
        team_code_path = os.environ['TEAM_CODE_ROOT']
        if not team_code_path or not os.path.exists(team_code_path):
            raise IOError("Path '{}' defined by TEAM_CODE_ROOT invalid".format(team_code_path))

        opendrive_map_path = "{}/hdmaps/{}.xodr".format(team_code_path, map_name)
        # NOTE(review): plain open/close without try/with — leaks the handle on
        # a write error; consider a context manager.
        f = open(opendrive_map_path, "w")
        f.write(map_data)
        f.close()

    def on_vehicle_control(self, data):
        """
        callback if a new vehicle control command is received
        """
        # The controller packs commands scaled by 100 into a TwistStamped.
        cmd = carla.VehicleControl()
        cmd.throttle = data.twist.linear.x/100.0
        cmd.steer = data.twist.angular.z/100.0
        cmd.brake = data.twist.linear.y/100.0
        #print('Received Command : ', cmd.throttle, cmd.steer)
        # if cmd.throttle < 0:
        #     cmd.reverse = 1
        # else:
        #     cmd.reverse = 0
        #cmd.gear = 1
        #cmd.manual_gear_shift = data.manual_gear_shift
        self.current_control = cmd
        # if not self.vehicle_control_event.is_set():
        #     self.vehicle_control_event.set()
        # After the first vehicle control is sent out, it is possible to use the stepping mode
        self.step_mode_possible = True

    def build_camera_info(self, attributes):  # pylint: disable=no-self-use
        """
        Private function to compute camera info

        camera info doesn't change over time
        """
        camera_info = CameraInfo()
        # store info without header
        camera_info.header = None
        camera_info.width = int(attributes['width'])
        camera_info.height = int(attributes['height'])
        camera_info.distortion_model = 'plumb_bob'
        cx = camera_info.width / 2.0
        cy = camera_info.height / 2.0
        # Focal length from horizontal field of view (fov in degrees).
        fx = camera_info.width / (
            2.0 * math.tan(float(attributes['fov']) * math.pi / 360.0))
        fy = fx
        camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
        camera_info.D = [0, 0, 0, 0, 0]
        camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
        camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]
        return camera_info

    def publish_plan(self):
        """
        publish the global plan
        """
        msg = Path()
        msg.header.frame_id = "map"
        msg.header.stamp = rospy.Time.now()
        for wp in self._global_plan_world_coord:
            pose = PoseStamped()
            # Carla is left-handed; negate y and yaw for ROS (right-handed).
            pose.pose.position.x = wp[0].location.x
            pose.pose.position.y = -wp[0].location.y
            pose.pose.position.z = wp[0].location.z
            quaternion = tf.transformations.quaternion_from_euler(
                0, 0, -math.radians(wp[0].rotation.yaw))
            pose.pose.orientation.x = quaternion[0]
            pose.pose.orientation.y = quaternion[1]
            pose.pose.orientation.z = quaternion[2]
            pose.pose.orientation.w = quaternion[3]
            msg.poses.append(pose)

        #rospy.loginfo("Publishing Plan...")
        self.waypoint_publisher.publish(msg)

    def sensors(self):
        """Static sensor suite requested from the leaderboard framework."""
        sensors = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
                    'width': 1280, 'height': 720, 'fov': 100, 'id': 'Center'},
                   {'type': 'sensor.lidar.ray_cast', 'x': 0.0, 'y': 0.0, 'z': 2.40, 'roll': 0.0, 'pitch': 0.0,
                    'yaw': -90, 'id': 'LIDAR'},
                   {'type': 'sensor.other.gnss', 'x': 0.0, 'y': 0.0, 'z': 1.60, 'id': 'GPS'},
                   {'type': 'sensor.opendrive_map', 'reading_frequency': 1, 'id': 'OpenDRIVE'},
                   {'type': 'sensor.speedometer', 'reading_frequency': 10, 'id': 'speed'},
                   {'type': 'sensor.other.imu', 'x': 0.0, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
                    'yaw': -90.0, 'id': 'IMU'},
                   ]
        return sensors

    def get_header(self):
        """
        Returns ROS message header
        """
        header = Header()
        header.stamp = rospy.Time.from_sec(self.timestamp)
        return header

    def publish_lidar(self, sensor_id, data):
        """
        Function to publish lidar data
        """
        header = self.get_header()
        header.frame_id = 'velodyne'
        lidar_data = numpy.frombuffer(data, dtype=numpy.float32)

        if lidar_data.shape[0] % 4 == 0:
            lidar_data = numpy.reshape(lidar_data, (int(lidar_data.shape[0] / 4), 4))
            # we take the oposite of y axis
            # (as lidar point are express in left handed coordinate system, and ros need right handed)
            # we need a copy here, because the data are read only in carla numpy
            # array
            # NOTE(review): this statement neither copies nor negates — a `-`
            # (or .copy()) appears to have been lost; verify against the
            # carla-ros-bridge reference implementation.
            lidar_data = lidar_data
            # we also need to permute x and y
            lidar_data = lidar_data[..., [1, 0, 2, 3]]
            fields = [PointField('x', 0, PointField.FLOAT32, 1),
                      PointField('y', 4, PointField.FLOAT32, 1),
                      PointField('z', 8, PointField.FLOAT32, 1),
                      PointField('intensity', 12, PointField.FLOAT32, 1)]
            msg = create_cloud(header, fields, lidar_data)
            self.publisher_map[sensor_id].publish(msg)
        else:
            print('Cannot Reshape LIDAR Data buffer')

    def publish_gnss(self, sensor_id, data):
        """
        Function to publish gnss data
        """
        msg = NavSatFix()
        msg.header = self.get_header()
        msg.header.frame_id = 'gps'
        msg.latitude = data[0]
        msg.longitude = data[1]
        msg.altitude = data[2]
        msg.status.status = NavSatStatus.STATUS_SBAS_FIX
        # pylint: disable=line-too-long
        msg.status.service = NavSatStatus.SERVICE_GPS | NavSatStatus.SERVICE_GLONASS | NavSatStatus.SERVICE_COMPASS | NavSatStatus.SERVICE_GALILEO
        # pylint: enable=line-too-long
        self.publisher_map[sensor_id].publish(msg)

    def publish_camera(self, sensor_id, data):
        """
        Function to publish camera data
        """
        msg = self.cv_bridge.cv2_to_imgmsg(data, encoding='bgra8')
        # the camera data is in respect to the camera's own frame
        msg.header = self.get_header()
        msg.header.frame_id = 'camera'

        cam_info = self.id_to_camera_info_map[sensor_id]
        cam_info.header = msg.header
        self.publisher_map[sensor_id + '_info'].publish(cam_info)
        self.publisher_map[sensor_id].publish(msg)

    def publish_imu(self, sensor_id, data):
        """
        Publish IMU data
        """
        imu_msg = Imu()
        imu_msg.header = self.get_header()

        # Carla uses a left-handed coordinate convention (X forward, Y right, Z up).
        # Here, these measurements are converted to the right-handed ROS convention
        # (X forward, Y left, Z up).
        imu_msg.linear_acceleration.x = data[0]
        imu_msg.linear_acceleration.y = -data[1]
        imu_msg.linear_acceleration.z = data[2]

        imu_msg.angular_velocity.x = -data[3]
        imu_msg.angular_velocity.y = data[4]
        imu_msg.angular_velocity.z = -data[5]

        imu_rotation = data[6]

        quaternion = tf.transformations.quaternion_from_euler(
            0, 0, -math.radians(imu_rotation))
        imu_msg.orientation.x = quaternion[0]
        imu_msg.orientation.y = quaternion[1]
        imu_msg.orientation.z = quaternion[2]
        imu_msg.orientation.w = quaternion[3]

        self.vehicle_imu_publisher.publish(imu_msg)

    def publish_can(self, sensor_id, data):
        """
        publish can data
        """
        twist_msg = TwistWithCovariance()
        twist_msg.twist.linear.x = data['speed']
        if twist_msg.twist.linear.x < 0:
            twist_msg.twist.linear.x = 0
        twist_msg.twist.angular.z = -self.current_control.steer
        twist_msg.twist.linear.z = 1  # to tell OpenPlanner to use the steer directly
        twist_msg.twist.angular.x = 1  # to tell OpenPlanner to use the steer directly
        #print('Current Status : ', msg.twist.linear.x, ', Steer: ', msg.twist.angular.z)
        odo_msg = Odometry()
        odo_msg.header = self.get_header()
        odo_msg.twist = twist_msg
        self.vehicle_status_publisher.publish(odo_msg)

    def publish_hd_map(self, sensor_id, data, map_name):
        """
        publish hd map data
        """
        # Only (re)publish when the map actually changed.
        if self.current_map_name != map_name:
            self.current_map_name = map_name
        if self.map_file_publisher:
            self.map_file_publisher.publish(data['opendrive'])

    def use_stepping_mode(self):  # pylint: disable=no-self-use
        """
        Overload this function to use stepping mode!
        """
        return False

    def run_step(self, input_data, timestamp):
        """
        Execute one step of navigation.
        """
        town_map_name = self._get_map_name(CarlaDataProvider.get_map().name)
        # Lazily launch the stack once the OpenDRIVE map has been received.
        if self.stack_process is None and town_map_name is not None and self.open_drive_map_data is not None:
            self.write_opendrive_map_file(self.open_drive_map_name, self.open_drive_map_data)
            if self.bridge_mode == 'free' or self.bridge_mode == 'srunner':
                self.init_local_agent(self.agent_role_name, town_map_name, '', 'true')
            elif self.bridge_mode == 'leaderboard':
                self.init_local_agent(self.agent_role_name, town_map_name, self.topic_waypoints, 'false')
            else:
                self.init_local_agent(self.agent_role_name, town_map_name, self.topic_waypoints, 'false')

            # publish global plan to ROS once after initialize the stack
            if self._global_plan_world_coord:
                self.publish_plan()

        # self.vehicle_control_event.clear()
        self.timestamp = timestamp
        self.clock_publisher.publish(Clock(rospy.Time.from_sec(timestamp)))

        # check if stack is still running
        if self.stack_process and self.stack_process.poll() is not None:
            raise RuntimeError("Stack exited with: {} {}".format(
                self.stack_process.returncode, self.stack_process.communicate()[0]))

        # wait 2 second before publish the global path
        if self._global_plan_world_coord and (self.timestamp - self.global_plan_published_time) > 2.0:
            self.global_plan_published_time = self.timestamp
            self.publish_plan()

        # publish data of all sensors
        for key, val in input_data.items():
            sensor_type = self.id_to_sensor_type_map[key]
            if self.manual_data_debug:
                print(key)

            if sensor_type == 'sensor.camera.rgb':
                self.publish_camera(key, val[1])
            elif sensor_type == 'sensor.opendrive_map':
                # extract map name
                self.open_drive_map_data = val[1]['opendrive']
                self.open_drive_map_name = self._get_map_name(CarlaDataProvider.get_map().name)
                self.publish_hd_map(key, val[1], self.open_drive_map_name)
                #Extract dictionary with map data and transform and odometry
            elif sensor_type == 'sensor.other.gnss':
                self.publish_gnss(key, val[1])
            elif sensor_type == 'sensor.lidar.ray_cast':
                self.publish_lidar(key, val[1])
            elif sensor_type == 'sensor.speedometer':
                self.publish_can(key, val[1])
            elif sensor_type == 'sensor.other.imu':
                self.publish_imu(key, val[1])
            elif self.manual_data_debug:
                print('Additional Sensor !! ')
                print(key)

        # count_out = 500
        # # if self.open_drive_map_name == 'Town01' or self.open_drive_map_name == 'Town03':
        # #     count_out = 200
        # if self.counter > count_out:
        #     raise TypeError("Just Stop ................. Please ")
        # self.counter = self.counter + 1
        # if self.open_drive_map_name == 'Town01' :
        #     raise TypeError("Just Stop ................. Please ")

        return self.current_control

    def destroy(self):
        """
        Cleanup of all ROS publishers
        """
        # Terminate the whole stack process group, then unregister publishers.
        if self.stack_process and self.stack_process.poll() is None:
            rospy.loginfo("Sending SIGTERM to stack...")
            os.killpg(os.getpgid(self.stack_process.pid), signal.SIGTERM)
            rospy.loginfo("Waiting for termination of stack...")
            self.stack_process.wait()
            time.sleep(5)
            rospy.loginfo("Terminated stack.")

        rospy.loginfo("Stack is no longer running")

        if self.map_file_publisher:
            self.map_file_publisher.unregister()
        if self.vehicle_status_publisher:
            self.vehicle_status_publisher.unregister()
        if self.vehicle_info_publisher:
            self.vehicle_info_publisher.unregister()
        if self.waypoint_publisher:
            self.waypoint_publisher.unregister()
        if self.stack_process:
            self.stack_process = None

        #raise TypeError("Just Stop ................. Please ")

        rospy.loginfo("Cleanup finished")

    def _get_map_name(self, map_full_name):
        """Strip any leading path from a Carla map name (e.g. 'Carla/Maps/Town01' -> 'Town01')."""
        if map_full_name is None:
            return None
        name_start_index = map_full_name.rfind("/")
        if name_start_index == -1:
            name_start_index = 0
        else:
            name_start_index = name_start_index + 1

        return map_full_name[name_start_index:len(map_full_name)]
import os
import json
import pickle
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from skimage import io as skio

if __name__ == "__main__":

    """
    External parameters
    """
    parser = argparse.ArgumentParser(description="Interface for downloading/exploring data from database and extracting features for QCB")
    parser.add_argument("-m", "--mode", help="download (download metadata), \
feature (feature extraction) \
image (save cell images in static folder)\
check (check dataframe produced by feature extraction)\
process (process tables for further analyzes)", required=True)
    parser.add_argument("-c", "--config", help="Path to config json", required=True)
    # Optional row offset for resuming feature extraction.
    parser.add_argument("-s", "--start", nargs="?", type=int, default=0, const=0, required=False)
    args = vars(parser.parse_args())

    """
    Loading configuration form JSON file
    """
    with open(args["config"], "r") as fjson:
        config_json = json.load(fjson)

    """
    If download mode
    """
    if args["mode"] == "download":
        import datasetdatabase as dsdb
        #
        # Multiprocessing
        #
        os.environ["DSDB_PROCESS_LIMIT"] = "16"
        #
        # Loading metadata from database
        #
        prod = dsdb.DatasetDatabase(config=config_json["database"])
        ds_meta = prod.get_dataset(name=config_json["meta"])
        # Cache the metadata DataFrame locally as a pickle.
        with open(os.path.join("../data-raw/", config_json["meta"]+".pkl"), "wb") as fp:
            pickle.dump(ds_meta.ds, fp)

    """
    If feature extraction mode
    """
    if args["mode"] == "feature":
        from aicsfeature import extractor
        #
        # Loading metadata
        #
        with open(os.path.join("../data-raw/", config_json["meta"]+".pkl"), "rb") as fp:
            df_meta = pickle.load(fp)
        #
        # Indexing metadata dataframe by id specified in config_json. Keep in mind
        # that this id will not be maintained in database and a explicity id column
        # is required to be created before uploading a dataset.
        #
        df_meta = df_meta.set_index(config_json["id"])
        #
        # For each structure in the config file
        #
        for struct in config_json["run"]:
            if struct["status"] == "on":
                #
                # Finding all cell with that structure
                #
                print("\nImage Type:[", struct["structure_name"], "]\n")
                if struct["structure_name"] not in ["cell", "dna"]:
                    df_meta_struct = df_meta.loc[df_meta.structure_name == struct["structure_name"]]
                else:
                    df_meta_struct = df_meta.copy()
                df_features = pd.DataFrame([])
                for row in tqdm(range(df_meta_struct.shape[0])):
                    if row >= args["start"]:
                        #
                        # Load images
                        #
                        cid = df_meta_struct.index[row]
                        seg_path = os.path.join(config_json["cell_info"], cid, config_json["seg_prefix"])
                        # NOTE(review): raw_path also uses "seg_prefix", so RAW
                        # and SEG load the same file — presumably this should
                        # be a raw prefix; verify against the config schema.
                        raw_path = os.path.join(config_json["cell_info"], cid, config_json["seg_prefix"])
                        SEG = skio.imread(seg_path)
                        RAW = skio.imread(raw_path)
                        #
                        # Images to be used
                        #
                        str_ch = struct["channel"]
                        input_img = RAW[str_ch, :, :, :]
                        input_mask = SEG[str_ch, :, :, :]
                        input_mask2 = None if "channel2" not in struct else SEG[struct["channel2"], :, :, :]
                        #
                        # Feature extraction for each cell
                        #
                        df_features_cell = extractor.get_features(input_img=input_img,
                                                                  input_mask=input_mask,
                                                                  info=struct["info"],
                                                                  input_mask2=input_mask2)
                        # NOTE(review): DataFrame.append is removed in pandas 2.x;
                        # pd.concat would be the forward-compatible call.
                        df_features = df_features.append(df_features_cell, ignore_index=True, sort=True)
                # Index rows by cell id only for full (non-resumed) runs.
                if not args["start"]:
                    df_features.index = df_meta_struct.index
                #
                # Save as pickle
                #
                with open(os.path.join("../data-raw/", struct["save_as"]), "wb") as fp:
                    pickle.dump(df_features, fp)

    """
    If image mode
    """
    if args["mode"] == "image":
        #
        # Loading metadata
        #
        with open(os.path.join("../data-raw/", config_json["meta"]+".pkl"), "rb") as fp:
            df_meta = pickle.load(fp)
        df_meta = df_meta.set_index(config_json["id"])

        mem_ch = config_json["mem_channel"]
        dna_ch = config_json["dna_channel"]
        str_ch = config_json["str_channel"]

        """
        Checking whether static/imgs exist
        """
        if not os.path.isdir("../engine/app/static/imgs/"):
            os.makedirs("../engine/app/static/imgs/")

        """
        Creates an image with custom colors
        """
        # Zero out the other channels wherever one channel is set, then turn
        # fully black pixels white.
        # NOTE(review): channel indices are also used as axis arguments below
        # (np.max(..., axis=mem_ch), swapaxes(dna_ch, str_ch)) — confirm these
        # config values double as axis numbers.
        def fix_cell_color(img):
            for i in [str_ch, dna_ch, mem_ch]:
                for j in [str_ch, dna_ch, mem_ch]:
                    if i != j:
                        img[img[:, :, i] > 0, j] = 0
            img[np.all(img == 0, axis=str_ch), :] = 255
            return img

        # Max-project the 3D stack and keep three channels for a JPG preview.
        def get_cell_image(cell_img_path):
            img = skio.imread(cell_img_path)
            img = np.max(img, axis=mem_ch)
            img = np.swapaxes(img, dna_ch, str_ch)
            img = img[:, :, :3]
            img = fix_cell_color(img)
            return img

        for row in tqdm(range(df_meta.shape[0])):
            cid = df_meta.index[row]
            img = get_cell_image(os.path.join(config_json["cell_info"], cid, config_json["seg_prefix"]))
            skio.imsave(os.path.join('../engine/app/static/imgs', cid+'.jpg'), img)

    """
    If check mode
    """
    if args["mode"] == "check":
        import time
        #
        # For each structure in the config file
        #
        report = pd.DataFrame([])
        for struct in config_json["run"]:
            #
            # Finding all cell with that structure
            #
            mod_date = time.ctime(os.path.getmtime(os.path.join("../data-raw/", struct["save_as"])))
            with open(os.path.join("../data-raw/", struct["save_as"]), "rb") as fp:
                df = pickle.load(fp)
            print("\n## "+struct["save_as"]+" ##\n")
            print(df.head())
            # NOTE(review): DataFrame.append is removed in pandas 2.x.
            report = report.append({"name": struct["save_as"].replace(".pkl", ""),
                                    "rows": df.shape[0],
                                    "cols": df.shape[1],
                                    "modified": mod_date}, sort=True, ignore_index=True)
        # NOTE(review): np.int is removed in numpy >= 1.24; plain int works.
        report[["cols", "rows"]] = report[["cols", "rows"]].astype(np.int)
        print(report[["name", "cols", "rows", "modified"]])

    """
    If process mode
    """
    if args["mode"] == "process":
        #
        # Loading metadata
        #
        with open(os.path.join("../data-raw/", config_json["meta"]+".pkl"), "rb") as fp:
            df_full = pickle.load(fp)
        df_full = df_full.set_index(config_json["id"])
        #
        # Loading features
        #
        for struct in config_json["run"]:
            if struct["status"] == "on":
                #
                # Finding all cell with that structure
                #
                with open(os.path.join("../data-raw/", struct["save_as"]), "rb") as fp:
                    df_str_fea = pickle.load(fp)
                df_full = df_full.join(df_str_fea)

        df_full.to_csv("../engine/data-processed/data.csv")
import numpy as np
import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.dataset import Dataset


class CIFAR10LabelDataset(Dataset):
    """Minimal ``Dataset`` pairing raw samples with anomaly labels.

    Parameters
    ----------
    data : sequence
        Raw samples; each item must be convertible with ``np.array``.
    ydata : sequence
        Per-sample labels (this project uses 0 = normal, 1 = outlier).
    transform : callable or None
        Optional transform applied to each converted sample.
    """

    def __init__(self, data, ydata, transform):
        self.data = data
        self.ydata = ydata
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        x = self.data[idx]
        label = self.ydata[idx]
        x = np.array(x)
        # BUGFIX (idiom): use identity check `is not None` instead of
        # `!= None`, which would invoke the transform object's __eq__.
        if self.transform is not None:
            x = self.transform(x)
        return x, label


def get_cifar10_data(normal_class):
    """Split CIFAR-10 into one-class train/val sets plus an outlier test set.

    CIFAR-10 train/test sets are downloaded (if needed) to ``./data/``.
    Samples whose class is in `normal_class` are labeled 0 and routed to the
    train split (from CIFAR train) or val split (from CIFAR test); all other
    samples are labeled 1 and collected as outliers for final testing.

    :param normal_class: iterable of class indices (0-9) treated as "normal"
    :return: (normal_x_train, normal_y_train, normal_x_val, normal_y_val,
              outlier_x_test, outlier_y_test)
    """
    CIFAR10_PATH = "./data/"

    # Extract data and targets from the torchvision datasets.
    train_data = datasets.CIFAR10(root=CIFAR10_PATH, train=True, download=True)
    x_train, y_train = train_data.data, train_data.targets
    test_data = datasets.CIFAR10(root=CIFAR10_PATH, train=False, download=True)
    x_test, y_test = test_data.data, test_data.targets

    outlier_classes = list(range(0, 10))
    for c in normal_class:
        outlier_classes.remove(c)

    normal_x_train = []   # train data
    normal_y_train = []   # train label
    outlier_x_test = []   # outlier data for final testing
    outlier_y_test = []   # outlier label for final testing
    for i in range(0, len(y_train)):
        if y_train[i] in normal_class:
            normal_x_train.append(x_train[i])
            normal_y_train.append(0)
        else:
            outlier_x_test.append(x_train[i])
            outlier_y_test.append(1)

    normal_x_val = []     # validation data (normal samples from CIFAR test)
    normal_y_val = []     # validation label
    for i in range(0, len(y_test)):
        if y_test[i] in normal_class:
            normal_x_val.append(x_test[i])
            normal_y_val.append(0)
        else:
            outlier_x_test.append(x_test[i])
            outlier_y_test.append(1)

    return normal_x_train, normal_y_train, normal_x_val, normal_y_val, outlier_x_test, outlier_y_test
import contextlib
import os
from pathlib import Path
from typing import Callable, Union

import numpy as np


def arr2str(a: np.ndarray, format_='e', ndigits=2) -> str:
    """Render a float ndarray as a string with a fixed numeric format.

    Exact zeros are printed as a bare ``'0'``; every other float uses
    `ndigits` digits in the style given by `format_` (e.g. ``'e'``, ``'f'``).

    :param a: array to render
    :param format_: printf-style presentation type
    :param ndigits: number of digits after the decimal point
    :return: string representation of `a`
    """
    def _fmt(value):
        return '0' if value == 0 else f'{value:.{ndigits}{format_}}'

    return np.array2string(a, formatter={'float_kind': _fmt})


def print_to_file(fname: Union[str, Path], fn: Callable, args=None, kwargs=None):
    """Run ``fn(*args, **kwargs)`` with all ``print`` output redirected.

    When `fname` is truthy, output goes to that path with a forced ``.txt``
    suffix; otherwise everything printed by `fn` is discarded.

    :param fname: target file (suffix replaced with ``.txt``) or falsy
    :param fn: callable whose stdout should be captured
    :param args: positional args for `fn`
    :param kwargs: keyword args for `fn`
    """
    target = Path(fname).with_suffix('.txt') if fname else None
    call_args = args if args is not None else ()
    call_kwargs = kwargs if kwargs is not None else {}
    with (target.open('w') if target else open(os.devnull, 'w')) as sink:
        with contextlib.redirect_stdout(sink):
            fn(*call_args, **call_kwargs)
import numpy as np
from scipy import linalg

from ..base import Preprocessor
from ..statistics import (
    calculate_within_class_scatter_matrix,
    calculate_between_class_scatter_matrix,
)

__all__ = [
    "LinearDiscriminantAnalysis",
]


class LinearDiscriminantAnalysis(Preprocessor):
    """Linear Discriminant Analysis (LDA)

    A generalization of Fisher's linear discriminant, a method used in
    statistics, pattern recognition and machine learning to find a linear
    combination of features that characterizes or separates two or more
    classes of objects or events. The resulting combination may be used as a
    linear classifier, or, more commonly, for dimensionality reduction before
    later classification.

    Parameters:
    -----------
    n_components : integer
        Number of discriminant directions to keep.
    """

    def __init__(self, n_components=2):
        self.n_components = n_components
        self.components = None
        self.explained_variance = []
        self.explained_variance_ratio = []

    def fit(self, X, y):
        """Learn the top `n_components` discriminant directions from (X, y).

        :return: self
        """
        X, y = super().fit(X, y)

        # Computing the Scatter Matrices (within-class and between-class)
        Sw = calculate_within_class_scatter_matrix(X, y)
        Sb = calculate_between_class_scatter_matrix(X, y)

        # Compute the eigenvalues and eigenvectors of Sw^{-1} Sb
        A = linalg.inv(Sw) @ Sb
        eigvals, eigvecs = linalg.eig(A)
        # BUGFIX: scipy.linalg.eig returns complex values. Sorting the raw
        # complex eigenvalues raises TypeError on Python 3, so take the real
        # part (Sw^{-1}Sb has real eigenvalues in exact arithmetic).
        eigvals = np.real(eigvals)
        eigvecs = np.real(eigvecs)
        eigvals_sum = np.sum(eigvals)

        # Selecting linear discriminants for the new feature subspace,
        # largest eigenvalue first.
        eig_pairs = sorted(
            ((eigvals[i], eigvecs[:, i]) for i in range(A.shape[0])),
            key=lambda pair: pair[0],
            reverse=True,
        )

        # BUGFIX: np.column_stack needs a sequence; passing a generator
        # expression is deprecated/rejected by modern NumPy.
        self.components = np.column_stack(
            [eig_pairs[i][1] for i in range(self.n_components)]
        )
        self.explained_variance = np.array(
            [eig_pairs[i][0] for i in range(self.n_components)]
        )
        self.explained_variance_ratio = np.array(
            [v / eigvals_sum for v in self.explained_variance]
        )
        return self

    def transform(self, X):
        """Project `X` onto the learned discriminant subspace."""
        X = super().transform(X)
        # Transforming the samples onto the new subspace
        return X @ self.components
#!/usr/bin/env python """ Usage: python run_significance_test.py predictions1 predictions2 gold_answers """ import sys import numpy as np from statsmodels.sandbox.stats.runs import mcnemar as mcnemar_test import csv np.random.seed(123) def compute_mcnemar_test(contingency_table): assert np.shape(contingency_table) == (2, 2) statistic, p_value = mcnemar_test(contingency_table, exact=True, correction=True) return statistic, p_value def read_submission_file(fn): """ Reads a file with labels in the submission format: story_id, label :param fn: filename with predictions :return: numpy array of shape = (1, 2) with the number of positive/negative labels """ labels = [] with open(fn) as csv_file: reader = iter(csv.reader(csv_file, delimiter=",", quotechar='"')) header = next(reader) if not header[0].isalpha(): # if there are no headers in the submission file lab = int(header[-1]) - 1 assert lab in [0,1] labels.append(lab) for row in reader: lab = int(row[-1]) -1 labels.append(lab) return labels def build_contingency_table(labels1, labels2, gold_arr): """ Our computation of the contingency table. 
:param labels1: an array of label predictions of clf1 :param labels2: an array of label predictions of clf2 :param gold_arr: an array of gold labels :return: numpy array (contingency table) of shape (2,2): Clf2-label-0 Clf2-label-1 Clf1-label-0 a b Clf1-label-1 c d """ pos_pos = pos_neg = neg_pos = neg_neg = 0 for idx, lab1 in enumerate(labels1): lab2 = labels2[idx] gold_lab = gold_arr[idx] if lab1 == gold_lab: if lab2 == gold_lab: pos_pos +=1 else: pos_neg +=1 elif lab2 != gold_lab: neg_neg +=1 else: neg_pos +=1 return np.asarray([[pos_pos, pos_neg],[neg_pos, neg_neg]]) if __name__ == "__main__": label_array1 = read_submission_file(sys.argv[1]) label_array2 = read_submission_file(sys.argv[2]) gold_array = read_submission_file(sys.argv[3]) contingency_table = build_contingency_table(label_array1, label_array2, gold_array) statistic, p_value = compute_mcnemar_test(contingency_table) print "Contingency table: \n", contingency_table print "McNemar test statistic: %0.7f\nP-value: %0.7f" % (statistic, p_value)
# from data_utils.ModelNetDataLoader import ModelNetDataLoader # from data_utils.OFFDataLoader import * import argparse import numpy as np import os import torch import logging from tqdm import tqdm from sklearn.metrics import confusion_matrix import sys import importlib from path import Path from data_utils.PCDLoader import * import matplotlib.pyplot as plt import seaborn as sn import pandas as pd from datetime import datetime BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = BASE_DIR sys.path.append(os.path.join(ROOT_DIR, 'models')) def parse_args(): '''PARAMETERS''' parser = argparse.ArgumentParser('Testing') parser.add_argument('--use_cpu', action='store_true', default=False, help='use cpu mode') parser.add_argument('--gpu', type=str, default='0', help='specify gpu device') parser.add_argument('--batch_size', type=int, default=1, help='batch size in training') # parser.add_argument('--num_category', default=10, type=int, choices=[10, 40], help='training on ModelNet10/40') parser.add_argument('--num_category', default=15, type=int, help='training on real dataset') parser.add_argument('--sample_point', type=bool, default=True, help='Sampling on tacitle data') parser.add_argument('--num_point', type=int, default=1024, help='Point Number') parser.add_argument('--log_dir', type=str, required=True, help='Experiment root') parser.add_argument('--use_normals', action='store_true', default=False, help='use normals') parser.add_argument('--use_uniform_sample', action='store_true', default=False, help='use uniform sampiling') parser.add_argument('--num_votes', type=int, default=3, help='Aggregate classification scores with voting') parser.add_argument('--SO3_Rotation', action='store_true', default=False, help='arbitrary rotation in SO3') return parser.parse_args() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") def test(model, loader, num_class=15, vote_num=1): mean_correct = [] classifier = model.eval() class_acc = 
np.zeros((num_class, 3)) print(len(loader)) y_pred = [] y_true = [] num_correct = 0 num_samples = 0 all_pred_new = [] all_true_new = [] # classes = list(find_classes(data_path).keys()) for j, data in tqdm(enumerate(loader), total=len(loader)): if not args.use_cpu: points, target = data['pointcloud'].to(device).float(), data['category'].to(device) points = points.transpose(2, 1) vote_pool = torch.zeros(target.size()[0], num_class).cuda() ################################################################################### output_new, _ = classifier(points) _, preds_new = torch.max(output_new.data, 1) y_true_new = target.data.cpu().numpy() y_pred_new = preds_new.data.cpu().numpy() all_pred_new += list(y_pred_new) all_true_new += list(y_true_new) num_correct += (y_pred_new == y_true_new).sum() num_samples += y_pred_new.size ################################################################################## for _ in range(vote_num): pred, _ = classifier(points) vote_pool += pred pred = vote_pool / vote_num pred_choice = pred.data.max(1)[1] # pred for confusion matrix pred_conf = (torch.max(torch.exp(pred), 1)[1]).data.cpu().numpy() y_pred.extend(pred_conf) y_true.extend(target.data.cpu()) for cat in np.unique(target.cpu()): classacc = pred_choice[target == cat].eq(target[target == cat].long().data).cpu().sum() class_acc[cat, 0] += classacc.item() / float(points[target == cat].size()[0]) class_acc[cat, 1] += 1 correct = pred_choice.eq(target.long().data).cpu().sum() mean_correct.append(correct.item() / float(points.size()[0])) print(f'Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}') cf_matrix_new = confusion_matrix(all_true_new, all_pred_new, normalize='true') print(cf_matrix_new) # print(mean_correct) class_acc[:, 2] = class_acc[:, 0] / class_acc[:, 1] class_acc = np.mean(class_acc[:, 2]) instance_acc = np.mean(mean_correct) return instance_acc, class_acc, cf_matrix_new def main(args): def log_string(str): logger.info(str) 
print(str) '''HYPER PARAMETER''' os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu '''CREATE DIR''' experiment_dir = 'log/classification/' + args.log_dir '''LOG''' args = parse_args() logger = logging.getLogger("Model") logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir) file_handler.setLevel(logging.INFO) file_handler.setFormatter(formatter) logger.addHandler(file_handler) log_string('PARAMETER ...') log_string(args) '''DATA LOADING''' log_string('Load dataset ...') # tactile_data_path = 'data/tactile_data_pcd/' # tactile_data_path = 'data/tactile_pcd_10_sampled_21.02/' # tactile_data_path = 'data/visual_data_pcd/' # data_path = 'data/modelnet40_normal_resampled/' # data_path = Path("mesh_data/ModelNet10") visual_data_path = Path('data/Rotated_visual_data_pcd_bi/') test_dataset = PCDPointCloudData(visual_data_path, folder='Test', sample_method='Voxel', num_point=args.num_point, sample=args.sample_point, est_normal=args.use_normals, rotation=False) testDataLoader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=10) '''MODEL LOADING''' num_class = args.num_category model_name = os.listdir(experiment_dir + '/logs')[0].split('.')[0] model = importlib.import_module(model_name) classifier = model.get_model(num_class, normal_channel=args.use_normals) if not args.use_cpu: classifier = classifier.cuda() checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth') classifier.load_state_dict(checkpoint['model_state_dict']) # Load labels: classes = find_classes(visual_data_path) print(classes) print(classes.keys) with torch.no_grad(): # instance_acc, class_acc = test(classifier.eval(), testDataLoader, vote_num=args.num_votes, num_class=num_class) instance_acc, class_acc, cf_matrix_new = test(classifier.eval(), testDataLoader, vote_num=args.num_votes, num_class=num_class) 
log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc)) # Draw confusion matrix # df_cm = pd.DataFrame(cf_matrix_new *10, # index = [i for i in classes.keys()], columns = [i for i in classes.keys()]) # plt.figure(figsize = (12,7)) # sn.heatmap(df_cm, annot=True) # plt.savefig(experiment_dir + '/' + str(datetime.now()) + '.png') # df_cm = pd.DataFrame(cf_matrix_new/np.sum(cf_matrix_old) *10, # index = [i for i in classes.keys()], columns = [i for i in classes.keys()]) # plt.figure(figsize = (12,7)) # sn.heatmap(df_cm, annot=True) # plt.savefig(experiment_dir + '/' + str(datetime.now()) + '.png') if __name__ == '__main__': args = parse_args() main(args)
import tensorflow as tf from tensorflow.python.framework import ops import numpy as np import functools from keras import backend as K from keras.engine import Layer, InputSpec from keras import activations from keras import initializers from keras import regularizers, constraints def noisy_dense(inputs, units, bias_shape, b_i=None, activation=tf.nn.relu, noisy_distribution='factorised'): def f(e_list): return tf.multiply(tf.sign(e_list), tf.pow(tf.abs(e_list), 0.5)) # dense1 = tf.layers.dense(tf.contrib.layers.flatten(relu5), activation=tf.nn.relu, units=50) if not isinstance(inputs, ops.Tensor): inputs = ops.convert_to_tensor(inputs, dtype='float') # dim_list = inputs.get_shape().as_list() # flatten_shape = dim_list[1] if len(dim_list) <= 2 else reduce(lambda x, y: x * y, dim_list[1:]) # reshaped = tf.reshape(inputs, [dim_list[0], flatten_shape]) if len(inputs.shape) > 2: inputs = tf.contrib.layers.flatten(inputs) w_i = tf.random_uniform_initializer(-0.1, 0.1) flatten_shape = 10*10*16#inputs.shape[1] weights = tf.get_variable('weights', shape=[flatten_shape, units], initializer=w_i) w_noise = tf.get_variable('w_noise', [flatten_shape, units], initializer=w_i) if noisy_distribution == 'independent': weights += tf.multiply(tf.random_normal(shape=w_noise.shape), w_noise) elif noisy_distribution == 'factorised': noise_1 = f(tf.random_normal(tf.TensorShape([flatten_shape, 1]), dtype=tf.float32)) noise_2 = f(tf.random_normal(tf.TensorShape([1, units]), dtype=tf.float32)) weights += tf.multiply(noise_1 * noise_2, w_noise) dense = tf.matmul(inputs, weights) if bias_shape is not None: assert bias_shape[0] == units biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i) b_noise = tf.get_variable('b_noise', [1, units], initializer=b_i) if noisy_distribution == 'independent': biases += tf.multiply(tf.random_normal(shape=b_noise.shape), b_noise) elif noisy_distribution == 'factorised': biases += tf.multiply(noise_2, b_noise) return activation(dense + biases) if 
activation is not None else dense + biases return activation(dense) if activation is not None else dense class NoiseDense(Layer): def __init__(self, units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): if 'input_shape' not in kwargs and 'input_dim' in kwargs: kwargs['input_shape'] = (kwargs.pop('input_dim'),) super(NoiseDense, self).__init__(**kwargs) self.units = units self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.input_spec = InputSpec(min_ndim=2) self.supports_masking = True def build(self, input_shape): assert len(input_shape) >= 2 input_dim = input_shape[-1] self.kernel = self.add_weight(shape=(input_dim, self.units), initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.kernelSigma = self.add_weight(shape=(input_dim, self.units), initializer=self.kernel_initializer, name='kernelSigma', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) if self.use_bias: self.bias = self.add_weight(shape=(self.units,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) self.biasSigma = self.add_weight(shape=(self.units,), initializer=self.bias_initializer, name='biasSigma', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None self.input_spec = 
InputSpec(min_ndim=2, axes={-1: input_dim}) self.built = True def call(self, inputs): newW = K.add(self.kernel,self.kernelSigma) output = K.dot(inputs, self.kernel) if self.use_bias: output = K.bias_add(output, self.bias) if self.activation is not None: output = self.activation(output) return output def compute_output_shape(self, input_shape): assert input_shape and len(input_shape) >= 2 assert input_shape[-1] output_shape = list(input_shape) output_shape[-1] = self.units return tuple(output_shape) def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(NoiseDense, self).get_config() return dict(list(base_config.items()) + list(config.items()))
import os
import argparse
import torch
import numpy
import random
from datetime import datetime


def format_time():
    """Return the current local time formatted as ``MM-DD-HH:MM:SS``."""
    now = datetime.now()  # current date and time
    date_time = now.strftime("%m-%d-%H:%M:%S")
    return date_time


def ensure_dir(path):
    """Create directory `path` (including parents) if it does not exist."""
    if not os.path.exists(path):
        os.makedirs(path)


def str2bool(v):
    """argparse-friendly bool parser accepting bools and true/false spellings.

    :param v: a bool or a string such as 'true'/'false', 'yes'/'no', '1'/'0'
    :raises argparse.ArgumentTypeError: if `v` is not a recognised spelling
    """
    if isinstance(v, bool):
        return v
    # BUGFIX: the original used `v.lower() in ('true')`, which is a SUBSTRING
    # test against the string 'true' (parentheses without a comma are not a
    # tuple), so e.g. '', 'r' and 'ru' were accepted as True. Use real tuples.
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


def set_seed(seed):
    """Seed python, numpy and torch RNGs for reproducible runs."""
    # torch.backends.cudnn.deterministic = True ## this one is controversial
    torch.manual_seed(seed)
    numpy.random.seed(seed)
    random.seed(seed)
    torch.cuda.manual_seed(seed)
import codecs import numpy as np from utils.data_utils import SPACE, PUNCTUATION_VOCABULARY, PUNCTUATION_MAPPING def compute_score(target_path, predicted_path): """Computes and prints the overall classification error and precision, recall, F-score over punctuations.""" mappings, counter, t_i, p_i = {}, 0, 0, 0 total_correct, correct, substitutions, deletions, insertions = 0, 0.0, 0.0, 0.0, 0.0 true_pos, false_pos, false_neg = {}, {}, {} with codecs.open(target_path, "r", "utf-8") as f_target, codecs.open(predicted_path, "r", "utf-8") as f_predict: target_stream = f_target.read().split() predict_stream = f_predict.read().split() while True: if PUNCTUATION_MAPPING.get(target_stream[t_i], target_stream[t_i]) in PUNCTUATION_VOCABULARY: # skip multiple consecutive punctuations target_punct = " " while PUNCTUATION_MAPPING.get(target_stream[t_i], target_stream[t_i]) in PUNCTUATION_VOCABULARY: target_punct = PUNCTUATION_MAPPING.get(target_stream[t_i], target_stream[t_i]) target_punct = mappings.get(target_punct, target_punct) t_i += 1 else: target_punct = " " if predict_stream[p_i] in PUNCTUATION_VOCABULARY: predicted_punct = mappings.get(predict_stream[p_i], predict_stream[p_i]) p_i += 1 else: predicted_punct = " " is_correct = target_punct == predicted_punct counter += 1 total_correct += is_correct if predicted_punct == " " and target_punct != " ": deletions += 1 elif predicted_punct != " " and target_punct == " ": insertions += 1 elif predicted_punct != " " and target_punct != " " and predicted_punct == target_punct: correct += 1 elif predicted_punct != " " and target_punct != " " and predicted_punct != target_punct: substitutions += 1 true_pos[target_punct] = true_pos.get(target_punct, 0.0) + float(is_correct) false_pos[predicted_punct] = false_pos.get(predicted_punct, 0.0) + float(not is_correct) false_neg[target_punct] = false_neg.get(target_punct, 0.0) + float(not is_correct) assert target_stream[t_i] == predict_stream[p_i] or predict_stream[p_i] == "<unk>", \ 
"File: %s \nError: %s (%s) != %s (%s) \nTarget context: %s \nPredicted context: %s" % \ (target_path, target_stream[t_i], t_i, predict_stream[p_i], p_i, " ".join(target_stream[t_i - 2:t_i + 2]), " ".join(predict_stream[p_i - 2:p_i + 2])) t_i += 1 p_i += 1 if t_i >= len(target_stream) - 1 and p_i >= len(predict_stream) - 1: break overall_tp, overall_fp, overall_fn = 0.0, 0.0, 0.0 out_str = "-" * 46 + "\n" out_str += "{:<16} {:<9} {:<9} {:<9}\n".format("PUNCTUATION", "PRECISION", "RECALL", "F-SCORE") for p in PUNCTUATION_VOCABULARY: if p == SPACE: continue overall_tp += true_pos.get(p, 0.0) overall_fp += false_pos.get(p, 0.0) overall_fn += false_neg.get(p, 0.0) punctuation = p precision = (true_pos.get(p, 0.0) / (true_pos.get(p, 0.0) + false_pos[p])) if p in false_pos else np.nan recall = (true_pos.get(p, 0.0) / (true_pos.get(p, 0.0) + false_neg[p])) if p in false_neg else np.nan f_score = (2. * precision * recall / (precision + recall)) if (precision + recall) > 0 else np.nan out_str += u"{:<16} {:<9} {:<9} {:<9}\n".format(punctuation, "{:.2f}".format(precision * 100), "{:.2f}".format(recall * 100), "{:.2f}".format(f_score * 100)) out_str += "-" * 46 + "\n" pre = overall_tp / (overall_tp + overall_fp) if overall_fp else np.nan rec = overall_tp / (overall_tp + overall_fn) if overall_fn else np.nan f1 = (2.0 * pre * rec) / (pre + rec) if (pre + rec) else np.nan out_str += "{:<16} {:<9} {:<9} {:<9}\n".format("Overall", "{:.2f}".format(pre * 100), "{:.2f}".format(rec * 100), "{:.2f}".format(f1 * 100)) err = round((100.0 - float(total_correct) / float(counter - 1) * 100.0), 2) ser = round((substitutions + deletions + insertions) / (correct + substitutions + deletions) * 100, 1) out_str += "ERR: %s%%\n" % err out_str += "SER: %s%%" % ser return out_str, f1, err, ser
# Uses https://github.com/kucherenko/jscpd to find code duplication
import subprocess
import os
import sys
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from common import get_base_dir, get_projects_to_scan

# Minimum-line thresholds swept when measuring duplication.
token_range_for_cpds = np.arange(5, 20, 3)


def cpd_for_project_with_min_lines(base, project, lines):
    """Run jscpd on `base`/`project` and return its duplication percentage.

    :param lines: minimum clone length in lines; min-tokens is scaled to
        10 tokens per line
    """
    # BUGFIX: '--min-lines' was previously passed as a bare 'min-lines'
    # string, so jscpd treated it as a positional path instead of applying
    # the line threshold. (Spelling follows the jscpd CLI; confirm against
    # the installed version, which here also accepts '--mintokens'.)
    subprocess.check_call(['jscpd', '--format', 'java',
                           '--mintokens', str(lines * 10),
                           '--min-lines', str(lines),
                           '--ignore', '**/*Test.java',
                           '-r', 'json',
                           os.path.join(base, project)])
    with open('report/jscpd-report.json') as json_file:
        data = json.load(json_file)
    os.remove(json_file.name)
    return float(data['statistics']['total']['percentage'])


def cpd_for_project(base, project):
    """Duplication percentages for one project across all thresholds."""
    return [cpd_for_project_with_min_lines(base, project, lines)
            for lines in token_range_for_cpds]


def line_plot(df):
    """Plot one duplication-vs-threshold line per project column in `df`."""
    plt.style.use('seaborn-darkgrid')
    my_dpi = 96
    plt.figure(figsize = (480/my_dpi, 480/my_dpi), dpi = my_dpi)
    # multiple line plots
    for column in df.drop('x', axis = 1):
        plt.plot(df['x'], df[column], marker = '', linewidth = 1, alpha = 0.4)
    plt.xticks(np.arange(5, 20, 3))
    # Label each line at its left-most data point.
    num = 0
    for i in df.values[0][1:]:
        num += 1
        name = list(df)[num]
        plt.text(5, i, name, horizontalalignment = 'left', size = 'small')
    plt.title("Code duplication in projects", fontsize = 12, fontweight = 0)
    plt.xlabel("Minimum lines to consider for duplication")
    plt.ylabel("Duplication (%)")
    plt.show()


def main():
    base_dir = get_base_dir()
    try:
        projects = get_projects_to_scan(base_dir)
        all_project_cpds = [(x, cpd_for_project(base_dir, x)) for x in projects]
        df = pd.DataFrame({'x': token_range_for_cpds})
        for project, cpds in all_project_cpds:
            df[project] = cpds
        line_plot(df)
        print(df)
    except OSError as err:
        # BUGFIX: the bare `except:` assumed an OSError-shaped exception and
        # itself crashed with AttributeError on anything else (e.g.
        # subprocess.CalledProcessError). Catch OSError explicitly and let
        # other errors propagate with a real traceback.
        print('Error %s: %s' % (err.filename, err.strerror))


if __name__ == '__main__':
    main()
import pymc3 as pm
import theano.tensor as tt


def estimate_retention_probability(observed_retained, n_items, t_steps,
                                   beta_remembering_kwargs, beta_decay_kwargs,
                                   id_ind=None):
    """Build a PyMC3 model of memory retention with exponential decay.

    Retention probability is theta = min(1, exp(-alpha * t) + beta), where
    `alpha` is the decay rate and `beta` a baseline remembering probability.
    When `id_ind` is given, one (alpha, beta) pair is drawn per individual
    and indexed by `id_ind`.

    :param observed_retained: observed retained-item counts
    :param n_items: number of items per observation
    :param t_steps: elapsed time steps per observation
    :param beta_remembering_kwargs: kwargs for the Beta prior on `beta`
    :param beta_decay_kwargs: kwargs for the Beta prior on `alpha`
    :param id_ind: optional per-observation individual indices
    :return: the (unfitted) pm.Model
    """
    with pm.Model() as model:
        t_steps_data = pm.Data('t_steps', t_steps)
        if id_ind is not None:
            id_ind_data = pm.Data('id_ind', id_ind)
            # BUGFIX: both priors were registered under the name 'alpha'
            # (PyMC3 raises on duplicate variable names inside one model) and
            # `beta_remembering_kwargs` was never used. Name the baseline
            # 'beta' and give it the remembering prior, matching the
            # hierarchical variant below.
            beta = pm.Beta('beta', shape=(len(id_ind), ),
                           **beta_remembering_kwargs)
            alpha = pm.Beta('alpha', shape=(len(id_ind), ),
                            **beta_decay_kwargs)
            decay = pm.Deterministic(
                'exponential_decay',
                pm.math.exp(-alpha[id_ind_data] * t_steps_data)
            )
            theta = pm.Deterministic(
                'theta', tt.minimum(1, decay + beta[id_ind_data])
            )
        else:
            beta = pm.Beta('beta', **beta_remembering_kwargs)
            alpha = pm.Beta('alpha', **beta_decay_kwargs)
            decay = pm.Deterministic(
                'exponential_decay',
                pm.math.exp(-alpha * t_steps_data)
            )
            theta = pm.Deterministic(
                'theta', tt.minimum(1, decay + beta)
            )
        observed = pm.Binomial(
            'observed_retained', n=n_items, p=theta,
            observed=observed_retained
        )
    return model


def estimate_retention_probability_hier(observed_retained, n_items, t_steps,
                                        id_ind,
                                        uniform_alpha_remembering_kwargs,
                                        uniform_beta_remembering_kwargs,
                                        uniform_alpha_decay_kwargs,
                                        uniform_beta_decay_kwargs):
    """Hierarchical variant: per-individual Beta priors whose (alpha, beta)
    hyper-parameters receive Uniform hyper-priors.

    :param id_ind: per-observation individual indices (required here)
    :return: the (unfitted) pm.Model
    """
    with pm.Model() as model:
        t_steps_data = pm.Data('t_steps', t_steps)
        id_ind_data = pm.Data('id_ind', id_ind)
        # ######################### HP #######################################
        beta_alpha_hp = pm.Uniform('alpha_remembering',
                                   **uniform_alpha_remembering_kwargs)
        beta_beta_hp = pm.Uniform('beta_remembering',
                                  **uniform_beta_remembering_kwargs)
        alpha_alpha_hp = pm.Uniform('alpha_decay',
                                    **uniform_alpha_decay_kwargs)
        alpha_beta_hp = pm.Uniform('beta_decay',
                                   **uniform_beta_decay_kwargs)
        # ####################################################################
        beta = pm.Beta('beta', shape=(len(id_ind), ),
                       alpha=beta_alpha_hp, beta=beta_beta_hp)
        alpha = pm.Beta('alpha', shape=(len(id_ind), ),
                        alpha=alpha_alpha_hp, beta=alpha_beta_hp)
        decay = pm.Deterministic(
            'exponential_decay',
            pm.math.exp(-alpha[id_ind_data] * t_steps_data)
        )
        theta = pm.Deterministic(
            'theta', tt.minimum(1, decay + beta[id_ind_data])
        )
        observed = pm.Binomial(
            'observed_retained', n=n_items, p=theta,
            observed=observed_retained
        )
    return model
r""" Features for testing the presence of nauty executables """ from sage.env import SAGE_NAUTY_BINS_PREFIX from . import Executable from .join_feature import JoinFeature class NautyExecutable(Executable): r""" A :class:`~sage.features.Feature` which checks for nauty executables. EXAMPLES:: sage: from sage.features.nauty import NautyExecutable sage: NautyExecutable('converseg').is_present() # optional - nauty FeatureTestResult('nauty_converseg', True) """ def __init__(self, name): r""" TESTS:: sage: from sage.features.nauty import NautyExecutable sage: isinstance(NautyExecutable('geng'), NautyExecutable) True """ Executable.__init__(self, name=f"nauty_{name}", executable=f"{SAGE_NAUTY_BINS_PREFIX}{name}", spkg="nauty") class Nauty(JoinFeature): r""" A :class:`~sage.features.Feature` describing the presence of the executables which comes as a part of ``nauty``. EXAMPLES:: sage: from sage.features.nauty import Nauty sage: Nauty().is_present() # optional - nauty FeatureTestResult('nauty', True) """ def __init__(self): r""" TESTS:: sage: from sage.features.nauty import Nauty sage: isinstance(Nauty(), Nauty) True """ JoinFeature.__init__(self, "nauty", [NautyExecutable(name) for name in ('directg', 'gentourng', 'geng', 'genbg', 'gentreeg', 'converseg')]) def all_features(): return [Nauty()]
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 15:37:37 2021

Short Function to create .JPEG copies of input image tiles for model input

@author: Grant Francis
email: gfrancis@uvic.ca
"""

import os
from PIL import Image
import glob
import numpy as np


def to_jpg(lib):
    """Write an ``<i>.jpg`` copy next to every ``<i>.tif`` tile in `lib`.

    Tiles are assumed to be named ``0.tif`` ... ``N-1.tif``, where N is the
    number of regular files currently present in the directory.
    """
    file_count = sum(1 for entry in os.listdir(lib)
                     if os.path.isfile(lib + '/' + entry))
    print('Making .JPG copies for model input...')
    for tile_idx in range(file_count):
        tile = Image.open(lib + '/%s.tif' % tile_idx)
        pixels = np.array(tile)
        Image.fromarray(pixels.astype(np.uint8)).save(lib + '/%s.jpg' % tile_idx)
    return
import os import numpy as np from sparse import load_npz # This will make an average kernel matrix of (M x N) dimensions. # If refcodes_path == comparison_refcodes_path, then M = N. # This code must be run after `soap_matrix_generator.py` # Note: This can be memory-intensive. # Settings basepath = os.getcwd() # Base path where results will be stored refcodes_path = os.path.join('..','qmof-refcodes.csv') # IDs corresponding to N structures comparison_refcodes_path = refcodes_path # IDs corresponding to M structures soaps_path = os.path.join(basepath, 'soap_matrices') # Path where SOAP matrices are stored #--------------------------------------- # Read in refcodes refcodes = np.genfromtxt(refcodes_path, delimiter=',', dtype=str).tolist() if refcodes_path == comparison_refcodes_path: comparison_refcodes = refcodes else: comparison_refcodes = np.genfromtxt( comparison_refcodes_path, delimiter=',', dtype=str).tolist() M = len(refcodes) N = len(comparison_refcodes) # Get number of features kernel_name = 'avg_soap_kernel.csv' example_soap = load_npz(os.path.join(soaps_path, os.listdir(soaps_path)[0])) N_features = np.shape(example_soap)[1] # Prepare M average SOAPs print('Initializing M matrix') avg_soaps_M = np.zeros((M, N_features), dtype=np.float32) for i in range(M): print(refcodes[i]) p = os.path.join(soaps_path, 'soap_'+str(refcodes[i])+'.npz') soap_temp = load_npz(p).todense() avg_soaps_M[i, :] = soap_temp.mean(axis=0) # Prepare N average SOAPs if M != N or refcodes_path != comparison_refcodes_path: print('Initializing N matrix') avg_soaps_N = np.zeros((N, N_features), dtype=np.float32) for i in range(N): print(comparison_refcodes[i]) p = os.path.join(soaps_path, 'soap_' + str(comparison_refcodes[i])+'.npz') soap_temp = load_npz(p).todense() avg_soaps_N[i, :] = soap_temp.mean(axis=0) # Compute average kernel matrix print('Computing K') if M == N and refcodes_path == comparison_refcodes_path: K = avg_soaps_M.dot(avg_soaps_M.T) norm = np.sqrt(np.einsum('ii,jj->ij', 
K, K)) else: K = avg_soaps_M.dot(avg_soaps_N.T) norm = np.sqrt(np.einsum( 'ii,jj->ij', avg_soaps_M.dot(avg_soaps_M.T), avg_soaps_N.dot(avg_soaps_N.T))) K = K/norm np.savetxt(os.path.join(basepath, kernel_name), K.T, delimiter=',')
from .sound import Sound
import warnings
import numpy as np
import os
import time

# Each ASR backend's third-party dependencies are imported optionally so the
# module still imports when only a subset of the engines is installed.
try:
    from deepspeech import Model as DPModel
except ModuleNotFoundError:
    warnings.warn("Missing some of the required libraries for running DeepSpeech.",
                  UserWarning)
try:
    import json
    import vosk
    from library.audio_format import float_to_byte
except ModuleNotFoundError:
    warnings.warn("Missing some of the required libraries for running Vosk.",
                  UserWarning)
try:
    import argparse
    import torch
    from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
    from fairseq import checkpoint_utils
    from fairseq.data import Dictionary
    from omegaconf.errors import MissingMandatoryValue
except ModuleNotFoundError:
    warnings.warn("Missing some of the required libraries for running Wav2vec 2.0.",
                  UserWarning)


class ASRSystem(object):
    """
    Parent class for all automatic speech recognition systems.

    Dependants of ASRSystem must implement self.transcribe(sound_or_path) and
    set self.samplerate_hz (the expected samplerate of input sounds for the
    ASR system) on initialisation.
    """

    def __init__(self, model_path, identifier=None):
        """
        Args:
            model_path: path to the model files for the concrete ASR system.
            identifier: optional id for this instance; defaults to the
                current Unix timestamp.
        """
        self.__samplerate_hz = None
        self.__samplerate_hz_is_set = False
        if identifier is None:
            self.id = int(time.time())
        else:
            self.id = identifier
        self.model_path = model_path

    def transcribe(self, sound_or_path):
        """Transcribe the given audiofile (path to file or array type)."""
        raise NotImplementedError()

    def _load_sound(self, sound_or_path):
        """
        Loads a sound from file path or directly array into a Sound object.

        Sounds are always resampled to self.samplerate_hz (usually 16kHz, but
        this may differ per ASR system) to ensure the input sound is at the
        samplerate that is expected by the ASR system.
        """
        if isinstance(sound_or_path, str):
            sound = Sound(sound_or_path, samplerate_Hz=self.samplerate_hz)
        else:
            sound = sound_or_path
        # NOTE(review): sounds loaded from a path are presumably already at
        # samplerate_hz; the extra resample below looks redundant but is kept
        # as a harmless no-op for that case.
        if self.samplerate_hz:
            sound = Sound.resample(sound, self.samplerate_hz)
        return sound

    @property
    def samplerate_hz(self):
        """The sample rate in Hz the ASRsystem expects the input sound to be sampled at."""
        if not self.__samplerate_hz_is_set:
            raise NotImplementedError('Descendants from ASRSystem must set samplerate_hz.')
        else:
            return self.__samplerate_hz

    @samplerate_hz.setter
    def samplerate_hz(self, value):
        self.__samplerate_hz = value
        self.__samplerate_hz_is_set = True

    @classmethod
    def create(cls, asrsystem_name, model_path):
        """Creates an instance of asrsystem_name using the model path.

        Raises:
            ValueError: if asrsystem_name is not a known ASR system.
        """
        if asrsystem_name == 'MozillaDeepSpeech':
            return MozillaDeepSpeech(model_path)
        elif asrsystem_name == 'Vosk':
            return Vosk(model_path)
        elif asrsystem_name == 'Wav2Vec2':
            return Wav2Vec2(model_path)
        elif asrsystem_name == 'DummyASR':
            return DummyASR(model_path)
        else:
            # BUGFIX: the format string was never interpolated, so the error
            # message used to contain a literal `%s`.
            raise ValueError('ASR system `%s` has not been implemented.'
                             % asrsystem_name)


class DummyASR(ASRSystem):
    """
    Dummy ASR system that always transcribes 'hello world'.
    """

    def __init__(self, model_path, use_language_model=False, identifier=None):
        super(DummyASR, self).__init__(model_path, identifier)
        self.samplerate_hz = 16000

    def transcribe(self, sound_or_path, fs=None):
        """Return the fixed transcription 'hello world'."""
        return 'hello world'


class Vosk(ASRSystem):
    """
    Implements a Vosk Kaldi model based on the model file at model_path.

    See https://alphacephei.com/vosk/install for installation instructions.
    """

    def __init__(self, model_path, samplerate=16000, identifier=None):
        super(Vosk, self).__init__(model_path, identifier)
        vosk.SetLogLevel(0)
        self.model = vosk.Model(self.model_path)
        self.rec = vosk.KaldiRecognizer(self.model, samplerate)
        self.samplerate_hz = samplerate

    def transcribe(self, sound_or_path, fs=None):
        """Transcribe the sound by streaming it to Vosk in 4000-sample chunks."""
        def frame_generator(sound):
            # Yield fixed-size byte chunks, plus a final partial chunk.
            c = 0
            nr_frames = 4000
            while c + nr_frames <= len(sound):
                yield float_to_byte(sound[c:c + nr_frames])
                c += nr_frames
            if c < len(sound):
                yield float_to_byte(sound[c:])

        sound = self._load_sound(sound_or_path)
        # BUGFIX: when AcceptWaveform() finalises an utterance mid-stream its
        # text is only available from Result() at that moment.  The previous
        # implementation discarded those results, so only the last utterance
        # of the recording survived into the returned transcription.
        segments = []
        for data in frame_generator(sound):
            if self.rec.AcceptWaveform(data):
                segments.append(json.loads(self.rec.Result()).get('text', ''))
            else:
                self.rec.PartialResult()
        final_result = json.loads(self.rec.FinalResult())
        segments.append(final_result['text'])
        return ' '.join(s for s in segments if s)


class MozillaDeepSpeech(ASRSystem):
    """
    Implements a Mozilla DeepSpeech model based on the model file at model_path.

    This code assumes the model follows Mozilla DeepSpeech version 6.1 and may
    not work for later models.
    See https://deepspeech.readthedocs.io/en/v0.6.1/USING.html for installation
    instructions.
    """

    def __init__(self, model_path, use_language_model=False, identifier=None):
        super(MozillaDeepSpeech, self).__init__(model_path, identifier)
        model_path = os.path.join(self.model_path, 'output_graph.pbmm')
        language_model_path = os.path.join(self.model_path, 'lm.binary')
        trie_path = os.path.join(self.model_path, 'trie')
        self._model = DPModel(model_path, 500)  # 500 = beam width
        self.samplerate_hz = 16000
        if use_language_model:
            self._model.enableDecoderWithLM(language_model_path, trie_path,
                                            0.75, 1.85)  # alpha, beta weights

    def transcribe(self, sound_or_path, fs=None):
        """Transcribe the sound after converting it to 16-bit PCM."""
        sound = self._load_sound(sound_or_path)
        sound = (np.iinfo(np.int16).max * sound).astype(np.int16)
        res = self._model.stt(sound)
        return res


class Wav2Vec2(ASRSystem):
    """
    Implements Wav2Vec 2.0 model based on the model file at model_path.

    This code assumes the model follows Wav2Vec 2.0 as per
    https://github.com/pytorch/fairseq/commit/1bba712622b8ae4efb3eb793a8a40da386fe11d0
    and may not work for later models. See link above for installation
    instructions.

    Note that this code requires a dictionary file (dict_file) to be present
    in the same folder as where the main model (model_path) is located.
    """

    def __init__(self, model_path, identifier=None, dict_file='dict.ltr.txt'):
        super(Wav2Vec2, self).__init__(model_path, identifier)
        self._init_model(dict_file)
        self.samplerate_hz = 16000

    def _init_model(self, dict_file):
        """Load the target dictionary, the model ensemble and the decoder."""
        parser = self._create_parser(dict_file)
        model_dir = os.path.dirname(self.model_path)
        target_dict_path = os.path.join(model_dir, dict_file)
        args = parser.parse_args(
            ['--target_dict_path', target_dict_path,
             '--w2v_path', self.model_path]
        )
        target_dict = Dictionary.load(args.target_dict_path)
        self.model = self._load_model(args.w2v_path, target_dict)[0]
        self.model.eval()
        self.generator = W2lViterbiDecoder(args, target_dict)
        self.args = args
        self.target_dict = target_dict

    def _create_parser(self, dict_file):
        """Build the argparse parser that mimics fairseq's recognize CLI."""
        parser = argparse.ArgumentParser(description='Wav2vec-2.0 Recognize')
        parser.add_argument('--w2v_path', type=str,
                            default='~/wav2vec2_vox_960h.pt',
                            help='path of pre-trained wav2vec-2.0 model')
        parser.add_argument('--target_dict_path', type=str,
                            default=dict_file,
                            help='path of target dict (dict.ltr.txt)')
        parser.add_argument('--nbest', type=int, default=1, help='nbest')
        parser.add_argument('--criterion', type=str, default='ctc',
                            help='type of criterion')
        return parser

    def _load_model(self, model_path, target_dict):
        """Load the wav2vec 2.0 model ensemble from model_path.

        Returns:
            The list of loaded models (callers index [0]).
        """
        # Use the model's directory as the fairseq `data` folder.
        # BUGFIX: the previous str.replace of the basename could corrupt
        # paths whose directory components contain the file name.
        data_folder = os.path.dirname(self.model_path)
        args_overrides = {
            'data': data_folder,
        }
        try:
            model, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
                [model_path],
                arg_overrides=args_overrides
            )
        except MissingMandatoryValue:
            # Some checkpoints additionally require the w2v_path override.
            args_overrides['w2v_path'] = model_path
            model, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
                [model_path],
                arg_overrides=args_overrides
            )
        return model

    def transcribe(self, sound_or_path):
        """Transcribe the sound with the loaded wav2vec 2.0 model."""
        sound = self._load_sound(sound_or_path)

        # Reformat input sound so it is shaped as expected by Wav2Vec 2.0.
        sample = dict()
        net_input = dict()
        feature = self._extract_feature(sound)
        net_input["source"] = feature.unsqueeze(0)
        padding_mask = torch.BoolTensor(
            net_input["source"].size(1)).fill_(False).unsqueeze(0)
        net_input["padding_mask"] = padding_mask
        sample["net_input"] = net_input

        # Move to GPU if required.
        if next(self.model.parameters()).is_cuda:
            sample['net_input']['source'] = \
                sample['net_input']['source'].to(torch.device("cuda:0"))
            sample['net_input']['padding_mask'] = \
                sample['net_input']['padding_mask'].to(torch.device("cuda:0"))

        # Run network input through network to obtain output.
        with torch.no_grad():
            hypo = self.generator.generate([self.model], sample,
                                           prefix_tokens=None)

        # Clean up output into letter output ('|' is the word separator).
        hyp_pieces = self.target_dict.string(hypo[0][0]["tokens"].int().cpu())
        transcription = hyp_pieces.replace(" ", "").replace("|", " ").strip()
        return transcription

    def _extract_feature(self, sound):
        """Convert the raw waveform into a layer-normalised float tensor."""
        def postprocess(feats):
            if feats.dim() == 2:
                # Multi-channel input: average the channels to mono.
                feats = feats.mean(-1)
            assert feats.dim() == 1, feats.dim()
            with torch.no_grad():
                feats = torch.nn.functional.layer_norm(feats, feats.shape)
            return feats

        feats = torch.from_numpy(sound).float()
        feats = postprocess(feats)
        return feats
from __future__ import (absolute_import, division, print_function, unicode_literals) from matplotlib.patches import Rectangle import matplotlib.pyplot as plt from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker, VPacker import matplotlib.lines as mlines import operator import numpy as np import six """ A legend is a dict of type {aesthetic: { 'column_name': 'column-name-in-the-dataframe', 'dict': {visual_value: legend_key}, 'scale_type': 'discrete' | 'continuous'}} where aesthetic is one of: 'color', 'fill', 'shape', 'size', 'linetype', 'alpha' visual_value is a: color value, fill color value, linetype string, shape character, size value, alpha value legend_key is either: - quantile-value for continuous mappings. - value for discrete mappings. """ def make_title(title): title = title.title() return TextArea(" %s " % title, textprops=dict(color="k", fontweight="bold")) def make_shape_key(label, shape): idx = len(label) pad = 20 - idx lab = label[:max(idx, 20)] pad = " "*pad label = TextArea(" %s" % lab, textprops=dict(color="k")) viz = DrawingArea(15, 20, 0, 0) fontsize = 10 key = mlines.Line2D([0.5*fontsize], [0.75*fontsize], marker=shape, markersize=(0.5*fontsize), c="k") viz.add_artist(key) return HPacker(children=[viz, label], align="center", pad=5, sep=0) def make_size_key(label, size): if not isinstance(label, six.string_types): label = round(label, 2) label = str(label) idx = len(label) pad = 20 - idx lab = label[:max(idx, 20)] pad = " "*pad label = TextArea(" %s" % lab, textprops=dict(color="k")) viz = DrawingArea(15, 20, 0, 0) fontsize = 10 key = mlines.Line2D([0.5*fontsize], [0.75*fontsize], marker="o", markersize=size / 20., c="k") viz.add_artist(key) return HPacker(children=[viz, label], align="center", pad=5, sep=0) # TODO: Modify to correctly handle both, color and fill # to include an alpha def make_line_key(label, color): label = str(label) idx = len(label) pad = 20 - idx lab = label[:max(idx, 20)] pad = " "*pad label = 
TextArea(" %s" % lab, textprops=dict(color="k")) viz = DrawingArea(20, 20, 0, 0) viz.add_artist(Rectangle((0, 5), width=16, height=5, fc=color)) return HPacker(children=[viz, label], height=25, align="center", pad=5, sep=0) def make_linetype_key(label, linetype): idx = len(label) pad = 20 - idx lab = label[:max(idx, 20)] pad = " "*pad label = TextArea(" %s" % lab, textprops=dict(color="k")) viz = DrawingArea(30, 20, 0, 0) fontsize = 10 x = np.arange(0.5, 2.25, 0.25) * fontsize y = np.repeat(0.75, 7) * fontsize key = mlines.Line2D(x, y, linestyle=linetype, c="k") viz.add_artist(key) return HPacker(children=[viz, label], align="center", pad=5, sep=0) legend_viz = { "color": make_line_key, "fill": make_line_key, "linetype": make_linetype_key, "shape": make_shape_key, "size": make_size_key, } def add_legend(legend, ax): """ Add a legend to the axes Parameters ---------- legend: dictionary Specification in components.legend.py ax: axes """ # TODO: Implement alpha # It should be coupled with fill, if fill is not # part of the aesthetics, then with color remove_alpha = 'alpha' in legend if remove_alpha: _alpha_entry = legend.pop('alpha') # py3 and py2 have different sorting order in dics, # so make that consistent for i, aesthetic in enumerate(sorted(legend.keys())): legend_entry = legend[aesthetic] new_legend = draw_entry(ax, legend_entry, aesthetic, i) ax.add_artist(new_legend) if remove_alpha: legend['alpha'] = _alpha_entry def draw_entry(ax, legend_entry, aesthetic, ith_entry): children = [] children.append(make_title(legend_entry['column_name'])) viz_handler = legend_viz[aesthetic] legend_items = sorted(legend_entry['dict'].items(), key=operator.itemgetter(1)) children += [viz_handler(str(lab), col) for col, lab in legend_items] box = VPacker(children=children, align="left", pad=0, sep=5) # TODO: The vertical spacing between the legends isn't consistent. 
Should be # padded consistently anchored_box = AnchoredOffsetbox(loc=6, child=box, pad=0., frameon=False, #bbox_to_anchor=(0., 1.02), # Spacing goes here bbox_to_anchor=(1, 0.8 - 0.35 * ith_entry), bbox_transform=ax.transAxes, borderpad=1., ) # Workaround for a bug in matplotlib up to 1.3.1 # https://github.com/matplotlib/matplotlib/issues/2530 anchored_box.set_clip_on(False) return anchored_box if __name__=="__main__": fig = plt.figure() ax = fig.add_axes([0.1, 0.1, 0.4, 0.7]) ax.add_artist(draw_legend(ax,{1: "blah", 2: "blah2", 15: "blah4"}, "size", 1)) plt.show(block=True)
import pytest
import cv2
import time
import numpy as np

from perception.scene.eval import SceneSensor
from perception.common.video import clip_video_to_frames, VideoWriter
from perception.common.visualize import draw_bboxes
from perception.common.utils import robot2_frame_crop_resize

VIDEO = 'data/potential_interaction.mp4'
R2_VIDEO = 'data/passing_by.mp4'
YOLOv3_MODEL = 'pretrain_models/yolov3_r34'
YOLOv4_MODEL = 'tools/yolov4_paddle/inference_model'


def _write_detection_video(out_path, frame_size, fps, frames, instances_lst):
    """Render each frame's detections as labelled boxes and export a video."""
    writer = VideoWriter(out_path, frame_size, fps)
    for frame, instances in zip(frames, instances_lst):
        boxes = np.array([inst['bbox'] for inst in instances])
        names = [inst['category'] for inst in instances]
        writer.add_frame(draw_bboxes(frame, boxes, labels=names))
    writer.close()


class TestYOLOv3SceneSensor(object):
    """Integration checks for the YOLOv3-backed SceneSensor."""

    def setup_class(self):
        self.scene_sensor = SceneSensor(
            YOLOv3_MODEL, gpu=0, algorithm='yolov3')
        self.frames = clip_video_to_frames(VIDEO, 3001., 4000.)

    # NOTE(review): pytest treats the extra `export` parameter as a fixture
    # name during collection -- confirm these tests are meant to be invoked
    # directly rather than collected by pytest.
    def test_get_instances(self, export=True):
        detections = self.scene_sensor.get_instances(self.frames)
        assert len(detections) == len(self.frames)
        if export:
            h, w, fps = 480, 640, 24.  # read from VIDEO
            _write_detection_video(
                'data/scene_yolo_demo.mp4', (w, h), fps,
                self.frames, detections)

    def test_get_feature_map(self):
        fmaps = self.scene_sensor.get_feature_map(self.frames)
        assert len(fmaps) == len(self.frames)


class TestYOLOv4SceneSensor(object):
    """Integration checks for the YOLOv4-backed SceneSensor."""

    def setup_class(self):
        self.roi_feat_resolution = 5
        self.scene_sensor = SceneSensor(
            YOLOv4_MODEL,
            gpu=0,
            img_shape=[3, 416, 416],
            roi_feat_resolution=self.roi_feat_resolution,
            algorithm='yolov4')
        self.frames = clip_video_to_frames(R2_VIDEO, 0., None)

    def test_get_instances(self, export=True):
        detections = self.scene_sensor.get_instances(self.frames)
        assert len(detections) == len(self.frames)
        if export:
            h, w, fps = 720, 1280, 24.  # read from VIDEO
            _write_detection_video(
                'data/scene_yolo4_demo.mp4', (w, h), fps,
                self.frames, detections)

    def test_get_instances_with_feats(self):
        detections, fm_lst = self.scene_sensor.get_instances_with_feats(
            self.frames, get_full_fm=True)
        _, h, w = detections[0][0]['fm'].shape
        assert h == w == self.roi_feat_resolution
        assert len(detections) == len(fm_lst) == len(self.frames)
import numpy as np
import chainer
import chainer.functions as F


def main():
    """Regression check: sqrt(relu(x - 0.5)) must never yield NaNs.

    Runs 1000 random batches through the op chain and asserts the forward
    output is NaN-free each time.
    """
    for _ in range(1000):
        batch = np.random.random((2, 3, 224, 224)).astype(np.float32)
        out = F.sqrt(F.relu(batch - 0.5)).array
        assert not np.isnan(out).any()
    print("no error")


if __name__ == '__main__':
    main()
""" This module stores the FeaModel class. This class is the highest level object in a pycalculix program. It stores all parts, loads, constraints, mesh, problem, and results_file objects. """ import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection # element plotting import matplotlib.colors as colors import matplotlib.cm as cmx import subprocess # used to launch meshers cgx and gmsh import os # used to delete files written by cgx from numpy.core.function_base import linspace # need to make contours from . import environment from . import base_classes from . import geometry from . import material from . import components from . import connectors from . import loads from . import mesh from . import partmodule from . import problem from . import selector # element colors, 0-1, 0=black, 1=whate ECOLOR = '.4' FCOLOR = '.9' CMAP = 'jet' # for results GEOM_CMAP = 'Pastel1' # color map for parts or areas class FeaModel(object): """Makes a FeaModel instance. Parts, area, lines, arcs, points, nodes, elements, faces, models, components, and loads are stored in this object. Args: model_name (str): project name for this FeaModel, this is a file prefix ccx (None or str): path to Calculix ccx solver, pass this when you want to overwite the default program location. None means the default envionment.CCX is used. cgx (None or str): path to Calculix cgx mesher, pass this when you want to overwite the default program location. None means the default envionment.CGX is used. gmsh (None or str): path to gmsh mesher, pass this when you want to overwite the default program location. None means the default envionment.GMSH is used. 
Attributes: fname (str): FeaModel project file name prefix points (Itemlist): list of all Point lines (Itemlist): list of all Line and Arc signlines (Itemlist): list of all SignLine and SignArc lineloops (Itemlist): list of all LineLoop, each contains SignLine SignArc areas (Itemlist):list of all Area, each contains LineLoop(s) parts (Itemlist): list of all Part matls (Itemlist): list of all materials components (Itemlist): list of all components loads (dict): a dictionary of loads | Key (float): the load time point | loads[time] = list of loads for that time step | See method set_time to change the current time. | Time = 0.0 stores constant loads, such as: | material, thickness contacts (Itemlist): list of contacts surfints (Itemlist): list of surface interactions problems (Itemlist): list of problems nodes (Meshlist): list of all mesh nodes eshape (str): element shape - 'quad': quadrilateral elements (Default) - 'tri': triangle elements eorder (int): element order, 1 or 2 - 1: elements meshed with corner nodes only - 2: (Default) elements meshed with corner and midside nodes elements (Meshlist): list of all mesh elements faces (list): list of all element faces, includes non-exterior ones view (Selector): currently selected items Attributes: parts: list of selected parts areas: list of selected areas lines: list of selected signed lines or arcs points: list of selected points elements: list of selected elements faces: list of selected faces nodes: list of selected nodes time (float): current model time value, defaults to 1.0 units (dict): a dict to store units for different fields Keys: - 'displ': displacement or location - 'force': force - 'stress': stress - 'temp': temperature - 'density': density (mass/volume) - 'time': time Returns: Text string describing the units for the given key field. See the set_units method. For example when units have been set to metric, the below values are returned. 
- 'dist' --> 'm' - 'density' --> 'kg/(m^3)' - 'time' --> 's' """ def __init__(self, model_name, ccx=None, cgx=None, gmsh=None): self.fname = model_name self.points = base_classes.Itemlist() self.lines = base_classes.Itemlist() self.signlines = base_classes.Itemlist() self.lineloops = base_classes.Itemlist() self.areas = base_classes.Itemlist() self.parts = base_classes.Itemlist() self.matls = base_classes.Itemlist() self.components = base_classes.Itemlist() self.loads = {} # store loads by time self.contacts = base_classes.Itemlist() self.surfints = base_classes.Itemlist() self.problems = base_classes.Itemlist() self.nodes = base_classes.Meshlist() self.elements = base_classes.Meshlist() self.faces = [] self.view = selector.Selector(self) self.time = 1.0 # 0 is model set-up, 1 is first step, etc self.eshape = 'quad' self.eorder = 2 self.units = {} # fix the paths to the needed programs, ccx, cgx, and gmsh if ccx != None: environment.CCX = ccx if cgx != None: environment.CGX = cgx if gmsh != None: environment.GMSH = gmsh def set_ediv(self, items, ediv): """Sets the number of elements on the passed line. Args: items (str or SignLine or SignArc or list): lines to set ediv on - str: 'L0' - list of str ['L0', 'L1'] - list of SignLine or SignArc part.bottom or part.hole[-1] ediv (int): number of elements to mesh on the line """ items = self.get_items(items) for line in items: line.set_ediv(ediv) def set_esize(self, items, esize): """Sets the element size on the passed line. Args: items (str or SignLine or SignArc or list): lines or points to set esize on - str: 'L0' - list of str ['L0', 'L1', 'P3'] - list of SignLine or SignArc part.bottom or part.hole[-1] esize (float): size of the mesh elements on the line """ items = self.get_items(items) for item in items: item.set_esize(esize) def set_units(self, dist_unit='m', cfswitch=False): """Sets the units that will be displayed when plotting. Picks a unit set based on the passed distance unit. 
That unit set is printed to the console when set. Defaults to MKS units (meter-kilogram-second) ======== ===== ====== =========== =============== ==== Distance Force Stress Temperature Density Time ======== ===== ====== =========== =============== ==== 'm' 'N' 'Pa' 'K' 'kg/(m^3)' 's' 'mm' 'N' 'MPa' 'K' 'tonne/(mm^3)' 's' 'in' 'lbf' 'psi' 'R' 'slinch/(in^3)' 's' 'ft' 'lbf' 'psf' 'R' 'slug/(ft^3)' 's' ======== ===== ====== =========== =============== ==== See get_units method or returning text strings based on unit types. Args: dist_unit (str): string of distance unit. Options: - 'm': meter - 'mm': milimeter - 'in': inch - 'ft': foot cfswitch (bool): Celsius/Fahrenheit temperature switch. Default is False. If True, this switches temperature from K-->C or from R-->F Default keeps units as shown in above table. """ keys = ['dist', 'force', 'stress', 'temp', 'density', 'time'] m_newton = ['m', 'N', 'Pa', 'K', 'kg/(m^3)', 's'] mm_newton = ['mm', 'N', 'MPa', 'K', 'tonne/(mm^3)', 's'] in_lbf = ['in', 'lbf', 'psi', 'R', 'slinch/(in^3)', 's'] ft_lbf = ['ft', 'lbf', 'psf', 'R', 'slug/(ft^3)', 's'] unit_systems = [m_newton, mm_newton, in_lbf, ft_lbf] vals = [usys for usys in unit_systems if usys[0] == dist_unit][0] if cfswitch: # switches K-->C and R-->F newunit = {'K':'C', 'R':'F'} vals['temp'] = newunit[vals['temp']] # set values adict = dict(zip(keys, vals)) adict['displ'] = adict['dist'] print('================================================') print('Units have been set to %s_%s' % (adict['dist'], adict['force'])) for key in adict: print('For %s use %s' % (key, adict[key])) print('================================================') self.units = adict def get_units(self, *args): """Returns units for the passed arguments. Accepts and returns a list of strings. 
Options for inputs: - 'dist' - 'displ' (same units as 'dist') - 'force' - 'stress' - 'temp' - 'density' - 'time' """ res = [] for arg in args: mystr = '' if arg in self.units: # check if the requested item is in the units dict mystr = ' ('+ self.units[arg] + ')' elif arg in base_classes.FIELDTYPE: # return units on results dict item, ex: stress, strain etc ftype = base_classes.FIELDTYPE[arg] if ftype in self.units: mystr = ' ('+ self.units[ftype] + ')' res.append(mystr) return res def set_time(self, time): """Sets the time in the FeaModel (preprocessor). This time is used when setting loads. Args: time (float): the time to set """ self.time = time def get_item(self, item): """Returns an item given a string identifying the item. Args: item (str): 'A0' or 'P0' or 'L0' etc. """ if item[0] == 'P': # get point items = self.points num = int(item[1:]) res = [a for a in items if a.id == num] return res[0] elif item[0] == 'L' or item[1] == 'L': # get line items = self.signlines num = int(item[1:]) items = [a for a in items if a.get_name() == item] return items[0] elif item[0] == 'A': # get area items = self.areas num = int(item[1:]) items = [a for a in items if a.id == num] return items[0] elif item[0] == 'E': # get element items = self.elements items = [a for a in items if a.get_name() == item] return items[0] elif item[0] == 'N': # get node items = self.nodes items = [a for a in items if a.get_name() == item] return items[0] else: print('Unknown item! Please pass the name of a point, line or area!') def get_items(self, items): """Returns a list of correctly typed items. Input can be a single string or item, or a list of strings identifying items. """ res_items = base_classes.listify(items) for ind, item in enumerate(res_items): if isinstance(item, str): res_items[ind] = self.get_item(item) return res_items def make_matl(self, name): """Makes and returns a new material. 
Args: name (str): the material's name """ mat = material.Material(name) self.matls.append(mat) return mat def make_part(self): """Makes and returns a new part.""" #p = part.Part(self) #self.parts.append(p) return partmodule.Part(self) def make_problem(self, problem_type='struct', parts='all'): """Makes and returns a new problem, which can be solved. Args: problem_type (str): problem type, options: 'struct': structural parts (str Part or list of Part): Parts the model will analyze. Options: - 'all': add all parts. This is the default. - Part: add the single part - list of Part: add these parts """ if parts == 'all': parts = self.parts prob = problem.Problem(self, problem_type, parts) return prob def print_summary(self): """Prints a summary of the items in the database. """ items = ['parts', 'areas', 'signlines', 'points', 'elements', 'faces', 'nodes'] names = ['parts', 'areas', 'lines', 'points', 'elements', 'faces', 'nodes'] spacer = '----------------------------------' print(spacer) print("Items in the database:") for (item_str, name) in zip(items, names): items = getattr(self, item_str) print(' %s: %i' % (name, len(items))) print(spacer) def plot_nodes(self, fname='', display=True, title='Nodes', nnum=False): """Plots the selected nodes. 
Args: fname (str): png image file prefix, if given, image will be saved display (bool): if True, interactive plot will be shown, if False plot will not be shown title (str): the plot's title nnum (bool): if True node numbers are plotted """ nodes = self.view.nodes if len(nodes) > 0: # plotting elements fig = plt.figure() axis = fig.add_subplot(111) # plot nodes, this is quicker than individual plotting axs = [node.y for node in nodes] rads = [node.x for node in nodes] axis.scatter(axs, rads, s=7, color='black') if nnum: for node in nodes: node.label(axis) # set units [d_unit] = self.get_units('dist') plt.title(title) plt.xlabel('axial, y'+d_unit) plt.ylabel('radial, x'+d_unit) plt.axis('scaled') # extract max and min for plot window radials = [n.x for n in nodes] axials = [n.y for n in nodes] # finish pot base_classes.plot_set_bounds(plt, axials, radials) base_classes.plot_finish(plt, fname, display) else: # no elements exist or no elemnts are selected res = '' if len(self.nodes) == 0: res = 'No nodes exist! Try meshing your parts!' else: res = 'No nodes are selected! Select some!' print(res) def plot_elements(self, fname='', display=True, title='Elements', enum=False, nshow=False, nnum=False): """Plots the selected elements. 
Args: fname (str): png image file prefix, if given, image will be saved display (bool): if True, interactive plot will be shown, if False plot will not be shown title (str): the plot's title enum (bool): if True element numbers are plotted nshow (bool): True=plot nodes, False=don't plot them nnum (bool): if True node numbers are plotted """ nodes = self.view.nodes elements = self.view.elements if len(elements) > 0: # plotting elements fig = plt.figure() axis = fig.add_subplot(111) polys = [] for element in elements: poly = element.get_poly() polys.append(poly) coll = PatchCollection(polys, facecolors=FCOLOR, edgecolors=ECOLOR) axis.add_collection(coll) # plot element numbers if enum: for element in elements: element.label(axis) # plot nodes, this is quicker than individual plotting if nshow: axs = [node.y for node in nodes] rads = [node.x for node in nodes] axis.scatter(axs, rads, s=7, color='black') if nnum: for node in nodes: node.label(axis) # set units [d_unit] = self.get_units('dist') plt.title(title) plt.xlabel('axial, y'+d_unit) plt.ylabel('radial, x'+d_unit) plt.axis('scaled') # extract nodes for elements if no nodes are selected if len(nodes) == 0: tmp = set() for element in elements: tmp.update(element.nodes) nodes = list(tmp) # extract max and min for plot window radials = [n.x for n in nodes] axials = [n.y for n in nodes] # finish pot base_classes.plot_set_bounds(plt, axials, radials) base_classes.plot_finish(plt, fname, display) else: # no elements exist or no elemnts are selected res = '' if len(self.elements) == 0: res = 'No elements exist! Try meshing your parts!' else: res = 'No elements are selected! Select some!' print(res) def plot_pressures(self, fname='', display=True): """Plots the load step pressures on the passed part(s) or selected elements. This is an element plot, with arrows showing pressure magnitude and directions. 
Args: fname (str): png image file prefix, if given, image will be saved display (bool): if True, interactive plot will be shown, if False plot will not be shown. Default = True """ elements = self.view.elements faces = self.view.faces nodes = self.view.nodes # plot all elements and store length, length determins min pressure arrow if len(elements) > 0: # plotting elements fig = plt.figure() axis = fig.add_subplot(111, aspect='equal') # plot polys and calculate avg face length polys = [] face_len = [] for element in elements: face_len.append(element.face[1].length()) poly = element.get_poly() polys.append(poly) coll = PatchCollection(polys, edgecolors=ECOLOR, facecolors=FCOLOR) axis.add_collection(coll) face_len = sum(face_len)/len(face_len) # store pressures we'll want to plot: list of [face, pval] plist = [] for load in self.loads[self.time]: if load.ltype in ['press', 'press_fluid']: loadlist = load.get_list() for [face, pval] in loadlist: if face in faces: plist.append([face, pval]) pressures = [abs(pval) for [face, pval] in plist] # set max and min pressure bounds pmin = min(pressures) pmax = max(pressures) # extract nodes for elements if no nodes are selected if len(nodes) == 0: tmp = set() for element in elements: tmp.update(element.nodes) nodes = list(tmp) radials = [p.x for p in nodes] axials = [p.y for p in nodes] # arrow length = arrow_min + (pval-pmin)*mult arrow_min = face_len mult = 0 if pmax != pmin: adelta = max(axials) - min(axials) rdelta = max(radials) - min(radials) delta = max(adelta, rdelta) dist = delta*0.2 mult = dist/(pmax - pmin) # make tick list for later plot, and color map cbar_val = None tick_list = [] cmap = None if pmax != pmin: # we have a range of values we're plotting tick_list = linspace(pmin, pmax, 8) cmap = plt.get_cmap(CMAP) else: cbar_val = pmin pmax = pmin + 1.0 pmin = pmin - 1.0 tick_list = [pmin, cbar_val, pmax] # default 3 values to plot one solid color cmap = colors.ListedColormap(['b', 'b']) # default to plot one val # 
set color contours for arrows cnorm = colors.Normalize(vmin=pmin, vmax=pmax) scalarmap = cmx.ScalarMappable(norm=cnorm, cmap=cmap) scalarmap.set_array([]) # make arrows, store axials + radials for [face, pval] in plist: [face_point, unit] = face.get_mnorm() arrow_length = arrow_min + (abs(pval) - pmin)*mult other_point = face_point + unit*arrow_length radials.append(other_point.x) axials.append(other_point.y) # compression tail = other_point delta = face_point - other_point if pval < 0: # tension tail = face_point delta = other_point - face_point headwidth = face_len*0.2 headlength = face_len*0.3 colorval = scalarmap.to_rgba(abs(pval)) plt.arrow(tail.y, tail.x, delta.y, delta.x, color=colorval, head_width=headwidth, head_length=headlength, length_includes_head=True) # set the horizontal and vertical axes base_classes.plot_set_bounds(plt, axials, radials) # set units and titles [d_unit, p_unit, t_unit] = self.get_units('dist', 'stress', 'time') tstr = 'Pressures %s\nTime=%f%s' % (p_unit, self.time, t_unit) plt.title(tstr) plt.xlabel('axial, y'+d_unit) plt.ylabel('radial, x'+d_unit) # set the colorbar cbar = plt.colorbar(scalarmap, orientation='vertical', ticks=tick_list) if cbar_val != None: cbar.ax.set_yticklabels(['', str(cbar_val), '']) base_classes.plot_finish(plt, fname, display) else: res = '' if len(self.elements) == 0: res = 'No elements exist! Try meshing your parts!' else: res = 'No elements are selected! Select some!' print(res) def plot_constraints(self, fname='', display=True): """Plots the constraints on the passed part(s) or selected items. This is an element and node plot, with arrows showing displacement constraints. Args: fname (str): png image file prefix, if given, image will be saved display (bool): if True, interactive plot will be shown, if False plot will not be shown. 
Default = True """ elements = self.view.elements nodes = self.view.nodes # plot all elements if len(elements) > 0: # plotting elements fig = plt.figure() axis = fig.add_subplot(111, aspect='equal') # plot polys and calculate avg face length polys = [] face_len = [] for element in elements: face_len.append(element.face[1].length()) poly = element.get_poly() polys.append(poly) coll = PatchCollection(polys, edgecolors=ECOLOR, facecolors=FCOLOR) axis.add_collection(coll) face_len = sum(face_len)/len(face_len) # store displacements we'll plot: list of [node, dict ux,uy,uz] ulist = [] vals = [] for load in self.loads[self.time]: if load.ltype in ['ux', 'uy', 'uz']: alist = load.get_list() for nodelist in alist: node = nodelist[0] if node in nodes: ulist += [nodelist] vals += list(nodelist[1].values()) # check min and max bounds pmin = min(vals) pmax = max(vals) # make tick list for later plot, and color map cbar_val = None tick_list = [] cmap = None if pmax != pmin: # we have a range of values we're plotting tick_list = linspace(pmin, pmax, 8) cmap = plt.get_cmap(CMAP) else: cbar_val = pmin pmax = pmin + 1.0 pmin = pmin - 1.0 tick_list = [pmin, cbar_val, pmax] # default 3 values to plot one solid color cmap = colors.ListedColormap(['b', 'b']) # default to plot one val # set color contours for arrows cnorm = colors.Normalize(vmin=pmin, vmax=pmax) scalarmap = cmx.ScalarMappable(norm=cnorm, cmap=cmap) scalarmap.set_array([]) # make arrows for displacements # extract nodes for elements if no nodes are selected if len(nodes) == 0: tmp = set() for element in elements: tmp.update(element.nodes) nodes = list(tmp) radials = [p.x for p in nodes] axials = [p.y for p in nodes] pvect = {'ux':geometry.Point(1, 0, 0), 'uy':geometry.Point(0, 1, 0), 'uz':geometry.Point(0, 0, 1)} for [node, udict] in ulist: for (key, val) in udict.items(): headw = face_len*0.4 headl = headw point = geometry.Point(node.x, node.y, node.z) unit = pvect[key] # nonzero displacement, draw from node tail = point 
head = tail + unit*val store_point = head if val == 0: # zero displ, draw to node head = point tail = head - unit*face_len store_point = tail delta = head - tail radials.append(store_point.x) axials.append(store_point.y) colorVal = scalarmap.to_rgba(val) plt.arrow(tail.y, tail.x, delta.y, delta.x, color=colorVal, head_width=headw, head_length=headl, length_includes_head=True) # set the horizontal and vertical axes base_classes.plot_set_bounds(plt, axials, radials) # set units + titles [d_unit, t_unit] = self.get_units('dist', 'time') tstr = 'Constraints %s\nTime=%f%s' % (d_unit, self.time, t_unit) plt.title(tstr) plt.xlabel('axial, y'+d_unit) plt.ylabel('radial, x'+d_unit) # set the colorbar cbar = plt.colorbar(scalarmap, orientation='vertical', ticks=tick_list) if cbar_val != None: cbar.ax.set_yticklabels(['', str(cbar_val), '']) base_classes.plot_finish(plt, fname, display) else: res = '' if len(self.elements) == 0: res = 'No elements exist! Try meshing your parts!' else: res = 'No elements are selected! Select some!' print(res) def plot_multiple(self, fname='', display=True, title='', styledict={'labels':['points', 'lines', 'areas', 'parts'], 'items':['points, lines, areas']}): """Plots items of type styledict['items'], labels styledict['labels'] Only the items currently selected will be plotted. 
""" # http://stackoverflow.com/questions/470690/how-to-automatically-generate-n-distinct-colors/4382138#4382138 # http://stackoverflow.com/questions/2328339/how-to-generate-n-different-colors-for-any-natural-number-n # start plotting fig = plt.figure() axis = fig.add_subplot(111) # check if we're plotting parts or areas and pick a color map colormaker, ids = None, None color_plot = False if 'parts' in styledict['items']: color_plot = True ids = [item.id for item in self.view.parts] if 'areas' in styledict['items']: styledict['items'].remove('areas') if 'areas' in styledict['items']: color_plot = True ids = [item.id for item in self.view.areas] if color_plot: cmap = plt.get_cmap(GEOM_CMAP) norm = colors.Normalize(vmin=min(ids), vmax=max(ids)) colormaker = cmx.ScalarMappable(norm=norm, cmap=cmap) # plot the items for item_type in ['points', 'lines', 'areas', 'parts']: plot_on = item_type in styledict['items'] label_on = item_type in styledict['labels'] items = getattr(self.view, item_type) if plot_on and label_on: for item in items: if color_plot and item_type in ['areas', 'parts']: color = colormaker.to_rgba(item.id) item.plot(axis, True, color) else: item.plot(axis, True) elif plot_on: for item in items: if color_plot and item_type in ['areas', 'parts']: color = colormaker.to_rgba(item.id) item.plot(axis, False, color) else: item.plot(axis, False) elif label_on: for item in items: item.label(axis) # set the horizontal and vertical axes points = self.view.points radials = [point.x for point in points] axials = [point.y for point in points] base_classes.plot_set_bounds(plt, axials, radials) # set units [d_unit] = self.get_units('dist') # show plot plt.title(title) plt.xlabel('axial, y'+d_unit) plt.ylabel('radial, x'+d_unit) axis.set_aspect('equal') base_classes.plot_finish(plt, fname, display) def plot_parts(self, fname='', display=True, title='Parts', label=True): """Plots selected parts Defaults to displaying labels on: parts and filling all part areas. 
Args: fname (str): png image file prefix, if given, image will be saved display (bool): if True, interactive plot will be shown, if False plot will not be shown. Default = True title (str): the plot's title label (bool): if True all Part objects will be labeled """ item = 'parts' styledict = {'items':[], 'labels':[]} if label: styledict['items'].append(item) styledict['labels'].append(item) else: styledict['items'].append(item) self.plot_multiple(fname, display, title, styledict) def plot_areas(self, fname='', display=True, title='Areas', label=True): """Plots selected areas Defaults to displaying labels on: areas and filling all part areas. Args: fname (str): png image file prefix, if given, image will be saved display (bool): if True, interactive plot will be shown, if False plot will not be shown. Default = True title (str): the plot's title label (bool): if True all areas will be labeled """ item = 'areas' styledict = {'items':[], 'labels':[]} if label: styledict['items'].append(item) styledict['labels'].append(item) else: styledict['items'].append(item) self.plot_multiple(fname, display, title, styledict) def plot_lines(self, fname='', display=True, title='Lines', label=True): """Plots selected lines and arcs Defaults to displaying labels on: lines and arcs Args: fname (str): png image file prefix, if given, image will be saved display (bool): if True, interactive plot will be shown, if False plot will not be shown. 
Default = True title (str): the plot's title label (bool): if True all lines and arcs will be labeled """ item = 'lines' styledict = {'items':[], 'labels':[]} if label: styledict['items'].append(item) styledict['labels'].append(item) else: styledict['items'].append(item) self.plot_multiple(fname, display, title, styledict) def plot_points(self, fname='', display=True, title='Points', label=True): """Plots selected points Defaults to displaying labels on: points Args: fname (str): png image file prefix, if given, image will be saved display (bool): if True, interactive plot will be shown, if False plot will not be shown. Default = True title (str): the plot's title label (bool): if True all points will be labeled """ item = 'points' styledict = {'items':[], 'labels':[]} if label: styledict['items'].append(item) styledict['labels'].append(item) else: styledict['items'].append(item) self.plot_multiple(fname, display, title, styledict) def plot_geometry(self, fname='', display=True, title='Geometry', pnum=True, lnum=True, anum=True, afill=True): """Plots selected geometry items: areas, lines, points. Defaults to displaying labels on: areas, lines, and points, and filling in the areas. Args: fname (str): png image file prefix, if given, image will be saved display (bool): if True, interactive plot will be shown, if False plot will not be shown. 
Default = True title (str): the plot's title pnum (bool): if True all Point objects will be labeled lnum (bool): if True all Line and Arc objects will be labeled anum (bool): if True all Area objects will be labeled afill (bool): if True all Area objects will be filled """ styledict = {'items':[], 'labels':[]} if pnum: styledict['items'].append('points') styledict['labels'].append('points') if lnum: styledict['items'].append('lines') styledict['labels'].append('lines') if anum: styledict['labels'].append('areas') if afill: styledict['items'].append('areas') elif anum == False and afill: styledict['items'].append('areas') self.plot_multiple(fname, display, title, styledict) @staticmethod def __get_cname(items): """Returns a component name prefix, for labeling lists of items. Args: items (list): a list of the items we'll be making a component of """ cname = '' if len(items) == 1: if items[0] == 'all': cname = 'all' else: cname = items[0].get_name() else: cname = items[0].get_name()+'-'+items[-1].get_name() return cname def __get_make_comp(self, comp): """Stores component if it doesn't exist, returns it if it does. Args: comp (Component): component we want to store/get """ if comp not in self.components: comp = self.components.append(comp) else: ind = self.components.index(comp) comp = self.components[ind] return comp def __get_make_surfint(self, surfint): """Stores surfac interaction if it doesn't exist, returns it if it does. Args: surfint (connectors.SurfaceInteraction): item to get or make """ items = [item for item in self.surfints if item.int_type == surfint.int_type] if surfint.int_type == 'LINEAR': for item in items: if item.k == surfint.k: return item elif surfint.type == 'EXPONENTIAL': for item in items: if item.c0 == surfint.c0 and item.p0 == surfint.p0: return item surfint = self.surfints.append(surfint) return surfint def register(self, item): """Adds an item to the feamodel. Item is added to the correct list and its id is updated. 
Allowed Items: * Point * Line * Arc * SignLine * SignArc * LineLoop * Area * Part """ class_name = item.__class__.__name__ class_to_list = {'Point': 'points', 'Line': 'lines', 'Arc': 'lines', 'SignLine': 'signlines', 'SignArc': 'signlines', 'LineLoop': 'lineloops', 'Area': 'areas', 'Part': 'parts'} if class_name in class_to_list: list_name = class_to_list[class_name] getattr(self, list_name).append(item) else: print('ERROR: the item you passed must be a geometry item!') print('You passed a %s' % class_name) message = ("Allowed items: Point, Line, Arc, SignLine, SignArc, " "LineLoop, Area, Part") print(message) def __add_load(self, load, time): """Adds a load to the FeaModel. Args: load (Load or Load_Linear): th eload to add time (float): the load's time point. The first time point is 1.0 """ if time in self.loads: self.loads[time].append(load) else: self.loads[time] = [load] return load def scale(self, unitstr, point_first=None, point_last=None): """Scales the points from [point_first, ..., point_last] using unitstr If point_first and point_last are both None, all points are scaled. 
Args: unitstr (str): string scalar 'fromunit-tounit' using the below units: * mm, m, in, ft * Examples 'mm-m' 'm-in' 'ft-mm' 'in-ft' * Default value is '' and does not apply a scale factor point_first (Point or None or str): the first point to scale, string point names may be passed point_last (Point or None or str): the last point to scale, string point names may be passed """ # convert to points if passing in string point names 'P0' if isinstance(point_first, str): point_first = self.get_item(point_first) if isinstance(point_last, str): point_last = self.get_item(point_last) ind_first = 0 if point_first != None: ind_first = self.points.index(point_first) ind_last = -1 if point_last != None: ind_last = self.points.index(point_last) unit_size = {'m':1.0, 'mm':.001, 'ft':.3048, 'in':0.0254} units = unitstr.split('-') from_unit, to_unit = units[0], units[1] scalar = unit_size[from_unit]/unit_size[to_unit] # scale all points in set # update all dependent arcs # update all dependent line loops # update all dependent areas # update all dependent parts def set_gravity(self, grav, items): """Sets gravity on the elements in items. Assumes gravity acts in the -x direction with magnitude grav. Args: grav (float): gravity acceleration, MUST BE POSTIVE items (Area or Part or list): items gravity acts on - Area: gravity acts on elements in this area - Part: gravity acts on elements in this part - list of Part or Area: gravity acts on their elements """ items = self.get_items(items) ctype = 'elements' cname = self.__get_cname(items) # make compoenet comp = components.Component(items, ctype, cname) comp = self.__get_make_comp(comp) # make load ltype = 'gravity' time = self.time load = loads.ConstLoad(ltype, comp, grav) self.__add_load(load, time) return load def set_rpm(self, rpm, items): """Sets rpm rotation load on the items. 
Args: rpm (float): rotation per minute items (Area or Part or list): items rotation acts on - Area: rotation acts on elements in this area - Part: rotation acts on elements in this part - list of Part or Area: rotation acts on their elements """ # applies rpm to the items items = self.get_items(items) ctype = 'elements' cname = self.__get_cname(items) # make compoenet comp = components.Component(items, ctype, cname) comp = self.__get_make_comp(comp) # make load ltype = 'rpm' time = self.time load = loads.ConstLoad(ltype, comp, rpm) self.__add_load(load, time) return load def set_radps(self, radps, items): """Sets radians per second rotation load on the items. Args: radps (float): radians per second items (Area or Part or list): items rotation acts on - Area: rotation acts on elements in this area - Part: rotation acts on elements in this part - list of Part or Area: rotation acts on their elements """ items = self.get_items(items) ctype = 'elements' cname = self.__get_cname(items) # make compoenet comp = components.Component(items, ctype, cname) comp = self.__get_make_comp(comp) # make load ltype = 'radps' time = self.time load = loads.ConstLoad(ltype, comp, radps) self.__add_load(load, time) return load def set_fluid_press(self, items, rho, g, xo, po): """This sets a fluid presure load on items. This fluid pressure is dependednt on the x axis. g must be positive. 
- P = f(x) - P(xo) = po - P(x) = po + rho*g*(xo - x) Args: items (str or Line or Arc or list): items to set pressure on - str: string name of Line or Arc item, for example 'L0' - Line or Arc: set presssure on this - list or Line or Arc: set pressure on these rho (float): fluid density in mass/volume g (+float): gravity in dist/(t^2), MUST BE POSITIVE xo (float): sea level height, MUST BE POSITIVE po (float): sea level pressure, MUST BE POSITIVE """ items = self.get_items(items) ctype = 'faces' cname = self.__get_cname(items) # make compoenet comp = components.Component(items, ctype, cname) comp = self.__get_make_comp(comp) # make load ltype = 'press_fluid' mult = rho*g load = loads.LinearLoad(ltype, comp, po, mult, xo) self.__add_load(load, self.time) return load def set_load(self, ltype, items, lval, ldir=None): """Sets a pressure or force load on line(s). The force load is divided by the number of nodes and applied on each node. The pressure load is applied to the child faces under the line. Positive is compression, negative is tension. Args: ltype (str): 'press' or 'force' items (str or Line or Arc or list): items to set load on - str: string name of Line or Arc item, for example 'L0' - Line or Arc: set load on this - list or Line or Arc: set load on these lval (float): load value. - For ltype = 'press' this is in force/area units - For ltype = 'force' this is in force units ldir (None or str): load direction. 
Defaults to None - str: when ltype='load', we need to set ldir to 'x' or 'y' - None: when ltype='press' """ items = self.get_items(items) ctype = 'nodes' if ltype == 'press': ctype = 'faces' cname = self.__get_cname(items) # make compoenet comp = components.Component(items, ctype, cname) comp = self.__get_make_comp(comp) # make load if ltype == 'force': ltype = 'f'+ldir # for example fx load = loads.ConstLoad(ltype, comp, lval) self.__add_load(load, self.time) return load def set_constr(self, ltype, items, axis, val=0.0): """Sets a displacement constraint on the passed item(s). Args: ltype (str): 'fix' or 'displ' - 'fix': val arg should not be passed - 'displ': val arg must be passed items (str, SignLine, SignArc, Point or list): item(s) to apply the constraint on - str: this string must be a line or point name - SignLine or SignArc: the constraint will be applied to this item - Point: the constraint will be applied on this item - list of SignLine or SignArc: the constraint will be appplied on these - list of Point: the constraint will be appplied on these - list of str names: pass line or point names, constr applied on them axis (str): load axis, 'x' or 'y' val (float): displacement value, defaults to 0.0, only needs to be set when ltype = 'displ' """ items = self.get_items(items) cname = self.__get_cname(items) ctype = 'nodes' # make compoenet comp = components.Component(items, ctype, cname) comp = self.__get_make_comp(comp) # make load ltype = 'u'+axis # for example ux load = loads.ConstLoad(ltype, comp, val) self.__add_load(load, self.time) return load def set_contact_linear(self, master_lines, slave_lines, kval, many_si=False): """Sets contact between master and slave lines. Slave lines are on the more compliant or more curved object. 
Args: master_lines (list): list of SignLine or SignArc slave_lines (list): list of SignLine or SignArc kval (float): stiffness, 5 to 50 times the youngs modulus of the touching matl many_si (bool): True, make unique surface interaction for every contact False, use existing surface interaction if we can """ master_items = self.get_items(master_lines) master_cname = self.__get_cname(master_items) ctype = 'faces' master_comp = components.Component(master_items, ctype, master_cname) master_comp = self.__get_make_comp(master_comp) slave_items = self.get_items(slave_lines) slave_cname = self.__get_cname(slave_items) ctype = 'faces' slave_comp = components.Component(slave_items, ctype, slave_cname) slave_comp = self.__get_make_comp(slave_comp) surf_int = connectors.SurfaceInteraction('LINEAR', kval) if many_si: surf_int = self.surfints.append(surf_int) else: surf_int = self.__get_make_surfint(surf_int) cont = connectors.Contact(master_comp, slave_comp, surf_int, True) self.contacts.append(cont) def set_eshape(self, eshape='quad', eorder=2): """Sets the element shape and order to use when meshing the model. Args: eshape (str): element shape - 'quad': quadrilaterials - 'tri': triangles eorder (int): element order, default=2 - 1: corner nodes only (3 and 4 noded elements) - 2: corder nodes and midside nodes (6 and 8 noded elements) """ self.eshape = eshape # quad or tri self.eorder = eorder # 1 or 2 def set_etype(self, etype, items, thick=None): """Sets the element type, and thickness on areas or parts. Args: etype (str): element type - 'plstress': plane stress - 'plstrain': plane strain - 'axisym': axisymmetric items (str or Area or Part or list): set element type on these - str: string name of Area or Part item. 
Example: 'A0', 'PART0' - Area: set element type on the elements in this area - Part: set element type on the elements in this part - list of Area or Part: set element type on their elements thick (float or None): element thickness - None: default, used for axisymmetric element type - float: thickness value to use for plane stress or plane strain elements """ items = self.get_items(items) # set the element types on the areas, this is used to fix # elements when importing them from the inp file for item in items: if isinstance(item, geometry.Area): item.set_etype(etype) if isinstance(item, partmodule.Part): for area in item.areas: area.set_etype(etype) # set a thickness component if needed if etype != 'axisym' and thick != None: # make component for element thickness cname = self.__get_cname(items) ctype = 'nodes' comp = components.Component(items, ctype, cname) comp = self.__get_make_comp(comp) # add load ltype = 'nodal_thickness' time = 0.0 load = loads.ConstLoad(ltype, comp, thick) self.__add_load(load, time) return load return None def set_matl(self, matl, items): """Sets the material on Part or Area items. Args: matl (Material): material to assign to items items (Part or Area or list): items that will have matl assigned - Part: assign matl to this - Area: assign matl to this - list of Area or Part: assign matl to these """ items = self.get_items(items) cname = self.__get_cname(items) # store the material if it's not already if matl not in self.matls: matl = self.matls.append(matl) # make component ctype = 'elements' comp = components.Component(items, ctype, cname) comp = self.__get_make_comp(comp) # make load ltype = 'matl' time = 0.0 load = loads.ConstLoad(ltype, comp, matl) self.__add_load(load, time) return load def __read_inp(self, fname): """Reads in the mesh from a Calculix inp file. All nodes, elements, and faces are read. Child nodes, elements and faces are assigned to their geometry parents (points, lines, arcs, areas, parts. 
Args: fname (str): file name to read, must include '.inp' extension """ f = open(fname, 'r') mode = None set_name = None set_type = None items = [] # holder for nodes or elements in nsets or esets N = base_classes.Meshlist() # store nodes E = base_classes.Meshlist() # store elements, allows renumbering before putting int model F = [] # store faces sets = {'E':{}, 'N':{}} # store sets etype = '' Dict_NodeIDs={} Dict_ElemIDs={} # read in input file for line in f: if line[0] != '*': if mode == 'nmake': L = line.split(',') L = [a.strip() for a in L] (nnum, x, y, z) = (int(L[0]), float(L[1]), float(L[2]), float(L[3])) node = mesh.Node(nnum, x, y, z) N.append(node) Dict_NodeIDs[nnum]=node elif mode == 'emake': L = line.split(',') L = [int(a.strip()) for a in L] enum = L[0] nlist = [Dict_NodeIDs[a] for a in L[1:]] e = mesh.Element(enum, etype, nlist) faces = e.faces E.append(e) Dict_ElemIDs[enum]=e F += faces sets[set_type][set_name].append(e) elif mode == 'set': L = line.split(',') L = [a.strip() for a in L] L = [int(a) for a in L if a != ''] items = [] if set_type == 'E': items = [Dict_ElemIDs[a] for a in L if a in Dict_ElemIDs.keys()] elif set_type == 'N': items = [Dict_NodeIDs[a] for a in L] if items == [None]*len(items): pass # the elements were not found else: sets[set_type][set_name] += items # mode setting if '*Node' in line or '*NODE' in line: mode = 'nmake' elif '*Element' in line or '*ELEMENT' in line: L = line.split(',') # split it based on commas e = L[1].split('=') etype = e[1] # exclude T elements made in gmsh if etype[0] != 'T': e = L[2].split('=') set_name = e[1].strip() set_type = 'E' sets[set_type][set_name] = [] mode = 'emake' else: mode = None elif '*ELSET' in line: L = line.split(',') e = L[1].split('=') set_name = e[1].strip() set_type = 'E' sets[set_type][set_name] = [] mode = 'set' elif '*NSET' in line: L = line.split(',') e = L[1].split('=') set_name = e[1].strip() set_type = 'N' sets[set_type][set_name] = [] mode = 'set' f.close() # loop through 
sets and remove empty sets # store sets to delete todel = [] for (set_type, set_dict) in sets.items(): for (set_name, item_list) in set_dict.items(): if item_list == []: todel.append({'set_type':set_type, 'set_name':set_name}) # delete the empty sets for adict in todel: (set_type, set_name) = (adict['set_type'], adict['set_name']) del sets[set_type][set_name] #print('Empty set type:%s name:%s deleted' % (set_type, set_name)) # this resets the min element to number 1 if E.get_minid() > 1: E.set_minid(1) #----------------------------------- # Node and element assignment back onto parts, areas, lines, points #----------------------------------- # assign elements + nodes to feamodel self.elements = E self.faces = F # remove arc center nodes from imported node set # those nodes have no elements under them torem = [] for node in N: if len(node.elements) == 0: torem.append(node) for node in torem: N.remove(node) # assign nodes to feamodel self.nodes = N for part in self.parts: # assign part element and node sets pname = part.get_name() part.elements = sets['E'][pname] part.nodes = sets['N'][pname] # assign all nodes and elements to areas, fix the element types for area in part.areas: aname = area.get_name() area.elements = sets['E'][aname] area.nodes = sets['N'][aname] area.set_child_ccxtypes() #paint element types on elements # assign the child nodes to points pts = part.points for point in pts: ndist = [] for node in part.nodes: p_tmp = geometry.Point(node.x, node.y) dist = point - p_tmp dist = dist.length() ndist.append({'dist':dist, 'node':node}) # sort the list by dist, sorts low to high ndist = sorted(ndist, key=lambda k: k['dist']) point.nodes = [ndist[0]['node']] #print('Point %s = node %s' % (pt, pt.nodes)) # assign the nodes and n1 and faces to lines slines = part.signlines for sline in slines: lname = sline.get_name() area = sline.lineloop.parent nodes = sets['N'][sline.line.get_name()] n1 = [n for n in nodes if n.order == 1] sline.nodes = nodes sline.n1 = n1 # 
make a set of all faces that contain the line's node allfaces = set() for node in nodes: allfaces.update(node.faces) # make a set of all faces on the line only linefaces = set() for face in allfaces: if set(face.nodes).issubset(set(nodes)): linefaces.add(face) # and only the ones whose element centers are in the area faces = set() for face in linefaces: is_lineface = area.contains_point(face.element.center) if is_lineface: faces.add(face) sline.faces = list(faces) print('Elements: %i' % len(E)) print('Nodes: %i' % len(N)) print('Done reading Calculix/Abaqus .inp file') def mesh(self, size=1.0, meshmode='fineness', mesher='gmsh'): """Meshes all parts. Args: size (float): - if meshmode == 'fineness' (default): - mesh size is adapted to geometry size - set size = 0.0001 - 1.0, to define how fine the mesh is. - Low numbers are very fine, higher numbers are coarser. - if meshmode == 'esize': - element size is kept constant - choose it depending on geometry size - it should be reduced e.g. at arcs with small radius, by calling line.esize function meshmode (str): - 'fineness': adapt mesh size to geometry - 'esize': keep explicitly defined element size meshmode is changed to 'esize' is used if esize property is set to points or lines mesher (str): the mesher to use - 'gmsh': mesh with Gmsh, this is reccomended, it allows holes - 'cgx': mesh with Calculix cgx, it doesn't allow holes """ #check if element size is set to points and change meshmode if necessary for pt in self.points: if pt.esize != None: if meshmode=='fineness': print('meshmode is changed to esize, because elementsize was defined on points!') meshmode = 'esize' #if meshmode esize is chosen: ediv's on lines and arcs are transformed to element sizes on start and end point if meshmode == 'esize': for line in self.lines: if line.ediv != None: line.pt(0).set_esize(line.length()/line.ediv) line.pt(1).set_esize(line.length()/line.ediv) if mesher == 'gmsh': self.__mesh_gmsh(size, meshmode) elif mesher == 'cgx': 
self.__mesh_cgx(size) def __mesh_gmsh(self, size, meshmode, timeout=20): """Meshes all parts using the Gmsh mesher. Args: size (float): - if meshmode == 'fineness' (default): - mesh size is adapted to geometry size - set size = 0.0001 - 1.0, to define how fine the mesh is. - Low numbers are very fine, higher numbers are coarser. - if meshmode == 'esize': - element size is kept constant - choose it depending on geometry size meshmode (str): - 'fineness': adapt mesh size to geometry - 'esize': keep explicitly defined element size timeout (int): time in seconds before the process throws a subprocess.TimeoutExpired """ geo = [] ids = {} ids['line'] = {} ids['plane_surface'] = {} # write all points for pt in self.points: txtline = 'Point(%i) = {%f, %f, %f};' % (pt.id, pt.x, pt.y, 0.0) if meshmode == 'esize': #add element size to points if pt.esize == None: txtline = txtline.replace('}', ', %f}' % (size if self.eshape=='tri' else size*2.)) #txtline = txtline.replace('}', ', %f}' % (size)) else: txtline = txtline.replace('}', ', %f}' % (pt.esize if self.eshape=='tri' else pt.esize*2.)) #txtline = txtline.replace('}', ', %f}' % (pt.esize)) geo.append(txtline) # start storing an index number ind = self.points[-1].id + 1 # write all lines for line in self.lines: lnum = line.id ids['line'][lnum] = ind pt1 = line.pt(0).id pt2 = line.pt(1).id txtline = '' if isinstance(line, geometry.Arc): ctr = line.actr.id txtline = 'Circle(%i) = {%i, %i, %i};' % (ind, pt1, ctr, pt2) else: txtline = 'Line(%i) = {%i,%i};' % (ind, pt1, pt2) geo.append(txtline) # set division if we have it if line.ediv != None and meshmode=='fineness': ndiv = line.ediv+1 esize = line.length()/line.ediv if self.eshape == 'quad': ndiv = line.ediv/2+1 esize = esize*2 # this is needed because quad recombine # splits 1 element into 2 txtline = 'Transfinite Line{%i} = %i;' % (ind, ndiv) print('LINE ELEMENT SIZE: %f, MAKES %i ELEMENTS' % (line.length()/line.ediv, line.ediv)) geo.append(txtline) 
geo.append('Characteristic Length {%i,%i} = %f;' % (pt1, pt2, esize)) ind += 1 # write all areas for area in self.areas: if area.closed: aid = area.id aname = area.get_name() loop_ids = [] loops = [area.exlines] + area.holes for loop in loops: txtline = 'Line Loop(%i) = ' % (ind) loop_ids.append(str(ind)) line_ids = [] for sline in loop: lid = ids['line'][sline.line.id] prefix = '' if sline.sign == -1: prefix = '-' line_ids.append('%s%i' % (prefix, lid)) txtline = txtline + '{'+ ','.join(line_ids)+'};' geo.append(txtline) ind += 1 loop_ids = ','.join(loop_ids) geo.append('Plane Surface(%i) = {%s};' % (ind, loop_ids)) ids['plane_surface'][aid] = ind geo.append("Physical Surface('%s') = {%i};" % (aname, ind)) ind += 1 # write part area components for part in self.parts: # make components for each part txtline = "Physical Surface('%s') = " % (part.get_name()) area_ids = [] for area in part.areas: if area.closed: aid = ids['plane_surface'][area.id] area_ids.append(str(aid)) txtline = txtline + '{' + ','.join(area_ids) + '};' geo.append(txtline) # write all line componenets so we can get nodes out for line in self.lines: lid = ids['line'][line.id] txtline = "Physical Line('%s') = {%i};" % (line.get_name(), lid) geo.append(txtline) # write node componenets # ERROR: node list is not produced by gmsh for point in self.points: pname = point.get_name() txtline = "Physical Point('%s') = {%i};" % (pname, point.id) geo.append(txtline) # set the meshing options if meshmode == 'fineness': geo.append('Mesh.CharacteristicLengthFactor = '+str(size)+'; //mesh fineness') geo.append('Mesh.RecombinationAlgorithm = 1; //blossom') if self.eshape == 'quad': geo.append('Mesh.RecombineAll = 1; //turns on quads') geo.append('Mesh.SubdivisionAlgorithm = 1; // quadrangles only') #geo.append('Mesh.RecombinationAlgorithm = 1; //turns on blossom needed for quad') order = self.eorder geo.append('Mesh.CharacteristicLengthExtendFromBoundary = 1;') geo.append('Mesh.CharacteristicLengthMin = 0;') 
geo.append('Mesh.CharacteristicLengthMax = 1e+022;') # use this so small circles are meshed finely # this is broken in Gmsh 4.3.0 # geo.append('Mesh.CharacteristicLengthFromCurvature = 1;') geo.append('Mesh.CharacteristicLengthFromPoints = 1;') # geo.append('Mesh.Algorithm = 2; //delauny') #okay for quads geo.append('Mesh.Algorithm = 8; //delquad = delauny for quads') geo.append('Mesh.ElementOrder = ' +str(order) +'; //linear or second set here') if order == 2: geo.append('Mesh.SecondOrderIncomplete=1; //no face node w/ 2nd order') geo.append('Mesh.SaveGroupsOfNodes = 1; // save node groups') # write geo file to the local directory fname = self.fname+'.geo' fout = self.fname+'.inp' outfile = open(fname, 'w') for line in geo: #print (line) outfile.write(line+'\n') outfile.close() print('File: %s was written' % fname) # run file in bg mode, -2 is 2d mesh, makes required inp file runstr = "%s %s -2 -o %s" % (environment.GMSH, fname, fout) print(runstr) subprocess.check_call(runstr, timeout=timeout, shell=True) print('File: %s was written' % fout) print('Meshing done!') # write gmsh msh file, for manual checking only # not required by pycalculix runstr = "%s %s -2 -o %s" % (environment.GMSH, fname, self.fname+'.msh') subprocess.check_call(runstr, timeout=timeout, shell=True) print('File: %s.msh was written' % self.fname) # read in the calculix mesh self.__read_inp(self.fname+'.inp') def __mesh_cgx(self, size, meshmode, timeout=20): """Meshes all parts using the Calculix cgx mesher. Args: size (float): - if meshmode == 'fineness' (default): - mesh size is adapted to geometry size - set size = 0.0001 - 1.0, to define how fine the mesh is. - Low numbers are very fine, higher numbers are coarser. 
- if meshmode == 'esize': NOT TESTED WITH CGX - element size is kept constant - choose it depending on geometry size meshmode (str): - 'fineness': adapt mesh size to geometry - 'esize': keep explicitly defined element size NOT TESTED WITH CGX timeout (int): time in seconds before the process throws a subprocess.TimeoutExpired """ fbd = [] comps = [] cfiles = [] # Calculix CGX elements # axisymmetric cgx_elements = {} cgx_elements['tri2axisym'] = 'tr6c' cgx_elements['tri1axisym'] = 'tr3c' cgx_elements['quad2axisym'] = 'qu8c' cgx_elements['quad1axisym'] = 'qu4c' # plane stress cgx_elements['tri2plstress'] = 'tr6s' cgx_elements['tri1plstress'] = 'tr3s' cgx_elements['quad2plstress'] = 'qu8s' cgx_elements['quad1plstress'] = 'qu4s' # plane strain cgx_elements['tri2plstrain'] = 'tr6e' cgx_elements['tri1plstrain'] = 'tr3e' cgx_elements['quad2plstrain'] = 'qu8e' cgx_elements['quad1plstrain'] = 'qu4e' num = 1.0/size emult = int(round(num)) # this converts size to mesh multiplier # write all points for point in self.points: linestr = 'pnt %s %f %f %f' % (point.get_name(), point.x, point.y, 0.0) fbd.append(linestr) # gmsh can't make node componenets so don't do it in cgx #L = 'seta %s p %s' % (pt.get_name(), pt.get_name()) #comps.append(L) # write all lines for line in self.lines: lname = line.get_name() pt1 = line.pt(0).get_name() pt2 = line.pt(1).get_name() linestr = '' if isinstance(line, geometry.Arc): # line is arc pctr = line.actr.get_name() linestr = 'line %s %s %s %s' % (lname, pt1, pt2, pctr) else: # straight line linestr = 'line %s %s %s' % (lname, pt1, pt2) # set division if we have it if line.ediv != None: ndiv = self.eorder*line.ediv linestr += ' '+str(int(ndiv)) fbd.append(linestr) linestr = 'seta %s l %s' % (lname, lname) comps.append(linestr) cfiles.append(lname) # write all areas for area in self.areas: if area.closed: linestr = 'gsur '+area.get_name()+' + BLEND ' line_ids = [] for line in area.signlines: lname = '+ '+line.get_name() if line.sign == -1: lname 
= '- '+line.get_name()[1:] line_ids.append(lname) linestr = linestr + ' '.join(line_ids) fbd.append(linestr) # add area component, nodes + elements astr = 'seta %s s %s' % (area.get_name(), area.get_name()) fbd.append(astr) cfiles.append(area.get_name()) # write part area components for apart in self.parts: # make components for each part # seta P0 s s0 s1 line = 'seta %s s ' % apart.get_name() cfiles.append(apart.get_name()) area_ids = [] for area in apart.areas: if area.closed: area_ids.append(area.get_name()) line = line + ' '.join(area_ids) fbd.append(line) # mesh all areas for area in apart.areas: aname = area.get_name() estr = self.eshape+str(self.eorder)+area.etype cgx_etype = cgx_elements[estr] fbd.append('elty %s %s' % (aname, cgx_etype)) fbd.append('div all mult %i' % emult) fbd.append('mesh all') # save mesh file fbd.append('send all abq') # add line and area components fbd += comps # save component node and element sets for comp in cfiles: # select nodes under fbd.append('comp %s do' % (comp,)) fbd.append('send %s abq names' % (comp,)) # this orients the view correctly # this is the same as switching to the z+ orientation # y us axial, x is radial fbd.append('rot z') fbd.append('rot c -90') fbd.append('plot e all') # write fbd file to the local directory fname = self.fname+'.fbd' f = open(fname, 'w') for line in fbd: #print (line) f.write(line+'\n') f.close() print('File: %s was written' % fname) # run file in bg mode runstr = "%s -bg %s" % (environment.CGX, fname) p = subprocess.check_call(runstr, timeout=timeout, shell=True) print('Meshing done!') # assemble the output files into a ccx input file inp = [] files = ['all.msh'] files += [f+'.nam' for f in cfiles] for fname in files: infile = open(fname, 'r') for line in infile: # cgx adds E and N prfixes on sets after =, get rid of these if '=' in line and fname != 'all.msh': L = line.split('=') line = L[0] + '=' + L[1][1:] inp.append(line.strip()) else: inp.append(line.strip()) infile.close() # delete 
file os.remove(fname) # write out inp file fname = self.fname+'.inp' outfile = open(fname, 'w') for line in inp: #print (line) outfile.write(line+'\n') outfile.close() print('File: %s was written' % fname) # read in the calculix mesh self.__read_inp(fname)
import numpy as np
from ray.rllib.utils.framework import try_import_torch

# torch/nn are None when PyTorch is not installed, so the layer subclasses
# below are only defined inside the `if torch:` guard.
torch, nn = try_import_torch()

# Custom initialization for different types of layers
if torch:

    class Linear(nn.Linear):
        """nn.Linear with Xavier-uniform weights and zero-initialized bias."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def reset_parameters(self):
            # Invoked by the base-class __init__; replaces the default init.
            nn.init.xavier_uniform_(self.weight)
            if self.bias is not None:
                nn.init.zeros_(self.bias)

    class Conv2d(nn.Conv2d):
        """nn.Conv2d with Xavier-uniform weights and zero-initialized bias."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def reset_parameters(self):
            nn.init.xavier_uniform_(self.weight)
            if self.bias is not None:
                nn.init.zeros_(self.bias)

    class ConvTranspose2d(nn.ConvTranspose2d):
        """nn.ConvTranspose2d with Xavier-uniform weights and zero bias."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def reset_parameters(self):
            nn.init.xavier_uniform_(self.weight)
            if self.bias is not None:
                nn.init.zeros_(self.bias)

    class GRUCell(nn.GRUCell):
        """nn.GRUCell with Xavier input weights, orthogonal recurrent
        weights and zero-initialized biases."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def reset_parameters(self):
            nn.init.xavier_uniform_(self.weight_ih)
            nn.init.orthogonal_(self.weight_hh)
            nn.init.zeros_(self.bias_ih)
            nn.init.zeros_(self.bias_hh)


# Custom Tanh Bijector due to big gradients through Dreamer Actor
class TanhBijector(torch.distributions.Transform):
    """Invertible tanh transform mapping R -> (-1, 1).

    Implements the torch.distributions.Transform protocol
    (_call/_inverse/log_abs_det_jacobian) with a numerically stabilized
    inverse near the boundaries +-1.
    """

    def __init__(self):
        super().__init__()

        self.bijective = True
        self.domain = torch.distributions.constraints.real
        self.codomain = torch.distributions.constraints.interval(-1.0, 1.0)

    def atanh(self, x):
        # Inverse hyperbolic tangent via its logarithmic identity.
        return 0.5 * torch.log((1 + x) / (1 - x))

    def sign(self):
        # tanh is monotonically increasing.
        return 1.

    def _call(self, x):
        return torch.tanh(x)

    def _inverse(self, y):
        # Clamp just inside (-1, 1) so atanh stays finite for |y| == 1.
        y = torch.where((torch.abs(y) <= 1.),
                        torch.clamp(y, -0.99999997, 0.99999997), y)
        y = self.atanh(y)
        return y

    def log_abs_det_jacobian(self, x, y):
        # log|d tanh(x)/dx| written in a numerically stable softplus form.
        return 2. * (np.log(2) - x - nn.functional.softplus(-2. * x))


# Modified from https://github.com/juliusfrost/dreamer-pytorch
class FreezeParameters:
    """Context manager that disables gradients for the given parameters
    inside the `with` block and restores their previous requires_grad
    flags on exit."""

    def __init__(self, parameters):
        self.parameters = parameters
        # Remember each parameter's original requires_grad state.
        self.param_states = [p.requires_grad for p in self.parameters]

    def __enter__(self):
        for param in self.parameters:
            param.requires_grad = False

    def __exit__(self, exc_type, exc_val, exc_tb):
        for i, param in enumerate(self.parameters):
            param.requires_grad = self.param_states[i]
import numpy as np import src.Utils.plot as plot import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import src.Planning.PotentialField as pf if __name__ == '__main__': obstacle_map = plot.readBMPAsNumpyArray("../map/small_obstacle_map.bmp") h, w = obstacle_map.shape resolution = 0.05 * 6.0 params = {"zeta": 1, "d_goal": 5, "eta": 5, "q_star": 5, "max_potential": 200, "max_gradient": 10} goal = (20, 20) potential_field_gen = pf.PotentialField(obstacle_map, params, topology='euclidean', connectivity='vertex', resolution=resolution) potential_field_gen.set_goal(goal) obstacle_dist_map = potential_field_gen.get_obstacle_distance_map() repulsive_potential_field = potential_field_gen.get_repulsive_potential_field() attractive_potential_field = potential_field_gen.get_attractive_potential_field() total_potential_field = potential_field_gen.get_potential_field() plt.figure() plt.imshow(obstacle_dist_map, cmap='jet') plt.figure() plt.imshow(repulsive_potential_field, cmap='jet') plt.figure() plt.imshow(attractive_potential_field, cmap='jet') plt.figure() plt.imshow(total_potential_field, cmap='jet') potential_field_gen.get_negative_gradient((20, 21)) potential_field_gen.get_negative_gradient((44 * resolution, 34 * resolution)) gradient_x, gradient_y = potential_field_gen.get_negative_gradient_field() X = np.arange(0, w, 1) Y = np.arange(0, h, 1) plt.figure() plt.quiver(X, Y, gradient_x, gradient_y) plt.show()
#!/usr/bin/env python3 #import ev3dev.ev3 as ev3 from time import sleep import numpy as np def centre_line_finder(_angle2): angle_0 = (_angle2 - 0) angle_90 = (_angle2 - 90) angle_180 = (_angle2 - 180) angle_270 = (_angle2 - 270) angle = np.array([abs(angle_0), abs(angle_90), abs(angle_180), abs(angle_270)]) x = angle.argmin() e_angle = angle[x] c_angle = 0 if x == 0: e_angle = angle_0 c_angle = 0 elif x == 1: e_angle = angle_90 c_angle = 90 elif x == 2: e_angle = angle_180 c_angle = 180 elif x == 3: e_angle = angle_270 c_angle = 270 return e_angle, c_angle
#!/usr/bin/env python # Aesara tutorial # Solution to Exercise in section 'Configuration Settings and Compiling Modes' import numpy as np import aesara import aesara.tensor as at aesara.config.floatX = 'float32' rng = np.random N = 400 feats = 784 D = (rng.randn(N, feats).astype(aesara.config.floatX), rng.randint(size=N, low=0, high=2).astype(aesara.config.floatX)) training_steps = 10000 # Declare Aesara symbolic variables x = at.matrix("x") y = at.vector("y") w = aesara.shared(rng.randn(feats).astype(aesara.config.floatX), name="w") b = aesara.shared(np.asarray(0., dtype=aesara.config.floatX), name="b") x.tag.test_value = D[0] y.tag.test_value = D[1] #print "Initial model:" #print w.get_value(), b.get_value() # Construct Aesara expression graph p_1 = 1 / (1 + at.exp(-at.dot(x, w) - b)) # Probability of having a one prediction = p_1 > 0.5 # The prediction that is done: 0 or 1 xent = -y * at.log(p_1) - (1 - y) * at.log(1 - p_1) # Cross-entropy cost = at.cast(xent.mean(), 'float32') + \ 0.01 * (w ** 2).sum() # The cost to optimize gw, gb = at.grad(cost, [w, b]) # Compile expressions to functions train = aesara.function( inputs=[x, y], outputs=[prediction, xent], updates={w: w - 0.01 * gw, b: b - 0.01 * gb}, name="train") predict = aesara.function(inputs=[x], outputs=prediction, name="predict") if any(x.op.__class__.__name__ in ('Gemv', 'CGemv', 'Gemm', 'CGemm') for x in train.maker.fgraph.toposort()): print('Used the cpu') elif any(x.op.__class__.__name__ in ('GpuGemm', 'GpuGemv') for x in train.maker.fgraph.toposort()): print('Used the gpu') else: print('ERROR, not able to tell if aesara used the cpu or the gpu') print(train.maker.fgraph.toposort()) for i in range(training_steps): pred, err = train(D[0], D[1]) #print "Final model:" #print w.get_value(), b.get_value() print("target values for D") print(D[1]) print("prediction on D") print(predict(D[0]))
import pandas as pd import quandl, math import datetime import time import numpy as np from sklearn import preprocessing, cross_validation, svm from sklearn.linear_model import LinearRegression import matplotlib.pyplot as plt from matplotlib import style import pickle style.use('ggplot') df = quandl.get("WIKI/GOOGL") df = df[["Adj. Open","Adj. High","Adj. Low","Adj. Close","Adj. Volume",]] df["HL_PCT"] = (df["Adj. High"]- df["Adj. Close"]) / df["Adj. Close"] * 100 df["PCT_CHANGE"] = (df["Adj. Close"]- df["Adj. Open"]) / df["Adj. Open"] * 100 df = df[["Adj. Close", "HL_PCT", "PCT_CHANGE", "Adj. Volume"]] forecast_col = "Adj. Close" df.fillna(-99999, inplace=True) forecast_out = int(math.ceil(0.1*len(df))) print forecast_out """ Int is used in case Ceil returns a float Ceil gets the rounded up value """ df["label"] = df[forecast_col].shift(-forecast_out) X = np.array(df.drop(["label"],1)) X = preprocessing.scale(X) X_lately = X[-forecast_out] X = X[:-forecast_out] df.dropna(inplace=True) y = np.array(df["label"]) X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2) clf = LinearRegression(n_jobs=10) clf.fit(X_train, y_train) with open ('linearregression.pickle', 'wb') as f: pickle.dump(clf, f) #generates a pickle file which stores the data (important in large scale machine learning to store results!!) pickle_in = open('linearregression.pickle', 'rb') clf = pickle.load(pickle_in) accuracy = clf.score(X_test, y_test) forecast_set = clf.predict(X_lately) print forecast_set, accuracy, forecast_out df['Forecast'] = np.nan last_date = df.iloc[-1].name last_unix = time.mktime(last_date.timetuple()) one_day = 86400 next_unix = last_unix + one_day for i in forecast_set: next_date = datetime.datetime.fromtimestamp(next_unix) next_unix += one_day df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)] + [i] df['Adj. Close'].plot() df['Forecast'].plot() plt.legend('Date') plt.ylabel('Price') plt.show()
import numpy as np from tensorflow.keras.models import model_from_json class Model(object): # List of Emotions EMOTIONS_LIST = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"] def __init__(self, model_json, model_h5): # load model from JSON file with open(model_json, 'r') as json_file: loaded_model_json = json_file.read() self.loaded_model = model_from_json(loaded_model_json) # load weights into the new model self.loaded_model.load_weights(model_h5) # Function to predict emotion def predict_emotion(self, img): emotion = self.loaded_model.predict(img) return Model.EMOTIONS_LIST[np.argmax(emotion)], emotion[0]
#!/usr/bin/env python3
"""
Script and class for creating tf.Record datasets for the KITTI visual
odometry task. Will create 11 different datasets where one sequence is held
out as test data while the other 10 sequences are used for training and
validation.
"""
import argparse
import numpy as np
import os
import matplotlib.pyplot as plt
from random import shuffle
import logging
import sys
from PIL import Image
import cv2

import differentiable_filters.utils.recordio as tfr


class KittiDataset():
    def __init__(self, param):
        """
        Class for creating tf.Record datasets for the KITTI visual odometry
        task.

        Parameters
        ----------
        param : dict
            Dictionary of parameters
            NOTE(review): accessed via attributes (param.out_dir,
            param.name), i.e. an argparse Namespace rather than a dict.

        Returns
        -------
        None.

        """
        self.param = param
        self.out_dir = param.out_dir

    def create_dataset(self, files_list_train, test_file, name):
        """
        Creates a tf.Record dataset with the desired characteristics.

        Parameters
        ----------
        files_list_train : list
            List of the raw data files that belong to the sequences for
            training and validation.
        test_file : str
            Raw data file for the test sequence
        name : str
            Name of the dataset

        Returns
        -------
        None.

        """
        # setup logging
        self.log = logging.getLogger(self.param.name)
        self.log.setLevel(logging.DEBUG)
        # create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s: [%(name)s] ' +
                                      '[%(levelname)s] %(message)s')
        # create console handler
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        self.log.addHandler(ch)
        # create file handler which logs warnings errors and criticals
        if os.path.exists(os.path.join(self.param.out_dir,
                                       name + '_error.log')):
            os.remove(os.path.join(self.param.out_dir, name + '_error.log'))
        fh = logging.FileHandler(os.path.join(self.param.out_dir,
                                              name + '_error.log'))
        fh.setLevel(logging.WARNING)
        fh.setFormatter(formatter)
        self.log.addHandler(fh)

        # NOTE(review): mean_diff and mean_rgb are passed to _get_data but
        # never accumulated, so the means written to the info file below
        # are always zero -- confirm whether this is intended.
        mean_diff = np.zeros(3)
        mean_rgb = np.zeros(3)
        count = 0
        train_count = 0
        val_count = 0

        self.first = True
        self.keys = ['image', 'image_diff', 'state']
        train_data = {key: [] for key in self.keys}
        test_data = {key: [] for key in self.keys}

        # Separate sharded writers (300 examples per shard) for the train,
        # validation and test splits.
        self.record_writer_train = \
            tfr.RecordioWriter(self.out_dir, 300, name + '_train_')
        self.record_meta_train = tfr.RecordMeta(name + '_train_')
        self.record_writer_val = \
            tfr.RecordioWriter(self.out_dir, 300, name + '_val_')
        self.record_meta_val = tfr.RecordMeta(name + '_val_')
        self.record_writer_test = \
            tfr.RecordioWriter(self.out_dir, 300, name + '_test_')
        self.record_meta_test = tfr.RecordMeta(name + '_test_')

        for ind, f in enumerate(files_list_train):
            values_train = self._get_data(f, mean_diff, mean_rgb)
            for d in values_train:
                for key in self.keys:
                    train_data[key] += [d[key]]
            # Flush to disk in batches to bound memory usage.
            if len(train_data['image']) > 1000:
                train_size, val_size = self._save_train_val(train_data)
                train_count += train_size
                val_count += val_size
                train_data = {key: [] for key in self.keys}

        if len(train_data['image']) > 0:
            train_size, val_size = self._save_train_val(train_data)
            train_count += train_size
            val_count += val_size

        values_test = self._get_data(test_file, mean_diff, mean_rgb, 'test')
        for d in values_test:
            for key in self.keys:
                test_data[key] += [d[key]]
        self._save(test_data)

        # save the meta information
        count = train_count + val_count + 1
        fi = open(os.path.join(self.out_dir, 'info_' + name + '.txt'), 'w')
        fi.write('Num data points: ' + str(count) + '\n')
        fi.write('Num train: ' + str(train_count) + '\n')
        fi.write('Num val: ' + str(val_count) + '\n')
        fi.write('Num test: ' + str(1) + '\n')
        fi.write('mean depth: ' + str(mean_diff / (count)) + '\n')
        fi.write('mean rgb: ' + str(mean_rgb / (count)) + '\n')
        fi.close()

        self.record_writer_train.close()
        self.record_writer_test.close()
        self.record_writer_val.close()

    def _save_train_val(self, data):
        """
        Save the training and validation data

        Parameters
        ----------
        data : dict of lists
            A dictionary containing the example data

        Returns
        -------
        train_size : int
            Number of training examples saved.
        val_size : int
            Number of validation examples saved.

        """
        length = len(data['image'])
        # convert lists to numpy arrays
        # NOTE(review): data[key] is a list at this point, so this ndarray
        # check never fires; conversion is handled downstream -- confirm.
        for key in self.keys:
            if type(data[key]) == np.ndarray and \
                    data[key].dtype == np.float64:
                data[key] = np.array(data[key]).astype(np.float32)

        # shuffle the arrays together
        permutation = np.random.permutation(length)
        for key in self.keys:
            vals = np.copy(data[key])
            data[key] = vals[permutation]

        # 6/7 of the examples go to training, the remainder to validation.
        train_size = int(np.floor(length * 6. / 7.))
        val_size = length - train_size

        if train_size > 0:
            train_data = {}
            for key in self.keys:
                train_data[key] = np.copy(data[key][:train_size])
            rw = self.record_writer_train
            rm = self.record_meta_train
            tfr.write_tfr(train_data, rw, rm, self.out_dir)
        if val_size > 0:
            val_data = {}
            for key in self.keys:
                val_data[key] = np.copy(data[key][train_size:])
            rw = self.record_writer_val
            rm = self.record_meta_val
            tfr.write_tfr(val_data, rw, rm, self.out_dir)
        return train_size, val_size

    def _save(self, data):
        """
        Save the test data

        Parameters
        ----------
        data : dict of lists
            A dictionary containing the example data

        Returns
        -------
        None.

        """
        rw = self.record_writer_test
        rm = self.record_meta_test

        # convert lists to numpy arrays
        for key in self.keys:
            if type(data[key]) == list:
                v = np.array(data[key])
                data[key] = v
            if type(data[key]) == np.ndarray and \
                    data[key].dtype == np.float64:
                data[key] = data[key].astype(np.float32)

        tfr.write_tfr(data, rw, rm, self.out_dir)
        return

    def _get_data(self, filename, mean_diff, mean_rgb, mode='train'):
        """
        Generate data for one sequence

        Parameters
        ----------
        filename : str
            name of the files belonging to this sequence.
        mean_diff : np.array
            Mean values of the difference image observations (can be used
            for normalization)
        mean_rgb : np.array
            Mean values of the rgb image observations (can be used
            for normalization)
        mode : str, optional
            Flag to signal if the sequence is used for training or testing.
            The default is 'train'.

        Returns
        -------
        out : list
            List with all generated examples.

        """
        self.log.info(mode + ': ' + filename)
        out = []
        # load the txt file
        with open(filename + '_image1.txt', 'r') as f:
            tmp = np.loadtxt(f)

        # read in position and orientation data
        # Columns are entries of the 3x4 ground-truth pose matrices; the
        # 0.103 divisor presumably converts per-frame deltas to velocities
        # (KITTI frame interval ~0.103 s) -- TODO confirm.
        xs = tmp[:, 11:12]
        ys = -tmp[:, 3:4]
        thetas = self._wrap_angle(-np.arctan2(-tmp[:, 8:9], tmp[:, 10:11]))

        vs = np.sqrt((ys[1:] - ys[:-1])**2 + (xs[1:] - xs[:-1])**2) / 0.103
        theta_dots = self._wrap_angle(thetas[1:] - thetas[:-1]) / 0.103

        # read the images from the first camera, create rgb and difference
        # observations and mirrored versions of the observations
        ims, ims_m, im_diffs, im_diffs_m = self._read_images(filename,
                                                             'image_2')
        self.log.info('sequence length ' + str(len(ims)))

        # now ims and im_diffs start at t=1, but xs, ys, thetas, vs, and theta
        # dots start at t = 0, so we cut off the first entry
        xs = xs[1:]
        ys = ys[1:]
        thetas = thetas[1:]
        vs = vs[1:]
        theta_dots = theta_dots[1:]

        # we also cut off the last entries for the images, xs, ys and thetas,
        # since we do not have velocities for the last step
        ims = ims[:-1]
        im_diffs = im_diffs[:-1]
        ims_m = ims_m[:-1]
        im_diffs_m = im_diffs_m[:-1]
        xs = xs[:-1]
        ys = ys[:-1]
        thetas = thetas[:-1]

        assert len(ims) == len(xs) and len(ims) == len(vs)

        # positions and velocities are now aligned such that
        # p[i] + v[i] = p[i+1]
        # however, velocities and difference images are not aligned:
        # v[i] = p[i+1] = p[i] but im_diff[i] = im[i] - im[i-1]
        if mode == 'train':
            out += self._get_subsequences(ims, im_diffs, xs, ys, thetas, vs,
                                          theta_dots, 50, 100)
            # do the same again with the mirrored data
            # (mirroring flips the lateral position, heading and turn rate)
            out += self._get_subsequences(ims_m, im_diffs_m, xs, -ys, -thetas,
                                          vs, -theta_dots, 50, 100)
        elif mode == 'test':
            vals = {}
            # use the whole sequence
            vals['image'] = np.array(ims)
            vals['image_diff'] = np.array(im_diffs)
            vals['state'] = np.concatenate([xs, ys, thetas, vs, theta_dots],
                                           axis=1)
            out += [vals]
            # and the whole mirrored sequence
            vals = {}
            vals['image'] = np.array(ims_m)
            vals['image_diff'] = np.array(im_diffs_m)
            vals['state'] = np.concatenate([xs, -ys, -thetas, vs,
                                            -theta_dots], axis=1)
            out += [vals]

        # read the images from the second camera and repeat the process
        ims, ims_m, im_diffs, im_diffs_m = self._read_images(filename,
                                                             'image_3')
        # cut off the last entries for the images
        ims = ims[:-1]
        im_diffs = im_diffs[:-1]
        ims_m = ims_m[:-1]
        im_diffs_m = im_diffs_m[:-1]

        assert len(ims) == len(xs) and len(ims) == len(vs)

        if mode == 'train':
            out += self._get_subsequences(ims, im_diffs, xs, ys, thetas, vs,
                                          theta_dots, 50, 100)
            # do the same again with the mirrored data
            out += self._get_subsequences(ims_m, im_diffs_m, xs, -ys, -thetas,
                                          vs, -theta_dots, 50, 100)
        elif mode == 'test':
            vals = {}
            # use the whole sequence
            vals['image'] = np.array(ims)
            vals['image_diff'] = np.array(im_diffs)
            vals['state'] = np.concatenate([xs, ys, thetas, vs, theta_dots],
                                           axis=1)
            out += [vals]
            # and the whole mirrored sequence
            vals = {}
            vals['image'] = np.array(ims_m)
            vals['image_diff'] = np.array(im_diffs_m)
            vals['state'] = np.concatenate([xs, -ys, -thetas, vs,
                                            -theta_dots], axis=1)
            out += [vals]
        return out

    @staticmethod
    def _read_images(filename, cam_key):
        """
        Reads in the images from the raw kitti data, creates the difference
        images and encodes them as byte strings. Also returns mirrored
        versions of each image for data augmentation

        Parameters
        ----------
        filename : str
            name of the files belonging to this sequence.
        cam_key : str
            Either image_2 or image_3 for images from the first or second
            camera

        Returns
        -------
        ims : list
            The original rgb images
        ims_m : list
            The mirrored rgb images
        im_diffs : list
            The original difference images
        im_diffs_m : list
            The mirrored difference images

        """
        ims = []
        ims_m = []
        im_diffs = []
        im_diffs_m = []
        im_files = [os.path.join(filename, cam_key, fi)
                    for fi in os.listdir(os.path.join(filename, cam_key))]
        im_files = sorted(im_files)

        # p holds the previous frame; the first frame only seeds the diff.
        i = Image.open(im_files[0])
        p = np.asarray(i, 'float32')
        for ind, f in enumerate(im_files[1:]):
            i = Image.open(f)
            i = np.asarray(i, 'float32')
            diff = i - p

            # Encode each observation as a PNG byte string (BGR for OpenCV).
            im = cv2.cvtColor(i, cv2.COLOR_RGB2BGR)
            im = cv2.imencode('.png', im)[1].tobytes()
            im_m = cv2.cvtColor(np.fliplr(i), cv2.COLOR_RGB2BGR)
            im_m = cv2.imencode('.png', im_m)[1].tobytes()

            d = cv2.cvtColor(diff, cv2.COLOR_RGB2BGR)
            d = cv2.imencode('.png', d)[1].tobytes()
            d_m = cv2.cvtColor(np.fliplr(diff), cv2.COLOR_RGB2BGR)
            d_m = cv2.imencode('.png', d_m)[1].tobytes()

            im_diffs += [d]
            im_diffs_m += [d_m]
            ims += [im]
            ims_m += [im_m]
            p = np.copy(i)
        return ims, ims_m, im_diffs, im_diffs_m

    @staticmethod
    def _get_subsequences(ims, im_diffs, xs, ys, thetas, vs, theta_dots,
                          num, sl):
        """
        Extracts num sequences of length sl from each file, all starting at
        different timesteps.

        Parameters
        ----------
        ims : list
            The full sequence of rgb images.
        im_diffs : list
            The full sequence of difference images
        xs : list
            The full sequence of positional x coordinates
        ys : list
            The full sequence of positional y-coordinates
        thetas : list
            The full sequence of headings.
        vs : list
            The full sequence of linear velocities
        theta_dots : list
            The full sequence of angular velocities
        num : int
            Number of subsequences to extract
        sl : int
            Length of the extracted subsequences

        Returns
        -------
        out : list
            List with data for all extracted subsequences.

        """
        # Unique start indices (replace=False) so subsequences differ.
        inds = np.random.choice(len(ims) - sl, size=num, replace=False)
        inds = list(inds)
        shuffle(inds)
        out = []
        for i in inds:
            vals = {}
            vals['image'] = np.array(ims[i:i+sl])
            vals['image_diff'] = \
                np.array(im_diffs[i:i+sl])
            vals['state'] = \
                np.concatenate([xs[i:i+sl], ys[i:i+sl], thetas[i:i+sl],
                                vs[i:i+sl], theta_dots[i:i+sl]], axis=1)
            out += [vals]
        return out

    def _wrap_angle(self, angle):
        # Map any angle (radians) into the interval [-pi, pi).
        return ((angle - np.pi) % (2 * np.pi)) - np.pi


def main(argv=None):
    """Create one dataset per held-out KITTI sequence (leave-one-out)."""
    parser = argparse.ArgumentParser('create kitti dataset')
    parser.add_argument('--name', dest='name', type=str, default='kitti',
                        help='the name of the created datasets')
    parser.add_argument('--out-dir', dest='out_dir', type=str, required=True,
                        help='where to store results')
    parser.add_argument('--source-dir', dest='source_dir', type=str,
                        required=True,
                        help='where to find the raw kitti dataset.')
    args = parser.parse_args(argv)

    plt.ioff()

    # One subdirectory per raw sequence.
    files = [os.path.join(args.source_dir, f)
             for f in os.listdir(args.source_dir)
             if os.path.isdir(os.path.join(args.source_dir, f))]
    files = sorted(files)
    for i in range(len(files)):
        # Skip sequences whose dataset has already been generated (the
        # info file doubles as a completion marker).
        if not os.path.exists(os.path.join(args.out_dir, 'info_' + args.name +
                                           '_' + str(i) + '.txt')):
            test_file = files[i]
            train_files = files[:i] + files[i+1:]
            print('set ', i, ' test file: ', test_file)
            print('train_files: ', train_files)
            c = KittiDataset(args)
            c.create_dataset(train_files, test_file, args.name + '_' + str(i))
        else:
            print('A dataset with name, ', args.name + '_' + str(i),
                  'already exists at ', args.out_dir)
    return


if __name__ == "__main__":
    main()
# Python 2 module: stream/sampler factories for task-and-motion planning
# with OpenRAVE. Each *_fn factory captures the environment/bodies in a
# closure and returns the actual test or generator used by the planner.
from time import sleep
import numpy as np

from robotics.openrave.utils import solve_inverse_kinematics, \
    set_manipulator_conf, Conf, Traj, manip_from_pose_grasp
from robotics.openrave.motion import has_mp, mp_birrt, mp_straight_line, linear_motion_plan, manipulator_motion_plan
from robotics.openrave.transforms import set_pose, \
    object_trans_from_manip_trans, trans_from_point

# TODO - unify with fixed_tamp_holding

# Offset (in the gripper frame) from grasp pose to pre-grasp pose.
APPROACH_VECTOR = 0.15*np.array([0, 0, -1])
DISABLE_MOTIONS = False
DISABLE_MOTION_COLLISIONS = False

# Disabling motions only makes sense if motion collisions are disabled too.
assert not DISABLE_MOTIONS or DISABLE_MOTION_COLLISIONS
if DISABLE_MOTIONS:
    print 'Warning: trajectories are disabled'
if DISABLE_MOTION_COLLISIONS:
    print 'Warning: trajectory collisions are disabled'

####################

def enable_all(all_bodies, enable): # Enables or disables all bodies for collision checking
    for body in all_bodies:
        body.Enable(enable)

####################

def cfree_pose_fn(env, body1, body2):
    # Factory: returns a pairwise collision-free pose test for body1/body2.
    def cfree_pose(pose1, pose2): # Collision free test between an object at pose1 and an object at pose2
        body1.Enable(True)
        set_pose(body1, pose1.value)
        body2.Enable(True)
        set_pose(body2, pose2.value)
        return not env.CheckCollision(body1, body2)
    return cfree_pose

####################

def cfree_traj_fn(env, manipulator, body1, body2, all_bodies):
    # Factory: returns a trajectory-vs-placed-object collision test.
    robot = manipulator.GetRobot()
    def _cfree_traj_pose(traj, pose): # Collision free test between a robot executing traj and an object at pose
        enable_all(all_bodies, False)
        body2.Enable(True)
        set_pose(body2, pose.value)
        # Step the robot along every configuration of the trajectory.
        for conf in traj.value:
            set_manipulator_conf(manipulator, conf)
            if env.CheckCollision(robot, body2):
                return False
        return True

    def _cfree_traj_grasp_pose(traj, grasp, pose): # Collision free test between an object held at grasp while executing traj and an object at pose
        enable_all(all_bodies, False)
        body1.Enable(True)
        body2.Enable(True)
        set_pose(body2, pose.value)
        for conf in traj.value:
            set_manipulator_conf(manipulator, conf)
            # Place the held object where the gripper currently is.
            manip_trans = manipulator.GetTransform()
            set_pose(body1, object_trans_from_manip_trans(manip_trans, grasp.value))
            if env.CheckCollision(body1, body2):
                return False
        return True

    def cfree_traj(traj, pose): # Collision free test between a robot executing traj (which may or may not involve a grasp) and an object at pose
        if DISABLE_MOTION_COLLISIONS:
            return True
        if traj.pose is not None and traj.pose == pose:
            # This is the same pose of the manipulation
            return True
        return _cfree_traj_pose(traj, pose) and (traj.grasp is None or
            _cfree_traj_grasp_pose(traj, traj.grasp, pose))
    return cfree_traj

####################

def sample_grasp_traj_fn(env, manipulator, body1, all_bodies):
    # Factory: returns a generator that samples pregrasp configs plus the
    # approach trajectory that achieves a given (pose, grasp) pair.
    robot = manipulator.GetRobot()
    def sample_grasp_traj(pose, grasp): # Sample pregrasp config and motion plan that performs a grasp
        enable_all(all_bodies, False)
        body1.Enable(True)
        set_pose(body1, pose.value)
        manip_trans = manip_from_pose_grasp(pose.value, grasp.value)
        grasp_conf = solve_inverse_kinematics(manipulator, manip_trans) # Grasp configuration
        if grasp_conf is None:
            return
        if DISABLE_MOTIONS:
            yield [(Conf(grasp_conf), Traj([]))]
            return

        set_manipulator_conf(manipulator, grasp_conf)
        # Grab so the held object moves (and is collision-checked) with the
        # arm while planning the approach; released again below.
        robot.Grab(body1)

        pregrasp_trans = manip_trans.dot(trans_from_point(*APPROACH_VECTOR))
        pregrasp_conf = solve_inverse_kinematics(manipulator, pregrasp_trans) # Pre-grasp configuration
        if pregrasp_conf is None:
            return

        # Trajectory from grasp configuration to pregrasp
        if has_mp():
            path = mp_straight_line(robot, grasp_conf, pregrasp_conf)
        else:
            path = linear_motion_plan(robot, pregrasp_conf)
            #grasp_traj = vector_traj_helper(env, robot, approach_vector)
            #grasp_traj = workspace_traj_helper(base_manip, approach_vector)

        robot.Release(body1)
        if path is None:
            return
        grasp_traj = Traj(path)
        # Tag the trajectory with its manipulated pose/grasp so cfree_traj
        # can recognize the manipulation it belongs to.
        grasp_traj.pose = pose
        grasp_traj.grasp = grasp
        yield [(Conf(pregrasp_conf), grasp_traj)]
    return sample_grasp_traj

####################

def sample_free_motion_fn(manipulator, base_manip, all_bodies):
    # Factory: returns a generator sampling transit motions (hand empty).
    robot = manipulator.GetRobot()
    def sample_free_motion(conf1, conf2): # Sample motion while not holding
        if DISABLE_MOTIONS:
            #traj = Traj([conf1.value, conf2.value])
            traj = Traj([conf2.value])
            traj.pose = None
            traj.grasp = None
            yield [(traj,)]
            return
        enable_all(all_bodies, False)
        set_manipulator_conf(manipulator, conf1.value)
        if has_mp():
            path = mp_birrt(robot, conf1.value, conf2.value)
        else:
            #traj = cspace_traj_helper(base_manip, cspace, conf2.value, max_iterations=10)
            path = manipulator_motion_plan(base_manip, manipulator, conf2.value, max_iterations=10)
        if path is None:
            return
        traj = Traj(path)
        traj.pose = None
        traj.grasp = None
        yield [(traj,)]
    return sample_free_motion

####################

def sample_holding_motion_fn(manipulator, base_manip, body1, all_bodies):
    # Factory: returns a generator sampling transfer motions while holding
    # body1 at a given grasp.
    robot = manipulator.GetRobot()
    def sample_holding_motion(conf1, conf2, grasp): # Sample motion while holding
        if DISABLE_MOTIONS:
            #traj = Traj([conf1.value, conf2.value])
            traj = Traj([conf2.value])
            traj.pose = None
            traj.grasp = grasp
            yield [(traj,)]
            return
        enable_all(all_bodies, False)
        body1.Enable(True)
        set_manipulator_conf(manipulator, conf1.value)
        # Attach the object at its grasp so planning accounts for it.
        manip_trans = manipulator.GetTransform()
        set_pose(body1, object_trans_from_manip_trans(manip_trans, grasp.value))
        robot.Grab(body1)
        if has_mp():
            path = mp_birrt(robot, conf1.value, conf2.value)
        else:
            #traj = cspace_traj_helper(base_manip, cspace, conf2.value, max_iterations=10)
            path = manipulator_motion_plan(base_manip, manipulator, conf2.value, max_iterations=10)
        robot.Release(body1)
        if path is None:
            return
        traj = Traj(path)
        traj.pose = None
        traj.grasp = grasp
        yield [(traj,)]
    return sample_holding_motion

####################

def visualize_solution(env, problem, initial_conf, robot, manipulator, bodies, plan):
    # Replays a computed plan in the OpenRAVE viewer, pausing 50 ms between
    # configurations; pick/place replay the approach in reverse first.
    def _execute_traj(confs):
        for j, conf in enumerate(confs):
            set_manipulator_conf(manipulator, conf)
            sleep(0.05)
            #raw_input('%s/%s) Step?'%(j, len(confs)))

    # Resets the initial state
    set_manipulator_conf(manipulator, initial_conf.value)
    for obj, pose in problem.initial_poses.iteritems():
        set_pose(bodies[obj], pose.value)

    raw_input('Start?')
    for i, (action, args) in enumerate(plan):
        #raw_input('\n%s/%s) Next?'%(i, len(plan)))
        if action.name == 'move':
            _, _, traj = args
            _execute_traj(traj.value)
        elif action.name == 'move_holding':
            _, _, traj, _, _ = args
            _execute_traj(traj.value)
        elif action.name == 'pick':
            obj, _, _, _, traj = args
            # Approach (reversed pregrasp trajectory), grab, then retreat.
            _execute_traj(traj.value[::-1])
            robot.Grab(bodies[obj])
            _execute_traj(traj.value)
        elif action.name == 'place':
            obj, _, _, _, traj = args
            _execute_traj(traj.value[::-1])
            robot.Release(bodies[obj])
            _execute_traj(traj.value)
        else:
            raise ValueError(action.name)
        env.UpdatePublishedBodies()
"""Climate normals daily data""" import re import numpy as np import pandas as pd def norm_get_dly(): """Get all daily climate normals data""" prcp = norm_get_dly_prcp() snow = norm_get_dly_snow() tavg = norm_get_dly_tavg() tmax = norm_get_dly_tmax() tmin = norm_get_dly_tmin() by = ["id", "month", "day"] df = pd.merge(prcp, snow, how="outer", on=by) df = pd.merge(df, tavg, how="outer", on=by) df = pd.merge(df, tmax, how="outer", on=by) df = pd.merge(df, tmin, how="outer", on=by) df = remove_invalid_dates(df) df = df.sort_values(by) return df def norm_get_dly_prcp(): path = "https://www1.ncdc.noaa.gov/pub/data/normals/1981-2010/products/precipitation/dly-prcp-pctall-ge010hi.txt" df = norm_read_dly_file(path) df = pd.melt(df, id_vars=["id", "month"]) df.columns = ["id", "month", "day", "prcp"] df["month"] = df["month"].astype(int) df["day"] = df["day"].astype(int) df["prcp"] = df["prcp"] / 10 return df def norm_get_dly_snow(): path = "https://www1.ncdc.noaa.gov/pub/data/normals/1981-2010/products/precipitation/dly-snow-pctall-ge010ti.txt" df = norm_read_dly_file(path) df = pd.melt(df, id_vars=["id", "month"]) df.columns = ["id", "month", "day", "snow"] df["month"] = df["month"].astype(int) df["day"] = df["day"].astype(int) df["snow"] = df["snow"] / 10 return df def norm_get_dly_tavg(): path = "https://www1.ncdc.noaa.gov/pub/data/normals/1981-2010/products/temperature/dly-tavg-normal.txt" df = norm_read_dly_file(path) df = pd.melt(df, id_vars=["id", "month"]) df.columns = ["id", "month", "day", "tavg"] df["month"] = df["month"].astype(int) df["day"] = df["day"].astype(int) df["tavg"] = df["tavg"] / 10 return df def norm_get_dly_tmax(): path = "https://www1.ncdc.noaa.gov/pub/data/normals/1981-2010/products/temperature/dly-tmax-normal.txt" df = norm_read_dly_file(path) df = pd.melt(df, id_vars=["id", "month"]) df.columns = ["id", "month", "day", "tmax"] df["month"] = df["month"].astype(int) df["day"] = df["day"].astype(int) df["tmax"] = df["tmax"] / 10 return df def 
norm_get_dly_tmin(): path = "https://www1.ncdc.noaa.gov/pub/data/normals/1981-2010/products/temperature/dly-tmin-normal.txt" df = norm_read_dly_file(path) df = pd.melt(df, id_vars=["id", "month"]) df.columns = ["id", "month", "day", "tmin"] df["month"] = df["month"].astype(int) df["day"] = df["day"].astype(int) df["tmin"] = df["tmin"] / 10 return df def norm_read_dly_file(path): """Read a normals daily file""" spec1 = {"id": [(0, 11), str], "month": [(12, 14), float]} spec2 = {i: [(11 + i * 7, 16 + i * 7), float] for i in range(1, 32)} spec = {**spec1, **spec2} names = spec.keys() values = spec.values() colspecs = [x[0] for x in values] dtypes = [x[1] for x in values] dtypes = dict(zip(names, dtypes)) na = ["-5555", "-6666", "-7777", "-8888", "-9999"] try: df = pd.read_fwf( path, colspecs=colspecs, names=names, dtype=dtypes, na_values=na, keep_default_na=False, ) except Exception as e: print("Exception: ", e, " at ", path) return df def remove_invalid_dates(df): bad = ( ((df["month"] == 2) & (df["day"] > 29)) | ((df["month"] == 4) & (df["day"] == 31)) | ((df["month"] == 6) & (df["day"] == 31)) | ((df["month"] == 9) & (df["day"] == 31)) | ((df["month"] == 11) & (df["day"] == 31)) ) return df[~bad]
#coding=utf-8
'''
Created on 2016-9-27
@author: dengdan

Matplotlib plotting helpers (histograms, image grids, window control).
NOTE(review): Python 2 module -- see `print path` and the integer
divisions in show_images.
'''
import matplotlib as mpl
# mpl.use('Agg')
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import util


def hist(x, title = None, normed = False, show = True, save = False, save_path = None, bin_count = 100, bins = None, cumulative = False):
    # Plot (and optionally save) a histogram of x; x is flattened first.
    x = np.asarray(x)
    if len(np.shape(x)) > 1:
#        x = np.reshape(x, np.prod(x.shape))
        x = util.np.flatten(x)
    if bins == None:
        # Default: evenly spaced bin edges spanning the data range.
        bins = np.linspace(start = min(x), stop = max(x), num = bin_count, endpoint = True, retstep = False)
    plt.figure(num = title)
    plt.hist(x, bins, normed = normed, cumulative = cumulative)
    if save:
        if save_path is None:
            raise ValueError
        # The figure is written to <save_path>/<title>.png
        path = util.io.join_path(save_path, title + '.png')
        save_image(path)
    if show:
        plt.show()
#        util.img.imshow(title, path, block = block)


def plot_solver_data(solver_path):
    # Plot training/validation losses and accuracies stored in a solver
    # object previously serialized with util.io.
    data = util.io.load(solver_path)
    training_losses = data.training_losses
    training_accuracies = data.training_accuracies
    val_losses = data.val_losses
    val_accuracies = data.val_accuracies
    plt.figure(solver_path)
    n = len(training_losses)
    x = range(n)
    plt.plot(x, training_losses, 'r-', label = 'Training Loss')
    if len(training_accuracies) > 0:
        plt.plot(x, training_accuracies, 'r--', label = 'Training Accuracy')
    if len(val_losses) > 0:
        # Validation curves may have their own number of points.
        n = len(val_losses)
        x = range(n)
        plt.plot(x, val_losses, 'g-', label = 'Validation Loss')
    if len(val_accuracies) > 0:
        plt.plot(x, val_accuracies, 'g--', label = 'Validation Accuracy')
    plt.legend()
    plt.show()


def rectangle(xy, width, height, color = 'red', linewidth = 1, fill = False, alpha = None, axis = None):
    """
    draw a rectangle on plt axis
    """
    import matplotlib.patches as patches
    rect = patches.Rectangle(
        xy = xy,
        width = width,
        height = height,
        alpha = alpha,
        color = color,
        fill = fill,
        linewidth = linewidth
    )
    if axis is not None:
        # Only attached to a figure when an axis is supplied.
        axis.add_patch(rect)
    return rect

# Short alias.
rect = rectangle


def set_pos(x, y):
    # Move the current figure window to screen position (x, y).
    # The call differs per GUI backend.
    mgr = plt.get_current_fig_manager()
    backend = mpl.get_backend()
    if backend == 'TkAgg':
        mgr.window.wm_geometry("+%d+%d" % (x, y))
    elif backend == 'WXAgg':
        mgr.window.SetPosition((x, y))
    else:
        # This works for QT and GTK
        # You can also use window.setGeometry
        mgr.window.move(x, y)


def maximize_figure():
    # Toggle full-screen mode on the current figure.
    mng = plt.get_current_fig_manager()
    mng.full_screen_toggle()


def line(xy_start, xy_end, color = 'red', linewidth = 1, alpha = None, axis = None):
    """
    draw a line on plt axis
    """
    from matplotlib.lines import Line2D
    # The segment is sampled at `num` evenly spaced points.
    num = 100
    xdata = np.linspace(xy_start[0], xy_end[0], num = num)
    ydata = np.linspace(xy_start[1], xy_end[1], num = num)
    line = Line2D(
        alpha = alpha,
        color = color,
        linewidth = linewidth,
        xdata = xdata,
        ydata = ydata
    )
    if axis is not None:
        axis.add_line(line)
    return line


def imshow(title = None, img = None, gray = False):
    # Convenience wrapper: show a single image via show_images.
    show_images([img], [title], gray = gray)


def show_images(images, titles = None, shape = None, share_axis = False, bgr2rgb = False, maximized = False, show = True, gray = False, save = False, color_bar = False, path = None, axis_off = False, vertical = False, subtitle = None):
    # Display a list of images in a grid of subplots and return the axes.
    # `shape` is (rows, cols); a negative entry means "derive from the other".
    plt.close('all')
    if shape == None:
        if vertical:
            shape = (len(images), 1)
        else:
            shape = (1, len(images))
    shape = list(shape)
    if shape[0] < 0:
        # NOTE(review): relies on Python-2 integer division.
        shape[0] =(len(images) + shape[1]) / shape[1]
    elif shape[1] < 0:
        # NOTE(review): `len(images + shape[0])` adds an int to a list, so
        # this branch raises TypeError; likely meant (len(images) + shape[0]).
        shape[1] = (len(images + shape[0])) / shape[0]
    ret_axes = []
    ax0 = None
    plt.figure()
    for idx, img in enumerate(images):
        if bgr2rgb:
            img = util.img.bgr2rgb(img)
        # NOTE(review): grid position relies on Python-2 integer division.
        loc = (idx / shape[1], idx % shape[1])
        if idx == 0:
            ax = plt.subplot2grid(shape, loc)
            ax0 = ax
        else:
            if share_axis:
                ax = plt.subplot2grid(shape, loc, sharex = ax0, sharey = ax0)
            else:
                ax = plt.subplot2grid(shape, loc)
        if len(np.shape(img)) == 2 and gray:
            # 2-D arrays can be drawn as grayscale.
            img_ax = ax.imshow(img, cmap = 'gray')
        else:
            img_ax = ax.imshow(img)
        if len(np.shape(img)) == 2 and color_bar:
            plt.colorbar(img_ax, ax = ax)
        if titles != None:
            ax.set_title(titles[idx])
        if axis_off:
            plt.axis('off')
#            plt.xticks([]), plt.yticks([])
        ret_axes.append(ax)
    if subtitle is not None:
        set_subtitle(subtitle)
    if maximized:
        maximize_figure()
    if save:
        if path is None:
            # Fall back to a temporary path instead of failing.
            path = util.get_temp_path()
            print path
#            raise ValueError('path can not be None when save is True')
        save_image(path)
    if show:
        plt.show()
    return ret_axes


def save_image(path, img = None, dpi = 150):
    # Save the current figure (img is None) or an image array to `path`,
    # creating parent directories as needed.
    path = util.io.get_absolute_path(path)
    util.io.make_parent_dir(path)
    if img is None:
        plt.gcf().savefig(path, dpi = dpi)
    else:
        plt.imsave(path, img, format = 'png')

# Alias matching cv2-style naming used elsewhere.
imwrite = save_image


def to_ROI(ax, ROI):
    # Restrict axis limits to a region of interest ((xmin, ymin), (xmax, ymax)).
    xy1, xy2 = ROI
    xmin, ymin = xy1
    xmax, ymax = xy2
    ax.set_xlim(xmin, xmax)
    #ax.extent
    # y limits reversed so the axis matches image coordinates.
    ax.set_ylim(ymax, ymin)


def set_subtitle(title, fontsize = 12):
    # Figure-level title (suptitle).
    plt.gcf().suptitle(title, fontsize=fontsize)


def show(maximized = False):
    # Show the current figure, optionally full-screen.
    if maximized:
        maximize_figure()
    plt.show()


def draw():
    # Force a redraw of the current figure canvas.
    plt.gcf().canvas.draw()


def get_random_line_style():
    # Random matplotlib style string such as 'r-'.
    colors = ['r', 'g', 'b']
    line_types = ['-']#, '--', '-.', ':']
    idx = util.rand.randint(len(colors))
    color = colors[idx]
    idx = util.rand.randint(len(line_types))
    line_type = line_types[idx]
    return color + line_type
import numpy as np
import fenics as fe
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import time as ti
from ellipticCauchyPro import *
from vbiIP import *
from addNoise import *

"""
This script used for evaluating Example 1 shown in
B. Jin, A variational Bayesian method to inverse problems with implusive
noise, Journal of Computational Physics, 231, 2012, 423-435.
"""

# True Neumann data on three sides of the unit square:
# q1 = {0} x [0,1], q2 = {1} x [0,1], q3 = [0,1] x {0}.
q1_expre_n = "-(pi*cos(pi*x[0])*exp(pi*x[1]) + 1)"
q2_expre_n = "pi*cos(pi*x[0])*exp(pi*x[1]) + 1"
q3_expre_n = "-(pi*sin(pi*x[0])*exp(pi*x[1]) + 1)"

# True Dirichlet data on the top side q4 = [0,1] x {1} (hat function).
# Alternatives kept for reference:
#q4_expre_d = "sin(pi*x[0])*exp(pi) + x[0] + 1"
#q4_expre_d = "0.3<=x[0] && x[0]<=0.7 ? 0.5 : 0"
q4_expre_d = "0.0<=x[0] && x[0]<=0.5 ? 2*x[0] : (0.5<=x[0] && x[0]<=1.0 ? 2-2*x[0] : 0)"

# Forward-problem parameters (fine mesh used for data generation).
para = {'mesh_N': [100, 100], 'q1': q1_expre_n, 'q2': q2_expre_n, \
        'q3': q3_expre_n, 'q4': q4_expre_d, 'alpha': '1.0', \
        'f': '0.0', 'P': 2}

# Coordinates of the measurement points (should be an even number).
mea = MeasurePoints(80)

# Generate measurement data by employing FEM on a fine mesh.
fineFun = calTrueSol(para)
u_t = lambda dian: fineFun(dian[0], dian[1])
u_tm = np.array([u_t(dian) for dian in mea.points_m])
gH2 = GeneUH(para)
gH2.eva()
Uh = gH2.gene(mea.points_m)

# Corrupt the measurements with impulsive noise.
paraNoise = {'rate': 0.5, 'noise_level': 5}
#u_tm_n, sig = addGaussianNoise(u_tm, paraNoise, 'y')
u_tm_n = addImplusiveNoise(u_tm, paraNoise, 'y')
d = u_tm_n - Uh

# Forward operator H on a coarser inversion mesh (precomputed and cached).
para['mesh_N'] = [50, 50]
gH = GeneH(para)
gH.eva(101)  # set 101 basis functions
#H = gH.gene(mea.points_m)
#np.save('Hsave.npy', H)
H = np.load('Hsave.npy')

# Generate the regularization matrix.
r, c = np.shape(H)
W = geneL(c)

# Alg I with Gaussian noise assumption.
para1 = {'alpha0': 1, 'beta0': 1e-3, 'alpha1': 1, 'beta1': 1e-7}
t01 = ti.time()
mE1, precisionMatrix1, eta1, lan1, tau1, ite1 = approxIGaussian(H, W, d, para1)
t11 = ti.time()

# Alg II with Gaussian noise assumption.
para2 = {'alpha0': 1, 'beta0': 1e-3, 'alpha1': 1, 'beta1': 1e-7}
t02 = ti.time()
# BUG FIX: para2 was defined but para1 was being passed.
mE2, eta2, lan2, tau2, ite2 = approxIIGaussian(H, W, d, para2)
t12 = ti.time()

# Alg I with centered t-distribution noise assumption (robust variant).
para3 = {'alpha0': 1, 'beta0': 1e-3, 'alpha1': 1, 'beta1': 1e-7}
t03 = ti.time()
# BUG FIX: para3 was defined but para2 was being passed.
mE3, precisionMatrix3, lan3, error, wS, ite3 = approxICenteredT(H, W, d, para3)
t13 = ti.time()

''' post process '''
# Evaluate the estimated boundary functions along the top edge y = 1.
xx = np.linspace(0, 1, 100)
yy = xx*0.0 + 1.0
gH.setBasis(mE1)
fmE1 = gH.calBasisFun(xx)
gH.setBasis(mE2)
fmE2 = gH.calBasisFun(xx)
gH.setBasis(mE3)
fmE3 = gH.calBasisFun(xx)

# Evaluate the true boundary function.
trueFun = fe.Expression(q4_expre_d, degree=5)
trueFun = np.vectorize(trueFun)
fm = trueFun(xx, yy)

print('Approx I:')
print('Inversion consumes ', t11-t01, 's')
print('approxI iterate ', ite1, ' times')
print('The regularization parameter is ', eta1[-1])
res_opt1 = np.linalg.norm(fmE1 - fm, ord=2)/np.linalg.norm(fm, ord=2)
print('L2 norm of residual = ', res_opt1*100, '%')
print('')

print('Approx II:')
print('Inversion consumes ', t12-t02, 's')
# BUG FIX: this message previously said 'approxI iterate'.
print('approxII iterate ', ite2, ' times')
print('The regularization parameter is ', eta2[-1])
res_opt2 = np.linalg.norm(fmE2 - fm, ord=2)/np.linalg.norm(fm, ord=2)
print('L2 norm of residual = ', res_opt2*100, '%')
print('')

# BUG FIX: header typo 'Roboust' and mislabelled 'approxII iterate'.
print('Robust Approx I:')
print('Inversion consumes ', t13-t03, 's')
print('robust approxI iterate ', ite3, ' times')
print('The regularization parameter is ', lan3[-1])
res_opt3 = np.linalg.norm(fmE3 - fm, ord=2)/np.linalg.norm(fm, ord=2)
print('L2 norm of residual = ', res_opt3*100, '%')

# True vs. estimated boundary functions.
plt.figure()
plt.plot(xx, fmE1, color='red', label='Estimated by Alg I')
plt.plot(xx, fmE2, color='black', linestyle='dashed', label='Estimated by Alg II')
plt.plot(xx, fmE3, color='red', linestyle='dashed', label='Estimated by robust Alg I')
plt.plot(xx, fm, color='blue', label='True')
plt.xlabel('x coordiante')
plt.ylabel('function values')
plt.title('True and estimated functions')
plt.legend()
plt.show()

# Estimated noise weights vs. normalized data.
plt.figure()
plt.plot(wS/np.max(wS), 'o-')
plt.plot(d/np.max(d), '*-')
plt.show()

# Noisy vs. noise-free normalized residual data.
plt.figure()
plt.plot(d/np.max(d))
dture = u_tm - Uh
plt.plot(dture/np.max(dture))
plt.show()
import numpy as np
import matplotlib.pyplot as plt
import itertools as itr

class AlignPlot(object):
    #Class to define plot for interactive aligning of images
    #
    # Shows the reference image (index 0) in blue with one other image
    # overlaid in red, and lets the user nudge the overlay with the arrow
    # keys, updating ImgAligner.offsets in place.
    def __init__(self, ImgAligner):
        # ImgAligner is assumed to expose: numIms, Images (each with .data
        # and .Imglim), and offsets (x and y shift arrays) -- TODO confirm.
        self.ImgAligner = ImgAligner
        self.fig = plt.figure()
        self.ax = plt.axes([0.05, 0.05, 0.95, 0.95])
        # Cycle forever over every image index except 0 (the fixed reference).
        self.IndGen = itr.cycle(range(ImgAligner.numIms)[1:])
        self.Index = next(self.IndGen)
        self.ShowImages()
        # Keyboard handler drives the whole interaction; show() blocks.
        self.cidkey = self.fig.canvas.mpl_connect('key_press_event', self.MoveImg)
        plt.show()

    def ShowImages(self):
        #Refresh the axis and plot a pair of images
        self.ax.cla()
        # Reference image in blue, never shifted.
        self.ax.imshow(self.ImgAligner.Images[0].data, interpolation = 'none', clim = self.ImgAligner.Images[0].Imglim, cmap = 'Blues')
        ## Fix rollover error - rolled full cycle
        # np.roll wraps pixels around the edges, so large offsets roll over.
        ImgOver = np.roll(np.roll(self.ImgAligner.Images[self.Index].data, self.ImgAligner.offsets[0][self.Index], axis = 1), self.ImgAligner.offsets[1][self.Index], axis = 0)
        # Current image in semi-transparent red on top of the reference.
        self.ax.imshow(ImgOver, interpolation = 'none', cmap = 'Reds', clim = self.ImgAligner.Images[self.Index].Imglim, alpha = 0.5)
        self.ax.axis('off')
        self.fig.canvas.draw()

    def MoveImg(self, event):
        #Calculations to do on pressing a button - arrow keys to move top image to overlay the bottom
        # offsets[0] holds the column (x) shift, offsets[1] the row (y) shift.
        if event.key == 'right':
            self.ImgAligner.offsets[0][self.Index] += 1
        elif event.key == 'left':
            self.ImgAligner.offsets[0][self.Index] -= 1
        elif event.key == 'up':
            self.ImgAligner.offsets[1][self.Index] -= 1
        elif event.key == 'down':
            self.ImgAligner.offsets[1][self.Index] += 1
        elif event.key == 'n':
            # Advance to the next overlay image.
            self.Index = next(self.IndGen)
        elif event.key == 'enter':
            # Finish: detach the handler and close the window.
            self.fig.canvas.mpl_disconnect(self.cidkey)
            plt.close()
        else:
            # Unhandled key: do not redraw.
            return
        self.ShowImages()
"""Randomized parity tests: cranet loss functions vs. their torch references."""

import os
import sys
import unittest

import numpy as np
import torch
from torch.nn import functional as torch_F

from src.cranet.nn import functional as cranet_F
from src import cranet

from ..utils import teq


def _tols(tol):
    """Build the optional-tolerance argument tuple for ``teq``."""
    return () if tol is None else (tol,)


def _check_elementwise_loss(tc, cranet_fn, torch_fn, reduction,
                            loss_tol=None, shape=(64, 10)):
    """One randomized check of an elementwise loss (l1/mse): compare the
    loss value and the gradients w.r.t. both inputs against torch."""
    x = np.random.rand(*shape)
    y = np.random.rand(*shape)
    x_c = cranet.Tensor(x, requires_grad=True)
    x_t = torch.tensor(x, requires_grad=True)
    y_c = cranet.Tensor(y, requires_grad=True)
    y_t = torch.tensor(y, requires_grad=True)
    l_c = cranet_fn(x_c, y_c, reduction=reduction)
    l_t = torch_fn(x_t, y_t, reduction=reduction)
    if reduction == 'none':
        # Unreduced losses need an explicit upstream gradient.
        g = np.random.rand(*shape)
        l_t.backward(torch.tensor(g))
        l_c.backward(cranet.Tensor(g))
    else:
        l_t.backward()
        l_c.backward()
    tc.assertTrue(teq(l_c, l_t, *_tols(loss_tol)))
    tc.assertTrue(teq(x_c.grad, x_t.grad))
    tc.assertTrue(teq(y_c.grad, y_t.grad))


def _check_bce(tc, reduction, shape, loss_tol=None, grad_tol=None):
    """One randomized check of binary_cross_entropy; the torch target is a
    plain (non-grad) tensor, so only the input gradient is compared."""
    a = np.random.rand(*shape)
    b = np.random.rand(*shape)
    a0 = cranet.Tensor(a, requires_grad=True)
    a1 = torch.tensor(a, requires_grad=True)
    b0 = cranet.Tensor(b, requires_grad=True)
    b1 = torch.tensor(b)
    c0 = cranet_F.binary_cross_entropy(a0, b0, reduction=reduction)
    c1 = torch_F.binary_cross_entropy(a1, b1, reduction=reduction)
    # NOTE(review): kept from the original tests; presumably clears any
    # stale cranet gradients before backward -- confirm semantics.
    c0.zero_grad()
    c1.backward()
    c0.backward()
    tc.assertTrue(teq(c0, c1, *_tols(loss_tol)))
    tc.assertTrue(teq(a0.grad, a1.grad, *_tols(grad_tol)))


def _check_classification_loss(tc, cranet_fn, torch_fn, reduction, loss_tol=None):
    """One randomized check of a classification loss (cross_entropy /
    nll_loss) with integer class targets; targets carry no gradient, so
    only the input gradient is compared."""
    x = np.random.rand(64, 10)
    y = np.random.randint(0, 10, [64])
    x_c = cranet.Tensor(x, requires_grad=True)
    y_c = cranet.Tensor(y)
    x_t = torch.tensor(x, requires_grad=True)
    y_t = torch.tensor(y)
    l_c = cranet_fn(x_c, y_c, reduction=reduction)
    l_t = torch_fn(x_t, y_t, reduction=reduction)
    tc.assertTrue(teq(l_c, l_t, *_tols(loss_tol)))
    if reduction == 'none':
        g = np.random.rand(64)
        l_t.backward(torch.tensor(g))
        l_c.backward(cranet.Tensor(g))
    else:
        l_t.backward()
        l_c.backward()
    tc.assertTrue(teq(x_c.grad, x_t.grad))


class TestL1Loss(unittest.TestCase):
    def test_l1_0(self):
        for _ in range(100):
            _check_elementwise_loss(self, cranet_F.l1_loss, torch_F.l1_loss, 'mean')

    def test_l1_1(self):
        for _ in range(100):
            _check_elementwise_loss(self, cranet_F.l1_loss, torch_F.l1_loss, 'sum',
                                    loss_tol=2e-13)

    def test_l1_2(self):
        # BUG FIX: the original asserted teq(y_d, y_t) -- trivially true
        # since y is never modified; the unreduced loss values themselves
        # are now compared.
        for _ in range(100):
            _check_elementwise_loss(self, cranet_F.l1_loss, torch_F.l1_loss, 'none')


class TestMSELoss(unittest.TestCase):
    def test_mse_0(self):
        for _ in range(100):
            _check_elementwise_loss(self, cranet_F.mse_loss, torch_F.mse_loss, 'mean')

    def test_mse_1(self):
        for _ in range(100):
            _check_elementwise_loss(self, cranet_F.mse_loss, torch_F.mse_loss, 'sum',
                                    loss_tol=2e-13)

    def test_mse_2(self):
        # BUG FIX: same copy-paste defect as test_l1_2 (teq(y_d, y_t)).
        for _ in range(100):
            _check_elementwise_loss(self, cranet_F.mse_loss, torch_F.mse_loss, 'none')


class TestBCELoss(unittest.TestCase):
    def test_binary_cross_entropy_0(self):
        for _ in range(1000):
            _check_bce(self, 'mean', (10,), grad_tol=2e-11)

    def test_binary_cross_entropy_1(self):
        for _ in range(1000):
            _check_bce(self, 'sum', (10,), loss_tol=2e-9, grad_tol=2e-10)

    def test_binary_cross_entropy_2(self):
        for _ in range(1000):
            _check_bce(self, 'mean', (3, 5), loss_tol=2e-9, grad_tol=2e-9)

    def test_binary_cross_entropy_3(self):
        for _ in range(1000):
            _check_bce(self, 'sum', (3, 5, 4), loss_tol=2e-9, grad_tol=2e-9)


class TestCrossEntropyLoss(unittest.TestCase):
    # TODO: add coverage for the `weight=` parameter (previously a
    # commented-out test).
    def test_cross_entropy_0(self):
        for _ in range(100):
            _check_classification_loss(self, cranet_F.cross_entropy,
                                       torch_F.cross_entropy, 'mean')

    def test_cross_entropy_1(self):
        for _ in range(100):
            _check_classification_loss(self, cranet_F.cross_entropy,
                                       torch_F.cross_entropy, 'sum', loss_tol=2e-13)

    def test_cross_entropy_2(self):
        for _ in range(100):
            _check_classification_loss(self, cranet_F.cross_entropy,
                                       torch_F.cross_entropy, 'none')


class TestNLLLoss(unittest.TestCase):
    # TODO: add coverage for the `weight=` parameter (previously a
    # commented-out test).
    def test_nll_loss_0(self):
        # Note: the original used cranet.tensor() here; the helper uses
        # cranet.Tensor() consistently with every other test in this file.
        for _ in range(100):
            _check_classification_loss(self, cranet_F.nll_loss,
                                       torch_F.nll_loss, 'mean')

    def test_nll_loss_1(self):
        for _ in range(100):
            _check_classification_loss(self, cranet_F.nll_loss,
                                       torch_F.nll_loss, 'sum', loss_tol=2e-14)

    def test_nll_loss_2(self):
        for _ in range(100):
            _check_classification_loss(self, cranet_F.nll_loss,
                                       torch_F.nll_loss, 'none')


if __name__ == '__main__':
    sys.path.append(os.getcwd())
    unittest.main()
import sys
import numpy
import bob.io.base
import bob.io.base.test_utils
import bob.io.image
import bob.ip.facedetect
from bob.ip.skincolorfilter import SkinColorFilter

# Load the bundled test image (bob images are channels-first: C x H x W).
face_image = bob.io.base.load(bob.io.base.test_utils.datafile('test-face.jpg', 'bob.ip.skincolorfilter'))

# BUG FIX: face detection previously ran twice with the first result
# discarded; detect once and unpack the (bounding_box, quality) pair.
bounding_box, quality = bob.ip.facedetect.detect_single_face(face_image)
face = face_image[:, bounding_box.top:bounding_box.bottom, bounding_box.left:bounding_box.right]

# Fit the skin-color model on the detected face region, then compute a
# skin mask over the whole image at threshold 0.5.
skin_filter = SkinColorFilter()
skin_filter.estimate_gaussian_parameters(face)
skin_mask = skin_filter.get_skin_mask(face_image, 0.5)

# Zero out every non-skin pixel (mask is applied across all channels).
skin_image = numpy.copy(face_image)
skin_image[:, numpy.logical_not(skin_mask)] = 0

from matplotlib import pyplot

# rollaxis applied twice converts (C, H, W) -> (H, W, C) for matplotlib.
f, ax = pyplot.subplots(1, 1)
ax.set_title('Original Image')
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(numpy.rollaxis(numpy.rollaxis(face_image, 2),2))

f, ax = pyplot.subplots(1, 1)
ax.set_title('Detected skin pixels')
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(numpy.rollaxis(numpy.rollaxis(skin_image, 2),2))
import numpy as np

# A 4x7 matrix holding the integers 0..27 in row-major order.
X = np.arange(28).reshape(4, -1)
print(X)

# Every second row, every third column (rows 0 and 2; columns 0, 3, 6).
sub = X[::2, ::3]
print(sub)
#!/usr/bin/env python
"""
Utility and I/O functions to read and write data files or Synapse tables.

NOTE(review): Python 2 module (`iteritems`, `unicode`, and the deprecated
`pd.Series.from_csv` below).

Authors:
    - Arno Klein, 2015-2016 (arno@childmind.org) http://binarybottle.com

Copyright 2015-2016, Sage Bionetworks (sagebase.org), with later modifications:
Copyright 2016, Child Mind Institute (childmind.org), Apache v2.0 License
"""


def extract_synapse_rows(synapse_table, save_path=None, limit=None, username='', password=''):
    """
    Extract rows from a Synapse table.

    Parameters
    ----------
    synapse_table : string or Schema
        a synapse ID or synapse table Schema object
    save_path : string
        save rows as separate files in this path, unless empty or None
    limit : integer or None
        limit to number of rows returned by the query
    username : string
        Synapse username (only needed once on a given machine)
    password : string
        Synapse password (only needed once on a given machine)

    Returns
    -------
    rows : list of pandas Series
        each row of a Synapse table
    row_files: list of strings
        file names corresponding to each of the rows

    Examples
    --------
    >>> from mhealthx.xio import extract_synapse_rows
    >>> import synapseclient
    >>> syn = synapseclient.Synapse()
    >>> syn.login()
    >>> synapse_table = 'syn4590865'
    >>> save_path = '.'
    >>> limit = 3
    >>> username = ''
    >>> password = ''
    >>> rows, row_files = extract_synapse_rows(synapse_table, save_path, limit, username='', password='')
    """
    import os
    import pandas as pd
    import synapseclient

    # Log in to Synapse:
    syn = synapseclient.Synapse(skip_checks=True)
    if username and password:
        syn.login(username, password, silent=True)
    else:
        syn.login(silent=True)

    # Synapse table query:
    if limit:
        limit_string = 'limit {0}'.format(limit)
    else:
        limit_string = ''
    try:
        results = syn.tableQuery('select * from {0} {1}'.
                                 format(synapse_table, limit_string))
    except IOError as e:
        import traceback; traceback.print_exc()
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
        # Query failed: signal failure to the caller with None results.
        rows = None
        row_files = None
    else:
        # Column headers: map column name -> column index.
        headers = {header['name']: i for i, header in enumerate(results.headers)}
        # Rows:
        rows = []
        row_files = []
        for irow, row in enumerate(results):
            # One Series per row, all values coerced to unicode strings.
            row_map = {col:row[i] for col,i in headers.iteritems()}
            columns = row_map.keys()
            values = [unicode(x) for x in row_map.values()]
            row_series = pd.Series(values, columns)
            if save_path:
                # File name encodes row id, version and record id.
                csv_file = 'row{0}_v{1}_{2}'.format(row_map['ROW_ID'],
                                                    row_map['ROW_VERSION'],
                                                    row_map['recordId'])
                csv_file = os.path.join(save_path, csv_file)
                try:
                    row_series.to_csv(csv_file)
                except IOError as e:
                    import traceback; traceback.print_exc()
                    print("I/O error({0}): {1}".format(e.errno, e.strerror))
                else:
                    # NOTE(review): rows/row_files are only collected when
                    # save_path is set and the write succeeds -- confirm
                    # that is intended for the save_path=None case.
                    rows.append(row_series)
                    row_files.append(csv_file)

    return rows, row_files


def read_file_from_synapse_table(synapse_table, row, column_name,
                                 out_path=None, username='', password=''):
    """
    Read data from a row of a Synapse table.

    Parameters
    ----------
    synapse_table : string or Schema
        a synapse ID or synapse table Schema object
    row : pandas Series or string
        row of a Synapse table converted to a Series or csv file
    column_name : string
        name of file handle column
    out_path : string
        a local path in which to store downloaded files
    username : string
        Synapse username (only needed once on a given machine)
    password : string
        Synapse password (only needed once on a given machine)

    Returns
    -------
    row : pandas Series
        same as passed in: row of a Synapse table as a file or Series
    filepath : string
        downloaded file (full path)

    Examples
    --------
    >>> from mhealthx.xio import extract_synapse_rows, read_file_from_synapse_table
    >>> import synapseclient
    >>> syn = synapseclient.Synapse()
    >>> syn.login()
    >>> synapse_table = 'syn4590865'
    >>> save_path = '.'
    >>> limit = 3
    >>> username = ''
    >>> password = ''
    >>> rows, row_files = extract_synapse_rows(synapse_table, save_path, limit, username='', password='')
    >>> column_name = 'audio_audio.m4a' #, 'audio_countdown.m4a']
    >>> out_path = None
    >>> for i in range(3):
    >>>     row = rows[i]
    >>>     row, filepath = read_file_from_synapse_table(synapse_table, row, column_name, out_path, username, password)
    >>>     print(row)
    """
    import pandas as pd
    import synapseclient

    # Accept either an in-memory Series or a path to a saved row csv.
    if type(row) == pd.Series:
        pass
    elif type(row) == str:
        # Read row from csv file to pandas Series:
        # NOTE(review): pd.Series.from_csv is removed in modern pandas.
        row = pd.Series.from_csv(row)
    else:
        raise IOError("row should be a pandas Series or a file string")

    # Log in to Synapse:
    syn = synapseclient.Synapse(skip_checks=True)
    if username and password:
        syn.login(username, password, silent=True)
    else:
        syn.login(silent=True)

    # Try to download file with column_name in row:
    try:
        if not out_path:
            # Default download directory encodes row id/version/record id.
            out_path='./row{0}_v{1}_{2}'.format(row['ROW_ID'],
                                                row['ROW_VERSION'],
                                                row['recordId'])
        fileinfo = syn.downloadTableFile(synapse_table,
                                         rowId=row['ROW_ID'],
                                         versionNumber=row['ROW_VERSION'],
                                         column=column_name,
                                         downloadLocation=out_path)
        filepath = fileinfo['path']
    except IOError as e:
        import traceback; traceback.print_exc()
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
        # Download failed: return None for the path, keep the row.
        filepath = None

    return row, filepath


def row_to_table(row_data, output_table):
    """
    Add row to table using nipype (thread-safe in multi-processor execution).

    (Requires Python module lockfile)

    Parameters
    ----------
    row_data : pandas Series
        row of data
    output_table : string
        add row to this table file

    Examples
    --------
    >>> import pandas as pd
    >>> from mhealthx.xio import row_to_table
    >>> row_data = pd.Series({'A': ['A0'], 'B': ['B0'], 'C': ['C0']})
    >>> output_table = 'test.csv'
    >>> row_to_table(row_data, output_table)
    """
    from nipype.algorithms import misc

    # AddCSVRow appends under a file lock, so concurrent writers are safe.
    addrow = misc.AddCSVRow()
    addrow.inputs.in_file = output_table
    addrow.inputs.set(**row_data.to_dict())
    addrow.run()


def read_accel_json(input_file, start=0, device_motion=True):
    """
    Read accelerometer or deviceMotion json file.

    Parameters
    ----------
    input_file : string
        name of input accelerometer json file
    start : integer
        starting index (remove beginning)
    device_motion : Boolean
        use deviceMotion vs. accelerometer json file?

    Returns
    -------
    t : list
        time points for accelerometer data
    axyz : list of lists
        x-, y-, and z-axis accelerometer data
    gxyz : list of lists
        x-, y-, and z-axis gravity (if deviceMotion)
    wxyz : list of lists
        w, x, y, z attitude quaternion (if deviceMotion)
    rxyz : list of lists
        x-, y-, and z-axis rotationRate (if deviceMotion)
    sample_rate : float
        sample rate
    duration : float
        duration of time series

    Examples
    --------
    >>> from mhealthx.xio import read_accel_json
    >>> input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/deviceMotion_walking_outbound.json.items-90f7096a-84ac-4f29-a4d1-236ef92c3d262549858224214804657.tmp'
    >>> start = 150
    >>> device_motion = True
    >>> t, axyz, gxyz, wxyz, rxyz, sample_rate, duration = read_accel_json(input_file, start, device_motion)
    """
    import json

    from mhealthx.signals import compute_sample_rate, gravity_min_mse

    # The whole json payload is on the first line of the file.
    # NOTE(review): file handle is never closed.
    f = open(input_file, 'r')
    json_strings = f.readlines()
    parsed_jsons = json.loads(json_strings[0])

    t = []
    ax = []
    ay = []
    az = []
    gx = []
    gy = []
    gz = []
    uw = []
    ux = []
    uy = []
    uz = []
    rx = []
    ry = []
    rz = []
    # Skip the first `start` samples (settling-in period).
    for parsed_json in parsed_jsons[start::]:
        if device_motion:
            # deviceMotion files carry user acceleration, gravity,
            # attitude quaternion and rotation rate per sample.
            ax.append(parsed_json['userAcceleration']['x'])
            ay.append(parsed_json['userAcceleration']['y'])
            az.append(parsed_json['userAcceleration']['z'])
            t.append(parsed_json['timestamp'])
            gx.append(parsed_json['gravity']['x'])
            gy.append(parsed_json['gravity']['y'])
            gz.append(parsed_json['gravity']['z'])
            uw.append(parsed_json['attitude']['w'])
            ux.append(parsed_json['attitude']['x'])
            uy.append(parsed_json['attitude']['y'])
            uz.append(parsed_json['attitude']['z'])
            rx.append(parsed_json['rotationRate']['x'])
            ry.append(parsed_json['rotationRate']['y'])
            rz.append(parsed_json['rotationRate']['z'])
        else:
            # Plain accelerometer files only carry x/y/z and timestamp;
            # the gravity/attitude/rotation lists stay empty.
            ax.append(parsed_json['x'])
            ay.append(parsed_json['y'])
            az.append(parsed_json['z'])
            t.append(parsed_json['timestamp'])

    axyz = [ax, ay, az]
    gxyz = [gx, gy, gz]
    wxyz = [uw, ux, uy, uz]
    rxyz = [rx, ry, rz]

    sample_rate, duration = compute_sample_rate(t)

    return t, axyz, gxyz, wxyz, rxyz, sample_rate, duration


def read_tap_json(input_file, start=0):
    """
    Read screen tap json file.

    Parameters
    ----------
    input_file : string
        name of input screen tap json file
    start : integer
        starting index (remove beginning)

    Returns
    -------
    t : list
        time points for tap data
    tx : list
        x coordinates of touch screen
    ty : list
        y coordinates of touch screen
    button : list
        buttons tapped
    sample_rate : float
        sample rate
    duration : float
        duration of time series

    Examples
    --------
    >>> from mhealthx.xio import read_tap_json
    >>> input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/tapping_results.json.TappingSamples-49d2531d-dbda-4b6d-b403-f8763b8e05841011283015383434299.tmp'
    >>> start = 0
    >>> t, tx, ty, button, sample_rate, duration = read_tap_json(input_file, start)
    """
    import json
    import re

    from mhealthx.signals import compute_sample_rate

    # The whole json payload is on the first line of the file.
    # NOTE(review): file handle is never closed.
    f = open(input_file, 'r')
    json_strings = f.readlines()
    parsed_jsons = json.loads(json_strings[0])

    t = []
    tx = []
    ty = []
    button = []
    for parsed_json in parsed_jsons[start::]:
        # TapCoordinate is a string like "{x, y}"; pull out the digits.
        txy = re.findall(r'\d+', parsed_json['TapCoordinate'])
        tx.append(int(txy[0]))
        ty.append(int(txy[1]))
        t.append(parsed_json['TapTimeStamp'])
        button.append(parsed_json['TappedButtonId'])

    sample_rate, duration = compute_sample_rate(t)

    return t, tx, ty, button, sample_rate, duration


def get_accel(synapse_table, row, column_name, start=0, device_motion=True,
              out_path='.', username='', password=''):
    """
    Read accelerometer json data from Synapse table row.

    Calls ::
        from mhealthx.xio import read_file_from_synapse_table
        from mhealthx.xio import read_accel_json

    Parameters
    ----------
    synapse_table : string or Schema
        a synapse ID or synapse table Schema object
    row : pandas Series or string
        row of a Synapse table converted to a Series or csv file
    column_name : string
        name of file handle column
    start : integer
        starting index (remove beginning)
    device_motion : Boolean
        use deviceMotion vs. accelerometer json file?
    out_path : string or None
        a local path in which to store downloaded files.
        If None, stores them in (~/.synapseCache)
    username : string
        Synapse username (only needed once on a given machine)
    password : string
        Synapse password (only needed once on a given machine)

    Returns
    -------
    t : list
        time points for accelerometer data
    ax : list
        x-axis acceleration
    ay : list
        y-axis acceleration
    az : list
        z-axis acceleration
    gx : list
        x-axis gravity acceleration
    gy : list
        y-axis gravity acceleration
    gz : list
        z-axis gravity acceleration
    rx : list
        x-axis rotationRate
    ry : list
        y-axis rotationRate
    rz : list
        z-axis rotationRate
    uw : list
        w of attitude quaternion
    ux : list
        x of attitude quaternion
    uy : list
        y of attitude quaternion
    uz : list
        z of attitude quaternion
    sample_rate : float
        sample rate
    duration : float
        duration of time series
    row : pandas Series
        same as passed in: row of a Synapse table as a file or Series
    file_path : string
        path to accelerometer file

    Examples
    --------
    >>> from mhealthx.xio import extract_synapse_rows, read_file_from_synapse_table, get_accel
    >>> import synapseclient
    >>> syn = synapseclient.Synapse()
    >>> syn.login()
    >>> synapse_table = 'syn4590866'
    >>> row_series, row_files = extract_synapse_rows(synapse_table, save_path='.', limit=3, username='', password='')
    >>> column_name = 'deviceMotion_walking_outbound.json.items'
    >>> device_motion = True
    >>> start = 150
    >>> out_path = None
    >>> username = ''
    >>> password = ''
    >>> for i in range(1):
    >>>     row = row_series[i]
    >>>     row, filepath = read_file_from_synapse_table(synapse_table, row,
    >>>         column_name, out_path, username, password)
    >>>     print(row)
    >>>     t, ax, ay, az, gx, gy, gz, rx, ry, rz, uw, ux, uy, uz, sample_rate, duration, row, file_path = get_accel(synapse_table,
    >>>         row, column_name,
    >>>         start, device_motion,
    >>>         out_path, username, password)
    """
    from mhealthx.xio import read_file_from_synapse_table, read_accel_json

    # Load row data and accelerometer json file (full path):
    row, file_path = read_file_from_synapse_table(synapse_table, row,
                                                  column_name, out_path,
                                                  username, password)
    # Read accelerometer json file:
    t, axyz, gxyz, wxyz, rxyz, sample_rate, \
        duration, = read_accel_json(file_path, start, device_motion)

    # Unpack the grouped axis lists into flat per-axis return values.
    ax, ay, az = axyz
    gx, gy, gz = gxyz
    rx, ry, rz = rxyz
    uw, ux, uy, uz = wxyz

    return t, ax, ay, az, gx, gy, gz, rx, ry, rz, uw, ux, uy, uz, \
        sample_rate, duration, row, file_path


def get_tap(synapse_table, row, column_name, start=0,
            out_path='.', username='', password=''):
    """
    Read screen tapping json data from Synapse table row.

    Calls ::
        from mhealthx.xio import read_file_from_synapse_table
        from mhealthx.xio import read_tap_json

    Parameters
    ----------
    synapse_table : string or Schema
        a synapse ID or synapse table Schema object
    row : pandas Series or string
        row of a Synapse table converted to a Series or csv file
    column_name : string
        name of file handle column
    start : integer
        starting index (remove beginning)
    out_path : string or None
        a local path in which to store downloaded files.
        If None, stores them in (~/.synapseCache)
    username : string
        Synapse username (only needed once on a given machine)
    password : string
        Synapse password (only needed once on a given machine)

    Returns
    -------
    tx : list
        x-axis screen tap data
    ty : list
        y-axis screen tap data
    t : list
        time points for accelerometer data
    sample_rate : float
        sample rate
    duration : float
        duration of time series
    row : pandas Series
        same as passed in: row of a Synapse table as a file or Series
    file_path : string
        path to accelerometer file

    Examples
    --------
    >>> from mhealthx.xio import extract_synapse_rows, read_file_from_synapse_table, get_tap
    >>> import synapseclient
    >>> syn = synapseclient.Synapse()
    >>> syn.login()
    >>> synapse_table = 'syn4590866'
    >>> row_series, row_files = extract_synapse_rows(synapse_table, save_path='.', limit=3, username='', password='')
    >>> column_name = 'deviceMotion_walking_outbound.json.items'
    >>> start = 150
    >>> out_path = None
    >>> username = ''
    >>> password = ''
    >>> for i in range(1):
    >>>     row = row_series[i]
    >>>     row, filepath = read_file_from_synapse_table(synapse_table, row,
    >>>         column_name, out_path, username, password)
    >>>     print(row)
    >>>     tx, ty, t, sample_rate, duration, row, file_path = get_tap(synapse_table,
    >>>         row, column_name,
    >>>         start, out_path, username, password)
    """
    from mhealthx.xio import read_file_from_synapse_table, read_tap_json

    # Load row data and accelerometer json file (full path):
    row, file_path = read_file_from_synapse_table(synapse_table, row,
                                                  column_name, out_path,
                                                  username, password)
    # Read accelerometer json file:
    t, tx, ty, button, sample_rate, duration = read_tap_json(file_path, start)

    return tx, ty, t, sample_rate, duration, row, file_path


def get_convert_audio(synapse_table, row, column_name,
                      convert_file_append='', convert_command='ffmpeg',
                      convert_input_args='-y -i', convert_output_args='-ac 2',
                      out_path='.', username='', password=''):
    """
    Read data from a row of a Synapse table and convert audio file.
Calls :: from mhealthx.xio import read_file_from_synapse_table from mhealthx.xio import convert_audio_file Parameters ---------- synapse_table : string or Schema a synapse ID or synapse table Schema object row : pandas Series or string row of a Synapse table converted to a Series or csv file column_name : string name of file handle column convert_file_append : string append to file name to indicate converted file format (e.g., '.wav') convert_command : string executable command without arguments convert_input_args : string arguments preceding input file name for convert_command convert_output_args : string arguments preceding output file name for convert_command out_path : string or None a local path in which to store downloaded files. username : string Synapse username (only needed once on a given machine) password : string Synapse password (only needed once on a given machine) Returns ------- row : pandas Series same as passed in: row of a Synapse table as a file or Series new_file : string full path to the converted file Examples -------- >>> from mhealthx.xio import get_convert_audio >>> from mhealthx.xio import extract_synapse_rows, read_file_from_synapse_table >>> import synapseclient >>> syn = synapseclient.Synapse() >>> syn.login() >>> synapse_table = 'syn4590865' >>> row_series, row_files = extract_synapse_rows(synapse_table, save_path='.', limit=3, username='', password='') >>> column_name = 'audio_audio.m4a' #, 'audio_countdown.m4a'] >>> convert_file_append = '.wav' >>> convert_command = 'ffmpeg' >>> convert_input_args = '-y -i' >>> convert_output_args = '-ac 2' >>> out_path = '.' 
>>> username = '' >>> password = '' >>> for i in range(1): >>> row = row_series[i] >>> row, filepath = read_file_from_synapse_table(synapse_table, row, >>> column_name, out_path, username, password) >>> print(row) >>> row, new_file = get_convert_audio(synapse_table, >>> row, column_name, >>> convert_file_append, >>> convert_command, >>> convert_input_args, >>> convert_output_args, >>> out_path, username, password) """ from mhealthx.xio import read_file_from_synapse_table, convert_audio_file row, file_path = read_file_from_synapse_table(synapse_table, row, column_name, out_path, username, password) if convert_file_append: renamed_file = file_path + convert_file_append new_file = convert_audio_file(old_file=file_path, new_file=renamed_file, command=convert_command, input_args=convert_input_args, output_args=convert_output_args) else: new_file = None return row, new_file def convert_audio_file(old_file, new_file, command='ffmpeg', input_args='-i', output_args='-ac 2'): """ Convert audio file to new format. 
Parameters ---------- old_file : string full path to the input file new_file : string full path to the output file command : string executable command without arguments input_args : string arguments preceding input file name in command output_args : string arguments preceding output file name in command Returns ------- new_file : string full path to the output file Examples -------- >>> from mhealthx.xio import convert_audio_file >>> old_file = '/Users/arno/mhealthx_cache/mhealthx/feature_files/test.m4a' >>> new_file = 'test.wav' >>> command = 'ffmpeg' >>> input_args = '-y -i' >>> output_args = '-ac 2' >>> new_file = convert_audio_file(old_file, new_file, command, input_args, output_args) """ import os from nipype.interfaces.base import CommandLine if not os.path.isfile(old_file): raise IOError("{0} does not exist.".format(old_file)) new_file = None else: input_args = ' '.join([input_args, old_file, output_args, new_file]) try: # Nipype command line wrapper: cli = CommandLine(command = command) cli.inputs.args = input_args cli.cmdline cli.run() except: import traceback; traceback.print_exc() print("'{0} {1}' unsuccessful".format(command, input_args)) new_file = None return new_file def write_wav(data, file_stem, file_append, sample_rate=44100, amplitude=32700): """ Convert a list or array of numbers to a .wav format audio file. 
After: http://blog.acipo.com/wave-generation-in-python/ and https://gist.github.com/Pretz/1773870 and http://codingmess.blogspot.com/2008/07/ how-to-make-simple-wav-file-with-python.html Parameters ---------- data : list or array of floats or integers input data to convert to audio file file_stem : string stem of file name of output audio file (including absolute path) file_append : string append string to file_stem for full output audio file path and name sample_rate : integer number of desired samples per second for audio file amplitude : integer maximum amplitude for audio file (32700 is within signed short) Returns ------- wav_file : string name of output .wav audio file Examples -------- >>> from mhealthx.xio import write_wav >>> import numpy as np >>> from scipy.signal import resample >>> file_stem = '/desk/temp' >>> file_append = 'write_wav.wav' >>> sample_rate = 44100 >>> amplitude = 32700 >>> data = np.random.random(500000) >>> data /= np.max(np.abs(data)) >>> #data = resample(data, sample_rate/framerate) >>> wav_file = write_wav(data, file_stem, file_append, sample_rate, amplitude) """ import os import numpy as np import wave import struct wav_file = file_stem + file_append wavfile = wave.open(wav_file, "w") nchannels = 1 sampwidth = 2 framerate = sample_rate nframes = len(data) comptype = "NONE" compname = "not compressed" wavfile.setparams((nchannels, sampwidth, framerate, nframes, comptype, compname)) data = np.asarray(data) data /= np.max(np.abs(data)) data *= amplitude data = np.round(data) for x in data: value = struct.pack('i', x) wavfile.writeframesraw(value) wavfile.writeframes('') wavfile.close() if not os.path.isfile(wav_file): raise IOError("{0} has not been written.".format(filename)) return wav_file def concatenate_tables_vertically(tables, output_csv_file=None): """ Vertically concatenate multiple table files or pandas DataFrames with the same column names and store as a csv table. 
Parameters ---------- tables : list of table files or pandas DataFrames each table or dataframe has the same column names output_csv_file : string or None output table file (full path) Returns ------- table_data : Pandas DataFrame output table data output_csv_file : string or None output table file (full path) Examples -------- >>> import pandas as pd >>> from mhealthx.xio import concatenate_tables_vertically >>> df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], >>> 'B': ['B0', 'B1', 'B2', 'B3'], >>> 'C': ['C0', 'C1', 'C2', 'C3']}, >>> index=[0, 1, 2, 3]) >>> df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], >>> 'B': ['B4', 'B5', 'B6', 'B7'], >>> 'C': ['C4', 'C5', 'C6', 'C7']}, >>> index=[0, 1, 2, 3]) >>> tables = [df1, df2] >>> tables = ['/Users/arno/csv/table1.csv', '/Users/arno/csv/table2.csv'] >>> output_csv_file = None #'./test.csv' >>> table_data, output_csv_file = concatenate_tables_vertically(tables, output_csv_file) """ import os import pandas as pd if not tables: table_data = None output_csv_file = None else: try: # pandas DataFrames: if type(tables[0]) == pd.DataFrame: tables_no_Nones = [] for table in tables: if table is not None and type(table) == pd.DataFrame: tables_no_Nones.append(table) tables = tables_no_Nones # file strings: elif type(tables[0]) == str: tables_from_files = [] for table in tables: if os.path.isfile(table): tables_from_files.append(pd.read_csv(table)) else: raise Warning('{0} is not a file.'.format(table)) tables = tables_from_files else: raise Warning("'tables' should contain strings or " "pandas DataFrames.") # Vertically concatenate tables: table_data = pd.concat(tables, ignore_index=True) # Store as csv file: if output_csv_file: table_data.to_csv(output_csv_file, index=False) except: import traceback; traceback.print_exc() table_data = None output_csv_file = None return table_data, output_csv_file def select_columns_from_table(table, column_headers, write_table=True, output_table=''): """ Select columns from table and make a 
new table. Parameters ---------- table : string column_headers : list of strings headers for columns to select write_table : Boolean write output table? output_table : string output table file name Returns ------- columns : list of lists of floats or integers columns of data output_table : string output table file name Examples -------- >>> import os >>> from mhealthx.xio import select_columns_from_table >>> path = os.environ['MHEALTHX_OUTPUT'] >>> table = os.path.join(path, 'feature_tables', ... 'tap_row0_v0_9d44a388-5d7e-4271-8705-2faa66204486.csv') >>> column_headers = ['tapping_results.json.ButtonRectLeft', ... 'accel_tapping.json.items'] >>> write_table = True >>> output_table = '' >>> columns, output_table = select_columns_from_table(table, column_headers, write_table, output_table) """ import os import pandas as pd #------------------------------------------------------------------------- # Load table: #------------------------------------------------------------------------- input_columns = pd.read_csv(table) #------------------------------------------------------------------------- # Construct a table from selected columns: #------------------------------------------------------------------------- columns = input_columns[column_headers] #------------------------------------------------------------------------- # Write table: #------------------------------------------------------------------------- if write_table and not columns.empty: if not output_table: output_table = os.path.join(os.getcwd(), 'select_columns_from_table.csv') columns.to_csv(output_table, index=False) else: print('Not saving table.') return columns, output_table def write_synapse_table(table_data, synapse_project_id, table_name='', username='', password=''): """ Write data to a Synapse table. 
Parameters ---------- table_data : Pandas DataFrame Synapse table contents synapse_project_id : string Synapse ID for project within which table is to be written table_name : string schema name of table username : string Synapse username (only needed once on a given machine) password : string Synapse password (only needed once on a given machine) Examples -------- >>> import os >>> import pandas as pd >>> from mhealthx.xio import write_synapse_table >>> path = os.environ['MHEALTHX_OUTPUT'] >>> table = os.path.join(path, 'feature_tables', ... 'tap_row0_v0_9d44a388-5d7e-4271-8705-2faa66204486.csv') >>> table_data = pd.read_csv(table) >>> username = '' >>> password = '' >>> synapse_project_id = 'syn4899451' >>> table_name = 'Contents of table' >>> write_synapse_table(table_data, synapse_project_id, table_name, username, password) """ import synapseclient from synapseclient import Schema, Table, as_table_columns syn = synapseclient.Synapse(skip_checks=True) # Log in to Synapse: if username and password: syn.login(username, password, silent=True) else: syn.login(silent=True) #table_data.index = range(table_data.shape[0]) schema = Schema(name=table_name, columns=as_table_columns(table_data), parent=synapse_project_id, includeRowIdAndRowVersion=False) syn.store(Table(schema, table_data)) def write_columns_to_synapse_table(table, column_headers, synapse_project_id, table_name='', username='', password=''): """ Select columns from a table and write data to a Synapse table. 
Parameters ---------- table : string column_headers : list of strings headers for columns to select synapse_project_id : string Synapse ID for project within which table is to be written table_name : string schema name of table username : string Synapse username (only needed once on a given machine) password : string Synapse password (only needed once on a given machine) Examples -------- >>> import os >>> from mhealthx.xio import write_columns_to_synapse_table >>> path = os.environ['MHEALTHX_OUTPUT'] >>> table = os.path.join(path, 'feature_tables', ... 'tap_row0_v0_9d44a388-5d7e-4271-8705-2faa66204486.csv'] >>> column_headers = ['tapping_results.json.ButtonRectLeft', ... 'accel_tapping.json.items'] >>> username = '' >>> password = '' >>> synapse_project_id = 'syn4899451' >>> table_name = 'Contents written to ' + synapse_table >>> write_columns_to_synapse_table(table, column_headers, synapse_project_id, table_name, username, password) """ from mhealthx.xio import select_columns_from_table, write_synapse_table #------------------------------------------------------------------------- # Select columns: #------------------------------------------------------------------------- columns, output_table = select_columns_from_table(table, column_headers, False, '') write_synapse_table(columns, synapse_project_id, table_name, username, password) # ============================================================================ if __name__ == '__main__': pass
# Import all the bits and bobs we'll need
# Script: aggregates win/loss, kill/death, 4-stock and matchup statistics from
# a folder of Slippi (.slp) Melee replays, then prints and plots them.
# NOTE(review): most functions communicate through module-level globals
# (game, filename, wins, losses, i, characters, ...) set up by populateData();
# they are not safe to call in isolation.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import slippi
import time
from datetime import datetime
import os
import sys
import matplotlib.image as mpimg
import matplotlib
import gc
# from slippi import Game
# from slippi import Game1Frame
from slippi import Game
import json
import preprocessing as pp
import states
import pdb #debugger

global folderUrl
folderUrl = ""

#####wins and losses stats##########
# Per-character tally dicts come pre-shaped from the preprocessing module.
myWinLoss = pp.myWinLoss
oppWinLoss = pp.oppWinLoss
myKillDeath = pp.myKillDeath
myMatchups = pp.myMatchups

all_games = pd.DataFrame(columns = ['jiggs_port', 'opp_port', 'matchup', 'stage',
                                    # 'win',
                                    'duration',
                                    # 'filename',
                                    'date'])

# DataFrames rebuilt from the tally dicts by the add*() helpers before plotting.
MYWINLOSS = None
OPPWINLOSS = None
MYRATIO = None

######kills and deaths stats############
MYKILLDEATH = None

MYMATCHUPSLIST = []
MYMATCHUPS = None

"""
-----------
STATS
-----------
shortest game ?
longest game ?
number of 4 stocks, i've done X
number of 4 stocks on me X
num of deaths X
num of kills X
total w/l X
w/l for each character X
total k/d X
k/d for each character X
data over time: wins (per character too)
stock graph
matchups?
"""

# Running totals across all processed replays.
MyFourStocks = 0
OppFourStocks = 0
Deaths = 0
Kills = 0

def getStats(characters):
    # Run all per-game tallies; returns False if the replay data is unusable.
    global MyFourStocks, OppFourStocks, Deaths, Kills, game, myWinLoss, oppWinLoss
    #refactor countwins and losses
    try:
        countDeaths(characters)
    except AttributeError: #"'NoneType' has no attribute 'leader'"
        return False
    except ValueError:
        return False
    countWins(characters)
    countMatchups(characters)
    return True

def countWins(characters):
    # Decide the winner of the current global `game` and update the
    # per-character and overall win/loss tallies.
    global filename, wins, losses, i, game, myWinLoss, oppWinLoss
    me = pp.character_name(characters[0][1])
    opp = pp.character_name(characters[1][1])
    # Filename embeds the start timestamp, e.g. Game_20200101T120000.slp
    date = datetime.strptime(filename[5:-4], "%Y%m%dT%H%M%S")
    game.won = wonGame(game, characters[0][0], characters[1][0])
    # pdb.set_trace()
    if game.won:
        game.won = "WON"
        myWinLoss[me]["wins"] += 1
        oppWinLoss[opp]["losses"] += 1
        wins += 1
    else:
        game.won = "LOST"
        myWinLoss[me]["losses"] += 1
        oppWinLoss[opp]["wins"] += 1
        # print(f"Adding 1 loss to {me}, 1 win to {opp}")
        losses += 1
    # print(myWinLoss)
    # print(oppWinLoss)
    # print(f"{i}: {game.won} {pp.character_name(characters[0][1])} (me) vs. {pp.character_name(characters[1][1])} on {datetime.strftime(date, '%D, %T')}")
    #we don't care about dittos
    # if characters[0][1] != pp.jiggs[0] and characters[1][1] != pp.jiggs[0]:
    # if characters[0][1] != pp.jiggs[0]:
    #     characters.reverse()
    # add_game(game, characters, filename, date)

def countDeaths(characters):
    # count deaths of me, add to total, count 4 stocks
    # A death is counted on the first frame of a dead action state; the
    # last-state tracking avoids double-counting consecutive dead frames.
    global MyFourStocks, OppFourStocks, Deaths, Kills, game
    myLastState = None
    oppLastState = None
    myDeathsThisGame = 0
    oppDeathsThisGame = 0
    for frame in game.frames:
        myState = slippi.id.ActionState(frame.ports[0].leader.post.state)
        oppState = slippi.id.ActionState(frame.ports[1].leader.post.state)
        if states.is_dead(myState): #i died
            if myState != myLastState: #check to make sure it's not duplicated
                myLastState = myState
                Deaths += 1
                myDeathsThisGame += 1
                myKillDeath[pp.character_name(frame.ports[0].leader.post.character)]["deaths"] += 1
        else:
            myLastState = None
        if states.is_dead(oppState): #opponent died
            if oppState != oppLastState: #check to make sure it's not duplicated
                oppLastState = oppState
                Kills += 1
                oppDeathsThisGame += 1
                myKillDeath[pp.character_name(frame.ports[0].leader.post.character)]["kills"] += 1
        else:
            oppLastState = None
    if myDeathsThisGame == 4 and oppDeathsThisGame == 0:
        MyFourStocks += 1
    if myDeathsThisGame == 0 and oppDeathsThisGame == 4:
        OppFourStocks += 1
    # print([myKillDeath[x] for x in myKillDeath if myKillDeath[x] and myKillDeath[x]["kills"]])

def countMatchups(characters):
    # count matchup wins and losses
    # Relies on game.won having been set by countWins() for this game.
    global myMatchups, game
    me = pp.character_name(characters[0][1])
    opp = pp.character_name(characters[1][1])
    # print(game.won)
    if game.won == "WON":
        myMatchups[me][opp]["wins"] += 1
    else:
        myMatchups[me][opp]["losses"] += 1
    # print(myMatchups[me][opp])

# Determines if i won
def wonGame(game, myPort, opp_port):
    # Get the last frame
    last_frame = game.frames[-1]
    # The post-processing result of the last frame
    j_post = last_frame.ports[myPort].leader.post
    opp_post = last_frame.ports[opp_port].leader.post
    # First, compare stock counts
    j_stocks = j_post.stocks
    opp_stocks = opp_post.stocks
    if j_stocks > opp_stocks:
        return True
    if j_stocks < opp_stocks:
        return False
    # If stocks are the same, compare percent
    j_dmg = j_post.damage
    opp_dmg = opp_post.damage
    # It's almost unheard of for both players to end at the exact same percent.
    # In this extremely unlikely event, we give the match to jiggs
    if j_dmg <= opp_dmg:
        return True
    else:
        return False

# Add a single game to our dataframe
def add_game(game, characters, fname, date):
    global all_games
    myPort = characters[0][0]
    opp_port = characters[1][0]
    game_data = {
        'jiggs_port': myPort,
        'opp_port': opp_port,
        'matchup': pp.character_name(opp_port),
        'stage': pp.stage_name(game.start.stage),
        'win': game.won,
        'duration': game.metadata.duration,
        # 'filename': fname,
        'date': datetime.strftime(date, '%D, %T')
    }
    all_games = all_games.append(game_data, ignore_index = True)

def addWinLoss():
    # Rebuild MYWINLOSS / MYRATIO / OPPWINLOSS DataFrames from the tally dicts.
    global myWinLoss, oppWinLoss, MYWINLOSS, OPPWINLOSS, MYRATIO
    # i = 0
    MYWINLOSS = None
    MYWINLOSS = pd.DataFrame(columns = ['character', 'wins', 'losses'])
    MYRATIO = None
    MYRATIO = pd.DataFrame(columns = ['character', 'wlRatio'])
    for key in myWinLoss:
        if myWinLoss[key]:
            if myWinLoss[key]["wins"] == 0 and myWinLoss[key]["losses"] == 0:
                continue
            data = {
                "character": key,
                "wins": myWinLoss[key]["wins"],
                "losses": myWinLoss[key]["losses"]
            }
            MYWINLOSS = MYWINLOSS.append(data, ignore_index = True)
            # Avoid division by zero in the ratio below.
            # NOTE(review): this mutates the shared tally dict itself.
            if myWinLoss[key]["losses"] == 0:
                myWinLoss[key]["losses"] = 1
            MYRATIO = MYRATIO.append({"character": key, "wlRatio": myWinLoss[key]["wins"] / myWinLoss[key]["losses"]}, ignore_index = True)
    OPPWINLOSS = None
    OPPWINLOSS = pd.DataFrame(columns = ['character', 'wins', 'losses'])
    for key in oppWinLoss:
        if oppWinLoss[key]["wins"] == 0 and oppWinLoss[key]["losses"] == 0:
            continue
        data = {
            "character": key,
            "wins": oppWinLoss[key]["wins"],
            "losses": oppWinLoss[key]["losses"]
        }
        OPPWINLOSS = OPPWINLOSS.append(data, ignore_index = True) #add to dataframe

def addKillDeath():
    # Rebuild MYKILLDEATH DataFrame from the kill/death tally dict.
    global myKillDeath, MYKILLDEATH
    MYKILLDEATH = None
    MYKILLDEATH = pd.DataFrame(columns = ['character', 'kills', 'deaths'])
    for key in myKillDeath:
        if myKillDeath[key] and myKillDeath[key]["deaths"] > 0:
            data = {
                "character": key,
                "kills": myKillDeath[key]["kills"],
                "deaths": myKillDeath[key]["deaths"]
            }
            MYKILLDEATH = MYKILLDEATH.append(data, ignore_index = True)#add to dataframe

def addMatchups(characters=pp.all_characters):
    # Build {my_character: [{character, wins, losses}, ...]} for plotting.
    global myMatchups, MYMATCHUPS
    MYMATCHUPS = None
    matchupsCharacterData = {}
    MYMATCHUPS = pd.DataFrame(columns = ['character', 'wins', 'losses'])
    # print("columns: " + JSON.stringify(c))
    for me in myMatchups:
        # Hard-coded exclusion list of characters never played.
        if me != "Pichu" and me != "Bowser" and me != "Kirby" and me != "Ness" and me != "Mewtwo":
        # if me in characters:
            matchupsCharacterData[me] = []
            for opp in myMatchups[me]:
                data = {}
                # pdb.set_trace()
                # if myMatchups[me][opp]["wins"] + myMatchups[me][opp]["losses"] > 10: #if more than 100 games
                data["character"] = opp
                data["wins"] = myMatchups[me][opp]["wins"]
                data["losses"] = myMatchups[me][opp]["losses"]
                if data["wins"] > 0 or data["losses"] > 0:
                    matchupsCharacterData[me].append(data)
                # MYMATCHUPS = MYMATCHUPS.append(data, ignore_index = True)
    # print("MYMATCHUPS")
    # print(MYMATCHUPS)
    # matchupsCharacterData.append(MYMATCHUPS)
    return matchupsCharacterData

# Makes sure the game is not corrupt before returning it
def validate_game(fname):
    # Returns the parsed Game, or None if py-slippi failed to parse the file.
    # print(folderUrl + "/" + fname)
    try:
        game = Game(folderUrl + "/" + fname)
        # game = Game1Frame('games/' + fname)
        return game
    except KeyboardInterrupt:
        sys.exit()
    except:
        print('Game ' + fname + ' contains corrupt data.')
        return None

def makeMeFirstPlayer(game, ports):
    # Fill the global `characters` list as [(my_port, my_char), (opp_port, opp_char)]
    # so index 0 is always me. Returns False for 3+ player games.
    global characters
    if game.myport == 0:
        if ports[0]:
            char = ports[0].leader.post.character
            characters.append((0, char))
            char = ports[1].leader.post.character
            characters.append((1, char))
    elif game.myport == 1:
        if ports[1]:
            char = ports[1].leader.post.character
            characters.append((1, char))
            char = ports[0].leader.post.character
            characters.append((0, char))
    else:
        print("3+ player game, skipping...");
        return False
    return True

def populateData(portDict, url):
    # Main driver: walk every replay in `url`, tally stats, then print/plot.
    # portDict maps replay filename -> my player port in that game.
    global folderUrl, myWinLoss, oppWinLoss, wins, losses, badfiles, filename, wins, losses, i, characters, game
    folderUrl = url
    # setupRecords()
    wins = 0
    losses = 0
    # print(f"myports: {myports}")
    i = 0
    badfiles = 0
    # print(myWinLoss)
    for filename in os.listdir(folderUrl):
        try:
            myport = portDict[filename] #check if filename exists in portDict
        except KeyError:
            badfiles += 1
            continue # no match found
        game = validate_game(filename) #massively slow, why?
        # game = Game1Frame('games/' + filename) #for short, quick data
        # print(game.frames[1].ports)
        if not game:
            badfiles += 1
            continue
        game.myport = myport
        del myport
        #First frame of the game
        try:
            frame_one = game.frames[1]
        except IndexError:
            badfiles += 1
            continue
        ports = frame_one.ports
        characters = list()
        ##### i am always first player :) #####
        try:
            if not makeMeFirstPlayer(game, ports):
                continue
        except AttributeError: #strange error, corrupted data
            badfiles += 1
            continue
        ######
        try:
            me = pp.character_name(characters[0][1])
            opp = pp.character_name(characters[1][1])
            date = datetime.strptime(filename[5:-4], "%Y%m%dT%H%M%S")
        except IndexError:
            badfiles += 1
            continue
        if not getStats(characters): #if there's an error in the game
            badfiles += 1
            continue
        i += 1
        # if i % 10 == 0: #for testing
        #     plotData()
        # printData()
    printDataForParse()
    time.sleep(1)
    print("DONE")
    plotData()
    # print(f"{badfiles} bad files found")

def plotData():
    # Render bar charts for win/loss, ratio, kill/death and per-character matchups.
    global MYWINLOSS, OPPWINLOSS, MYRATIO, wins, losses, badfiles
    addWinLoss()
    addKillDeath()
    matchupsCharacterData = addMatchups()
    # print("plotting")
    killColors = ['b', 'black']
    winColors = ['g', 'r']
    MYWINLOSS.plot(x="character", y=["wins", "losses"], kind="bar", color=winColors).set_title("My Character\'s wins and losses")
    MYRATIO.plot(x="character", y="wlRatio", kind="bar", color=['c']).set_title("My Character\'s win/loss ratio")
    OPPWINLOSS.plot(x="character", y=["wins", "losses"], kind="bar", color=winColors[::-1]).set_title("Opponent's Character's wins and losses")
    MYKILLDEATH.plot(x="character", y=["kills", "deaths"], kind="bar", color=killColors).set_title("My Character's kills and deaths")
    for x in matchupsCharacterData:
        if matchupsCharacterData[x]:
            MYMATCHUPS = None
            MYMATCHUPS = pd.DataFrame(columns = ['character', 'wins', 'losses'])
            MYMATCHUPS = MYMATCHUPS.append(matchupsCharacterData[x], ignore_index = True)
            MYMATCHUPS.plot(x="character", y=["wins", "losses"], kind="bar", color=winColors).set_title(f"My {x}'s matchups")
    plt.show()

def printData():
    # Human-readable stats dump to stdout.
    # NOTE(review): divides by Deaths and losses — raises ZeroDivisionError
    # when either total is 0.
    global myWinLoss, oppWinLoss, wins, losses, badfiles
    print()
    print()
    print(f"----{Kills} total kills----{Deaths} total deaths----{Kills/Deaths} (Ratio)----{MyFourStocks} 4stocks----{OppFourStocks} opp4stocks----")
    print()
    print("____________________________MY_WIN_LOSS_RATIO__________________________")
    for key in myWinLoss:
        if myWinLoss[key] and myWinLoss[key]["losses"]:
            print(f"{key}: {myWinLoss[key]['wins']} total wins, {myWinLoss[key]['losses']} total losses. Ratio of {myWinLoss[key]['wins'] / myWinLoss[key]['losses']}")
    print("___________________________OPP_WIN_LOSS_RATIO__________________________")
    for key in oppWinLoss:
        if oppWinLoss[key] and oppWinLoss[key]["losses"]:
            print(f"{key}: {oppWinLoss[key]['wins']} total wins, {oppWinLoss[key]['losses']} total losses. Ratio of {oppWinLoss[key]['wins'] / oppWinLoss[key]['losses']}")
    print("_______________________________________________________________________")
    print(f"{wins} total wins, {losses} total losses. Ratio of {wins/losses}")
    print("_______________________________________________________________________")

def printDataForParse():
    # Machine-parseable one-line summary list for a consuming process.
    global myWinLoss, oppWinLoss, wins, losses, badfiles
    pData = [wins, losses, Kills, Deaths, MyFourStocks, OppFourStocks, i, badfiles]
    # pData[0] = wins
    # pData[1] = losses
    # pData[2] = Kills
    # pData[3] = Deaths
    # pData[4] = MyFourStocks
    # pData[5] = OppFourStocks
    # pData[6] = i
    # pData[7] = badfiles
    print(pData)
#!/usr/bin/env python3
"""YOLO dataset preprocessing pipeline (part 1).

Walks ``images/`` to classify files, updates label files after changes to
``classes.txt``, renames/resizes new (unlabeled) images, and writes a YOLO
label file for each transparent image from its alpha-channel bounding box.
"""
import cv2
import hashlib
import imghdr
import numpy as np
import os
from PIL import Image, ImageDraw
from preprocess import (
    image_resize,
    file_as_bytes,
    hash_file,
    rename_files,
    is_transparent,
    append_background,
    blacklist,
    update_classes
)
from random import randint
import sys
from yolo_label_tools import count_from_top, find_pixel_edges, find_yolo_coordinates
from forecut import forecut, forecut_multiple

image_dir = "images"
class_file = "classes.txt"

# Classify every file under image_dir: recognized images, .txt label files,
# and anything else (collected for later inspection).
images = []
texts = []
errors = []
for r, d, f in os.walk(image_dir, topdown=False):
    for file in f:
        path = os.path.join(r, file)
        if imghdr.what(path) is not None:  # recognized image format
            images.append(path)
        elif path.endswith(".txt"):
            texts.append(path)
        else:
            errors.append(path)
# TODO: deal with errors

#### Prompt to perform updating of any text files (from an update to class_file)
update_classes(class_file, texts)

#### Find unlabeled (i.e. new) images: images with no matching .txt label file
images_basenames = [os.path.splitext(image)[0] for image in images]
images_extensions = [os.path.splitext(image)[1] for image in images]
extensions_dict = dict(zip(images_basenames, images_extensions))
texts_basenames = [os.path.splitext(text)[0] for text in texts]
print(texts_basenames)
lonely_images = set(images_basenames) - set(texts_basenames)
print(lonely_images)
unlabeled_images = [li + extensions_dict[li] for li in lonely_images]
print("Unlabeled images: ", unlabeled_images)
print()
print("Renaming/resizing images...\n")

#### rename, resize and separate new images into transparent/opaque groups
transparent_filepaths = []
opaque_filepaths = []
unique_images = list(
    set(images) - set(unlabeled_images)
)  # labeled images (already preprocessed)
for unlabeled_image in unlabeled_images:
    new_name = rename_files(unlabeled_image, unique_images)
    try:
        image_resize(new_name)
        # Plain if-statements replace the old "append(...) if cond else None"
        # expression-used-as-statement idiom.
        if is_transparent(new_name):
            if new_name not in transparent_filepaths:
                transparent_filepaths.append(new_name)
        else:
            if new_name not in opaque_filepaths:
                opaque_filepaths.append(new_name)
    except FileNotFoundError:
        print("File not found")
        continue

#### Take care of transparent images
print("Transparent images: ", transparent_filepaths)
print()
print("Opaque images: ", opaque_filepaths)
print()

# Add bounding boxes derived from the transparent images
print("Adding bounding boxes to transparent images...\n")
class_labels = []
with open("classes.txt") as f:
    for line in f:
        class_labels.append(line.strip("\n"))

# BUGFIX: iterate over a snapshot of the list. The original iterated the same
# list it called .remove() on, which silently skipped the element immediately
# following every blank image.
for transparent_path in list(transparent_filepaths):
    # The second path component (folder name) doubles as the class label.
    class_label = os.path.normpath(transparent_path).split(os.sep)[1]
    class_label_number = str(class_labels.index(class_label))  # yolo counts from 0
    coordinates = find_yolo_coordinates(transparent_path)
    # remove blank image, (repeated below, consider during refactorization
    if coordinates is None:
        print("blank image:", transparent_path, 'removing...')
        os.remove(transparent_path)
        transparent_filepaths.remove(transparent_path)
        continue
    coordinates = [str(coordinate) for coordinate in coordinates]
    line = " ".join([class_label_number] + coordinates)
    file_stem = os.path.splitext(transparent_path)[0]
    # One label line per image: "<class_number> <coords...>"
    with open(f"{file_stem}.txt", "w") as f:
        f.write(line)

# Add background!!!
# YOLO dataset preprocessing pipeline (part 2): composite random backgrounds
# behind the transparent images, then run background removal on opaque images
# and write YOLO label files from the result.
print("Adding backgrounds to transparent images...\n")
image_filepaths = transparent_filepaths  # Contains all the image filepaths
image_folderpaths = [
    os.path.normpath(transparent_filepath).split(os.sep)[1]
    for transparent_filepath in transparent_filepaths
]  # Contains the folder names (class labels) of all the images
bg_filepaths = []  # Will contain all the background filepaths
bg_folderpaths = []  # Will contain all the folder names of all the backgrounds
for r, d, f in os.walk("bg", topdown=False):
    for file in f:  # For all the background images
        path = os.path.join(r, file)
        folder = os.path.normpath(r).split(os.sep)[1]
        bg_filepaths.append(path)
        bg_folderpaths.append(folder)

# Get the number of background categories (to prevent an infinite reroll loop
# below if all background categories are blacklisted for an object)
number_bg_folders = len(set(bg_folderpaths))

for x in range(len(image_filepaths)):
    try:
        if len(blacklist[image_folderpaths[x]]) >= number_bg_folders:
            print("You blacklisted all background types. Skipping image.")
            # continue
        else:
            # Pick a random background
            random_image = randint(0, len(bg_filepaths) - 1)
            # Reroll if selected background is in the blacklist
            while bg_folderpaths[random_image] in blacklist[image_folderpaths[x]]:
                random_image = randint(0, len(bg_filepaths) - 1)
            append_background(image_filepaths[x], bg_filepaths[random_image])
    except KeyError:  # Object has no blacklist entry: any background is allowed
        print("Warning: Object not in blacklist")
        random_image = randint(0, len(bg_filepaths) - 1)
        append_background(image_filepaths[x], bg_filepaths[random_image])
print()

# === Background Removal === #
# TODO: Create (if not already existing) an `output` directory with the same
# file structure of subdirectories and images
# ====== ============ ====== #
if opaque_filepaths:
    forecut_multiple(opaque_filepaths)  # creates file without background
else:
    print("No background removal needed")

blank_images = []  # If model removes everything, we must deal with it manually
# BUGFIX: iterate over a snapshot of the list. The original called .remove()
# on the list it was iterating, which skipped the element immediately after
# every blank image.
for opaque_path in list(opaque_filepaths):
    # e.g. opaque_path = images/tires/abc.jpg
    file_stem = os.path.splitext(opaque_path)[0]  # images/tires/abc
    output_file = os.path.join(
        "output", file_stem + ".png"
    )  # output/images/tires/abc.png
    class_label = os.path.normpath(opaque_path).split(os.sep)[1]  # tires
    class_label_number = str(class_labels.index(class_label))  # yolo counts from 0
    coordinates = find_yolo_coordinates(output_file)
    # remove blank images
    if coordinates is None:
        print("blank image:", output_file)
        blank_images.append(opaque_path)
        # keep only images where background removal left something behind
        opaque_filepaths.remove(opaque_path)
        continue
    coordinates = [str(coordinate) for coordinate in coordinates]
    line = " ".join([class_label_number] + coordinates)
    print("appending coordinates:", line)
    with open(f"{file_stem}.txt", "w") as f:
        f.write(line)

if blank_images:
    print("""\nThe background was removed entirely for the following images, \
consider manual labeling""")
    for blank in blank_images:
        print("blank:", blank)

print("Pipeline complete!")
"""Circle-search demo: evolve a single polygon whose shape approaches a circle."""
import timeit

import matplotlib.pyplot as plt
import numpy as np

from gefest.core.geometry.geometry_2d import Geometry2D, create_circle
from gefest.core.opt.analytics import EvoAnalytics
from gefest.core.opt.optimize import optimize
from gefest.core.opt.setup import Setup
from gefest.core.structure.domain import Domain
from gefest.core.structure.structure import Structure
from gefest.core.viz.struct_vizualizer import StructVizualizer

geometry = Geometry2D()

# A 300x300 square working area holding exactly one polygon of 30..40 vertices.
allowed_area = [(0, 0), (0, 300), (300, 300), (300, 0), (0, 0)]
domain = Domain(allowed_area=allowed_area,
                geometry=geometry,
                max_poly_num=1,
                min_poly_num=1,
                max_points_num=40,
                min_points_num=30)
task_setup = Setup(domain=domain)


def area_length_ratio(struct: Structure):
    """Isoperimetric deficit of the first polygon (0 for a perfect circle).

    Returns None for a degenerate zero-area polygon so the optimizer can
    treat it as an invalid candidate.
    """
    poly = struct.polygons[0]
    area = geometry.get_square(poly)
    length = geometry.get_length(poly)
    return None if area == 0 else 1 - 4 * np.pi * area / length ** 2


timer_start = timeit.default_timer()
optimized_structure = optimize(task_setup=task_setup,
                               objective_function=area_length_ratio,
                               pop_size=100,
                               max_gens=20)
spend_time = timeit.default_timer() - timer_start

# Plot the evolved polygon alongside the ideal circle it approximates.
visualiser = StructVizualizer(task_setup.domain)
plt.figure(figsize=(7, 7))

prediction_info = {'spend_time': spend_time,
                   'fitness': area_length_ratio(optimized_structure),
                   'type': 'prediction'}
visualiser.plot_structure(optimized_structure, prediction_info)

true_circle = Structure(create_circle(optimized_structure))
reference_info = {'spend_time': spend_time,
                  'fitness': area_length_ratio(true_circle),
                  'type': 'true'}
visualiser.plot_structure(true_circle, reference_info)

plt.show()
EvoAnalytics.create_boxplot()
from typing import Dict, Optional, Any

import torch
from scipy.stats import wasserstein_distance

import torchmetrics


# This implementation is based on scipy.stats.wasserstein_distance.
# There are solvers for differentiable approximations with GPU acceleration in
# the geomloss package: https://www.kernel-operations.io/geomloss/api/pytorch-api.html#geomloss.SamplesLoss
class WassersteinMetric(torchmetrics.Metric):
    """Wasserstein (earth mover's) distance between two sample distributions.

    Predictions and targets of any shape are flattened and accumulated across
    ``update`` calls; ``compute`` evaluates the 1-D Wasserstein distance over
    the pooled samples via SciPy (on CPU, non-differentiable).
    """

    def __init__(self, compute_on_step: Optional[bool] = None, **kwargs: Dict[str, Any]) -> None:
        super().__init__(compute_on_step, **kwargs)
        # initialise states; "cat" concatenates the per-process lists when
        # states are synchronised in distributed mode
        self.add_state("preds", default=[], dist_reduce_fx="cat")
        self.add_state("target", default=[], dist_reduce_fx="cat")

    def update(self, preds: torch.Tensor, target: torch.Tensor) -> None:
        """Accumulate one batch of predictions and targets.

        Raises:
            AssertionError: if ``preds`` and ``target`` differ in shape.
        """
        assert preds.shape == target.shape, (
            "``preds`` and ``target`` need to have the same shape. "
            + f"Got ``{preds.shape}`` and ``{target.shape}`` instead."
        )
        # Inputs of any shape are accepted: samples form one flat distribution.
        # (The original asserted preds.dim() == 1 *after* flattening, which can
        # never fail — removed as dead code.)
        preds, target = preds.flatten(), target.flatten()
        self.preds.append(preds)
        self.target.append(target)

    def compute(self) -> float:
        """Return the Wasserstein distance over all accumulated samples."""
        # NOTE(review): after a distributed sync with dist_reduce_fx="cat" the
        # state may arrive as a single tensor rather than a list of tensors;
        # confirm torch.cat still behaves as intended under DDP.
        preds = torch.cat(self.preds)
        target = torch.cat(self.target)
        metric = wasserstein_distance(preds.cpu().numpy(), target.cpu().numpy())
        return metric
# Do objects move in 2 directions at once? If a velocity vector of an object can be divided into an x and y component relative to a second object's position, and both objects have gravity that attracts both objects to each other. We then know that the object is not moving in a straight path. How is the object able to move in 2 directions at once(x and y component of velocity). If you infinitely slowed down time to observe the movement of the object, would you see short frames where the object is only moving only vertically, then moving only horizontally, and alternating between both? Can someone explain the physics behind why what I'm saying is inaccurate. p.s I just started learning physics (gr 11), so I might not understand extremely complex explanations. • The real world doesn't have $(x,y)$ coordinates. They're just there as a way of labelling the direction something is moving, but that something doesn't have to be moving on a literal grid. – jacob1729 May 15 '19 at 16:33 • Rotate your X and Y axes such that the motion seems to be along either of them, and you'll interpret it as moving along only one direction – Eagle May 15 '19 at 17:05
#### 12th Standard Physics English Medium Electrostatics Reduced Syllabus Important Questions with Answer key 2021 12th Standard Reg.No. : • • • • • • Physics Time : 01:00:00 Hrs Total Marks : 100 Multiple Choice Questions 15 x 1 = 15 1. Which charge configuration produces a uniform electric field? (a) point Charge (b) infinite uniform line charge (c) uniformly charged infinite plane (d) uniformly charged spherical shell 2. The total electric fl ux for the following closed surface which is kept inside water (a) $\frac { 80q }{ { \varepsilon }_{ 0 } }$ (b) $\frac { q }{ { 40\varepsilon }_{ 0 } }$ (c) $\frac { q }{ { 80\varepsilon }_{ 0 } }$ (d) $\frac { q }{ { 160\varepsilon }_{ 0 } }$ 3. An electric field $\vec { E } =10x\hat { i }$ exists in a certain region of space. Then the potential difference V = Vo – VA, where Vo is the potential at the origin and VA is the potential at x = 2 m is: (a) 10 J (b) -20 J (c) +20 J (d) -10 J 4. In electrostatics if the charges are in motion, another force named __________ comes into play in addition to coulomb force (a) Lorentz force (b) Repulsive force (c) Attractive force (d) electromagnetic force 5. _______ and Coulomb's law form fundamental principles of electrostatics (a) Newton's law of gravitation (b) superposition principle (c) ohm's law (d) Kepler's law 6. An isolated metal sphere of radius 'r' is given a charge' q'. The potential energy of the sphere is (a) $\frac { { q }^{ 2 } }{ 4\pi { \varepsilon }_{ 0 }r }$ (b) $\frac { { q }^{ } }{ 4\pi { \varepsilon }_{ 0 }r }$ (c) $\frac { { q }^{ } }{ 8\pi { \varepsilon }_{ 0 }r }$ (d) $\frac { { q }^{ 2 } }{ 8\pi { \varepsilon }_{ 0 }r }$ 7. Which one of these is a vector quantity? (a) Electric charge (b) Electric field (c) Electric flux (d) Electric potential 8. The electric potential V as a function of distance x (metres) is given by V = ( 5x2 + 10x -9) volt. The value of electric field at a point x = 1m is (a) 20 Vm-1 (b) 6 Vm-1 (c) 11 Vm-1 (d) -23 Vm-1 9. 
A non-conducting material which has no free electrons is called (a) capacitor (b) Dielectric (c) conductor (d) Inductor 10. An electric dipole placed at an angle in a nonuniform electric field experiences (a) neither a force nor a torque (b) torque (c) both force and torque (d) force only 11. Value of k in Coulomb's law depends upon (a) magnitude of charges (b) distance between charges (c) both (a) and (b) (d) medium between two charges 12. One Joule per Coulomb is called (a) Gauss (b) ampere (c) (d) volt 13. The concentric spheres of radii R and r have similar charges with equal surface densities (σ). What is the electric potential at their common centre? (a) $\frac { \sigma }{ { \varepsilon }_{ 0 } } (R-r)$ (b) $\frac { \sigma }{ { { \varepsilon }_{ 0 } } } (R+r)$ (c) $R\frac { \sigma }{ { \varepsilon }_{ 0 } }$ (d) $\frac { \sigma }{ { \varepsilon }_{ 0 } }$ 14. Gauss law is another form of ________. (a) Newton's law (b) Kepler's law (c) Ohm's law (d) Coulomb's law 15. The repulsive force between two like charges of 1 coulomb each separated by a distance of 1 m in vacuum is equal to : (a) 9 x 109 N (b) 109 N (c) 9 X 10-9 N (d) 9 N 16. 2 Marks 10 x 2 = 20 17. Define ‘Electric field’. 18. Define ‘electrostatic potential”. 19. Define ‘electrostatic potential energy’. 20. Define ‘electric flux’. 21. What is corona discharge? 22. What is dielectric breakdown. 23. A charge Q μc is placed at the centre of a cube what would be the (i) flux through one face? (ii) flux passing through two opposite faces of the cube? Electric flux through whole cube $\frac { Q }{ { \varepsilon }_{ 0 } }$ 24. Represent the variation of electric field due to point charge Q with a) magnitude of charge Q b) r and c) $\frac{1}{r^2}$ where r is the distance of the observation point from the charge. graphically. 25. 
Aparallel plate capacitor is charged by a battery after some time, the battery is disconnected and a dielectric slab with its thickness equal to the plate so reparation is insected between the plates. How will (i) the capacitance of the capacitor (ii) potential difference between the plates & (iii) the energy stored in the capacitor the affected? Qo - charge; Vo - potential difference, Co - capacitance, Eo- electric field. U0 - energy spred, before the dielectric slab is inserted. ${ Q }_{ 0 }={ C }_{ 0 }{ V }_{ 0 };\frac { { B }_{ 0 } }{ d } ;{ U }_{ 0 }=\frac { 1 }{ 2 } { C }_{ 0 }{ v }_{ 0 }^{ 2 }$ 26. Gauss law is true for any closed surface, no matter what its shape or size is. Justify. 27. 3 Marks 10 x 3 = 30 28. Two small-sized identical equally charged spheres, each having mass 1 mg are hanging in equilibrium as shown in the figure. The length of each string is 10 cm and the angle θ is 7° with the vertical. Calculate the magnitude of the charge in each sphere.(Take g = 10 ms−2) 29. Calculate the electric field at points P, Q for the following two cases, as shown in the figure. (a) A positive point charge +1 μC is placed at the origin. (b) A negative point charge -2 μC is placed at the origin. 30. A block of mass m and positive charge q is placed on an insulated frictionless inclined plane as shown in the figure. A uniform electric field E is applied parallel to the inclined surface such that the block is at rest. Calculate the magnitude of the electric field E. 31. Derive an expression for electrostatic potential due to a point charge. 32. Obtain an expression for potential energy due to a collection of three point charges which are separated by finite distances. 33. Obtain the expression for energy stored in the parallel plate capacitor. 34. Derive the expressions for the potential energy of a system of point charges. 35. How is electric flux is related to electric field. 36. Write the special features of Gauss law. 37. 
Define and derive an expression for the energy density in parallel plate capacitor. 38. 5 Marks 8 x 5 = 40 39. Consider a point charge +q placed at the origin and another point charge -2q placed at a distance of 9 m from the charge +q. Determine the point between the two charges at which electric potential is zero. 40. (i) In figure (a), calculate the electric flux through the closed areas A1 and A2. (ii) In figure (b), calculate the electric flux through the cube. 41. When two objects are rubbed with each other, approximately a charge of 50 nC can be produced in each object. Calculate the number of electrons that must be transferred to produce this charge. 42. Explain in detail the Electrostatic Potential difference between the charges. 43. A thin metallic spherical shell of radius R carries a charge Q on its surface. A point charge $\frac{Q}{2}$ is placed at the centre C and another is placed at the centre C and another a distance x from the centre as shown in the figure (i) Find the electric flux through the shell. (ii) Find the force on the charges at C and A. 44. How many electrons are there in one coulomb of negative charge? 45. An electric dipole of length 4cm, when placed with its axis making an angle of 60° with a uniform electric field, experiences a torque of 4√3 Nm. Calculate the potential energy of the dipole, if it has charge ±8nC. 46. Two insulated charged copper sphere A and B have their centres separated by a distance of 50cm. (i) What is the force of electrostatic repulsion, if the charge on each is 6.5 X 10-7C and the radii of A and B are negligible compared to the distance of separation? (ii) What is the force of repulsion, if each sphere is charged double the above amount and the distance between them is halved?
1 echo -n "Hi . How are you?" | tl -name Shane ## This is similar to python f-strings 1 2 3 name = "Eric" age = 74 print(f"Hello, {name}. You are {age}.") Hello, Eric. You are 74. In fact, I should make an f-strings-based utility. ## Build the replace-substring script 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 #!/bin/bash export TTY # * This is great for templates # cat $HOME/scripts/replace-substring | replace-substring -m "o" -r "$(echo "hi\nyo")" pat= replacement= IGNORECASE=0 while [ $# -gt 0 ]; do opt="$1"; case "$opt" in -m) { pat="$2" pat="$(p "$pat" | esc '\')" shift shift } ;; -r) { replacement="$2" shift shift } ;; -i) { IGNORECASE=1 shift } ;; *) break; esac; done awk -v IGNORECASE=$IGNORECASE -v pat="$pat" -v r="$replacement" '{gsub(pat,r)}1' ## Build the tl script 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 #!/bin/bash export TTY stdin_exists() { ! [ -t 0 ] } if stdin_exists; then fp="$(tf txt)" else echo "requires stdin" 1>&2 exit 1 fi while [$# -gt 0 ]; do opt="$1"; case "$opt" in -[a-z]*) { varname="$(p "$opt" | mcut -d- -f2)" contents="$2" shift shift varname="$(p "$varname" | sed 's/\s\+//g')" # no spaces varname="$(p "$varname" | sed -e 's/$$.*$$/\L\1/')" # lowercase varname="$(p "$varname" | fuzzify-regex -s)" # let params match fields with spaces cat "$fp" | ptw replace-substring -i -m "<$varname>" -r "$contents" | sponge "$fp" # perform variable replacement cat "$fp" | sed 's/$$\b1 [a-zA-Z]\+$$s\b/\1/g' | sponge "$fp" # fix singular plural cat "$fp" | sed 's/""/"/g' | sponge "$fp" # fix CSV double quote } ;; *) break; esac; done cat "$fp" ## Example 1 echo -n "Hi . How are you?" | tl -name Shane Hi Shane. How are you? 
## Example of usage 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 IFS= read -r -d '' list <) { lang= } ;; HEREDOC printf -- "%s\n" "$list" | awk1 | while IFS=$'\n' read -r line && test -n "$line"; do lastopt="$(printf -- "%s\n" "$line" | sed 's/.*|//')" printf -- "%s\n" "$template" | tl -options "$line" -lastoption "$lastopt" done fp|functional-programming) { lang=functional-programming } ;; } ;; tc|torch|pytorch) { lang=pytorch } ;; dj|django) { lang=django } ;; rl|reinforcement-learning) { lang=reinforcement-learning } ;; dl|deep-learning) { lang=deep-learning } ;; nlp|natural-language-processing) { lang=natural-language-processing } ;; tf|tensorflow) { lang=tensorflow } ;; clj|clojure) { lang=clojure } ;; sy|spacy) { lang=spacy } ;; codegen|code-generation) { lang=code-generation } ;; rpl|rosie) { lang=rosie } ;; ps|purs|purescript) { lang=purescript } ;; ds|datascience|data-science) { lang=data-science } ;; py|python) { lang=python } ;; tu|turtle) { lang=turtle } ;; pb|problog) { lang=problog } ;; mp|megaparsack) { lang=megaparsack } ;; rkt|racket) { lang=racket } ;; js|javascript) { lang=javascript } ;; gh|github) { lang=github } ;; } ;; ml|machine-learning) { lang=machine-learning } ;; ts|typescript) { lang=typescript } ;; pl|perl) { lang=perl } ;;
Lesson 2 # Basic syntax 0 Now that you have a grip on the basics, we're going to dive deeper into the following areas: 1. Variables and variable definition 2. MATLAB as a calculator 3. Data types and formats 4. Operators and special characters 6. User input from the keyboard 7. Displaying the variable as output ## Variables and variable definition Variables are used to store information in memory and give it a special label. You can define variables using the assignment operator (=). Note that MATLAB variables can be assigned without declaring their type, and that the type can change during the execution of your program. Below is an example of how you'd declare a variable called n and give it a value of 10: In the above code snippet, MATLAB will actually create a $1\times 1$ matrix under the hood (since it's a Matrix Language). If you don't give your variable a name, it will automatically be assigned to the default one called ans: Note: A valid variable name in MATLAB starts with a letter, and can be followed by letters, digits or underscores (_). Remember that MATLAB is case-sensitive, so A and a are not the same variable! ## MATLAB as a Calculator The MATLAB environment is pretty much like a super smart calculator, and can perform all the mathematical operations that you're used to doing in school. It's also what's known as an interpreted environment, which means that you can give it any command in the command window, and it will be executed it right away. Let's see how it works! Type a valid mathematical expression, for example: When you press "enter", MATLAB executes it immediately and the result of your operation is returned: Here's another more complex example: You can also solve real world mathematical problems. For example, let's calculate the number of of seconds in the month of January. We know that January has $31$ days, each day has $24$ hours, each hour has $60$ minutes and each minute has $60$ seconds. 
In the MATLAB command window, we can define variables for all of these values and make it easy for anyone to follow the logic of our program: Notice how defining variables makes your program much easier to read? Compare that to a program with a single line: Use this example as a reminder that your program isn't always better just because it's shorter. ## Data types When working on a program, it's always important to keep track of the data type of your variables, or else you might end up with unexpected results! As an example, if you add two integers together (2 + 2), the result is 4, but if you add two (seemingly identical) numbers which are actually strings ("2" + "2"), the result will be "22"! Let's first go through the different types of numbers that exist in MATLAB: And here are the available data types that you can use to store them: As we saw earlier, the reason why these data types are so important is because the result of your operation sometimes depends on how precise you want to be. Consider the following product 23.5*19.234. The correct result of this operation is 451.999. This works fine when we use the single and double data types: But notice how we lose precious data if we use other data types: As you can see, integer data types (like int16 and int32) can only handle whole numbers, which explains why their result is $452$ instead of $451.999$. int8 is worse, since it can't even handle a number that big, so it just displays its maximum value ($127$). Now let's move on to text values. Strings and characters are declared by typing the character in single quotes ('). Here's an example of how you can define a student's name and grade: It's important to note that all characters in MATLAB have an equivalent numerical value based on the American Standard Code for Information Interchange (also known as "ASCII"). This is what programmers call "encoding". 
For example, the name James King in MATLAB is actually a vector with 10 elements in it (including the space) as shown below: From this, you can see that J uses the ASCII code 74, a uses 97, m uses 109, and so on and so forth... Pretty cool! ## Formatting The format of your output is controlled by the format command which can be used both in line spacing and numerical formats. You might have noticed in the previous example that the command window contains blank lines in between the execution lines. This is called loose format and is the default option that's used to make the output more readable. If you want to remove this default behaviour, you can use the compact format instead: For the numeric variables, there are a number of format styles used to display the output. ## Operators and Special Characters An operator is a symbol that tells the compiler to perform a specific mathematical or logical manipulation. The most common operators in MATLAB are: • Arithmetic Operators (addition, subtraction, multiplication... etc) • Logical Operators (if, else, true, false... etc) • Relational Operators (greater than, less than... etc) MATLAB arithmetic operations can be subdivided into matrix arithmetic operations and array arithmetic operations. The matrix arithmetic operations are as defined in linear algebra (+,-,*,/,^): Array operators use the . symbol, and we'll look into them more closely in the next chapter. Just like artithmetic operators, relational operators can also work on both scalar and non-scalar data. Relational operators for arrays perform element-by-element comparisons between two arrays and return a logical array of the same size. The logical operators can be either element-wise logical operators (operate element-by-element on logical arrays) e.g. &amp;,| and ~ or short-circuit logical operators e.g. &amp;&amp; and ||. Operators in MATLAB have the same order of precedence as in standard linear algebra. 
Below is a list from the "most important" operator to the least: 1. Parentheses () 2. Transpose .´, power .^, complex conjugate transpose ´, matrix power ^ 3. Power with unary minus.-, unary plus .+, or logical negation ~ as well as matrix power with unary minus ^-, unary plus ^+, or logical negation ^~ 4. Unary plus +, unary minus -, logical negation ~ 5. Multiplication .*, right division ./, left division .\, matrix multiplication *, matrix right division /, matrix left division \ 6. Addition +, subtraction - 7. Colon operator : 8. Less than &lt;, less than or equal to&lt;=, greater than&gt; , greater than or equal to &gt;=, equal to==, not equal to ~= 9. Element-wise AND &amp; 10. Element-wise OR | 11. Short-circuit AND &amp;&amp; 12. Short-circuit OR || There are also special characters that change the format of the display of code or the output in the command window. The table below summarizes the commonly used characters or symbols. From the example on calculating the seconds in the month in January, every execution is displayed as an output. This can be suppressed by the semi-colon as shown below: Comment are used to make the code understandable. It makes it easier for the code users to quickly get an idea of what the program is actually doing, and it's usually considered a good practice to add comments to your code. To put a comment within a line, type % followed by the comment text. MATLAB compiler treats all the information after the % on a line as a comment. For a lengthy comment that results in the the group of lines, type %{ before the first line and %} after the last line you want to comment. For example: ## User inputs from the Keyboard The command we have dealt with required us to assign the number/string or constant to the variables when we initialized them. We can however, prompt the user to input the values to the variable from the keyboard in MATLAB using the function input(). 
The syntax is x = input(prompt).Whenever this command is executed, the text in prompt is displayed and waits for the user to input a value. This commands are best implemented in a script. We will solve the problem that asks us to compute the seconds in the month of January using the script code_two_0 below ( as keyed in in the command editor) When the script code_two_0 is run the on the command window the user is prompted to key in the values to the days, hours, mins and secs. before the answer is calculated as shown below. ## Displaying the variable as Output From the previous example, the answer 2678400 is not clearly presented. One can't be able to tell what is it. It is just a numerical figure to many who have no idea about pr0graming. MATLAB has inbuilt functions, disp() or fprintf() to display output to the screen (command window). The disp() function is the easiest to use but it has limited control of output formatting. It displays exactly one value/variable to the screen on one or more lines.This function is convenient for displaying vectors, matrices, and complex numbers . It display the entire variable within a single function call. In general, use disp() when you do not care about the exact format of the outputFor example All the commands in code_two_0 script on number of seconds in January,can be suppressed by the semicolon and the final answer displayed using dips() as shown. Save this script as code_two_1 The output in the command window when code_two_1 is run is The fprintf() function is more complex but it allows you precise control over the output formatting. It can receive one or more arguments. Its syntax is fprintf(str,variable_list) The first argument, str, usually format string (should be in quotes). The format string should contain a format specifier/descriptor for each variable in variable_list. A format descriptor starts with the special character %, followed by an integer giving the width of the field to be printed. 
The fieldWidth - is an integer that specifies how many spaces (or decimal points) to use for the value. In the format string, special escape sequences are used to represent "non-printable" characters. This special escape sequences, begin with the backslash '\' character. Each variable in the variable_list must point to the corresponding specifier. For example, The format sprint allows the user to add additional text that will help describe the output further. In code_two_1 replace the thedisp() function withfprintf() function and save the script as code_two_2 as shown below The output in the command window when code_two_2 is run is Another example (in command window directly), If John bought three books, at \$100 each. Let us write MATLAB commands to calculate and display the total cost John incurred. ## Part A: Math Matlab makes an excellent calculator. Try entering some basic math expressions at the prompt in the command window: Note: The last command gives the remainder when the first value is divided by the second value i.e. 89 divide by 11 ## Part B: Scripts 1. Write a script in in Matlab, to compute the number of seconds in a leap year. Form your output using the method learned in this chapter. 2. Write a script in Matlab to display the bio data of a student. The script should prompt the user to input the student' surname, age, admission number, class, weight, height and display the result in the command window.
## [FIXED] Change Live Histogram not available during stacking A place to report problems and bugs in SharpCap Forum rules Please include the following details in any bug report: * Version of SharpCap * Camera and other hardware being user * Operating system version * Contents of the SharpCap log after the problem has occurred. [If SharpCap crashes, please send the bug report when prompted instead of including the log] donboy Posts: 98 Joined: Sat Mar 25, 2017 1:57 am ### Re: Change Live Histogram not available during stacking Hi Robin, My setup has been consistent and usually with the first object after opening SC the live stack histo is immediately responsive to change. I lose immediate responsiveness when I go to another object and start stacking. Issue started with v 3.1.5098 Setup: v. 3.1.5098 & v.3.1.5108 Windows10 Pro v.1709 (was v.1703 before these latest updates were installed) SSD and 10gb memory 6 core Xenon duocore processor (12cpus) buffers report 3 or 4 of 45 ASI294Pro USB3 Raw16 Bin1 Default or Sigma Kappa doesn't seem to make a difference gain 300 15sec to 60sec exposure mostly without darks or flats but have used darks and flats and doesn't seem to change the issue I have a feeling that the Windows 10 update to v.1709 maybe the culprit. Don Dale R. Posts: 7 Joined: Wed Mar 28, 2018 4:35 pm ### Re: Change Live Histogram not available during stacking I can add to my other post: Laptop..I7-770hq@2.80 GHZ Ram: 16gb Vid card NVidia GTX1050 win10 is at1709 like Don's gain 350-375..darks and no darks..4sec-20sec. I was using ZWO224 camera.RGB24 bin 1 when it happened, but changing to all the other options did not help. I can't get it to error on the demo deepsky camera. If the camera settings are being saved after a "complete" uninstall I wonder if some error is being added back into to any re-installs??? The only way I got it to work, no restarting worked, no reinstalling the same version, just reverting back to 5098. After that ver. 
starting acting up , I did not try again. next time out i will and if it happens agin, I will try the ver. you suggest. Strange i had no problems for 30 or so stacked DSO's and then boom,it pops up. The only thing I can add for some reason when I started I got an update notice that I did not download. I ignored."ask again later" or whatever the option is.i thought I was updated with 5108..... roelb Posts: 66 Joined: Wed Mar 08, 2017 11:36 pm ### Re: Change Live Histogram not available during stacking I don't think that the latest Windows update "1709" may be a source of the issue, because I had the problem also with the old Windows "1703" version. Roel Celestron Nexstar Evolution 8 - Celestron Nexstar 6 SE - OTA 5 SE - StarSense Starlight Xpress Lodestar X2 Mono -- ATIK Infinity Colour -- ZWO ASI290MM -- ZWO ASI224MC Posts: 2142 Joined: Sat Feb 11, 2017 3:52 pm Location: Vale of the White Horse, UK Contact: ### Re: Change Live Histogram not available during stacking Hi, please can people try build 3.1.5115. If you have suffered from this problem then please turn on the new option 'Record extra information in the log for troubleshooting' in the general settings before you start stacking. Please also show the log window (from the help menu) and keep an eye on it. With this new option set there will be a lot of logging when you adjust the live stack histogram - you should see something like Code: Select all Received image update request update already running, setting request flag and ignoring Re-displaying frame due to image update request Image update requested while previous was in progress, re-running FrameProcessor update request received at the ImageProvider - passing on ImageUpdateEvent Re-displaying frame due to image update request Every time you move one of the level bars you will get a 'Received image update request' line - many will happen as you drag. 
Not all of these will result in the display being updated, since often they arrive so quickly that SharpCap is still busy working on the last one when the next arrives. When this happens SharpCap sets a reminder to itself to re-update when the current update is over. At the end, after every movement of the sliders there should be a line that ends with the 'Re-displaying frame due to image update request' message. If the histogram stops responding it is most likely that the last 're-displaying' message will also stop showing. If that happens, please send me the full log and I can hopefully work out where the request is getting lost or stuck from what other messages are showing. To be honest, if you have the problem happen to you while you have the extra logging turned on then please send me the whole log *and* the time that the problem happened (so that I know where in the log to look!). Now, having said all that I did find one place where - just maybe - under some odd circumstances the code could get stuck not passing on the requests until you shut down and re-started SharpCap. I fixed that. What I found could not have had an effect that lasted over a restart of SharpCap though. On the subject of the camera settings being saved over re-install, this is supposed to happen. SharpCap 3.1 auto-saves settings when you close down the camera in a hidden capture profile and re-loads them when you open the same camera. Capture profiles are not deleted on uninstall or upgrades, so this profile is still used after an uninstall and reinstall. You can turn this feature off in the SharpCap settings if it annoys you. I don't think the settings being restored will bring the problem back, but there are certain camera settings that could have an impact on whether a problem like this shows or not -these are * Capture Area * Colour Space * Exposure Length * Dark Subtraction * Flat Correction I think that other settings are unlikely to have any impact. 
Please also check that the Status listed in the 'Information' box in the live stacking area shows as 'Waiting for Next Frame' for most of the time. In the 'waiting' status, SharpCap is able to respond to histogram adjustments. In other states it may not be able to respond due to changes being made to the stack to process the current frame. If the status shows 'Calculating Alignment' most of the time - or some other status the problem is lack of CPU speed or too much CPU in use by other processes or too fast a frame rate. hope this helps, Robin Dale R. Posts: 7 Joined: Wed Mar 28, 2018 4:35 pm ### Re: Change Live Histogram not available during stacking Will do as requested..now waiting for clears skies of course Thanks for the great program and constant improvements....not common out there these days! roelb Posts: 66 Joined: Wed Mar 08, 2017 11:36 pm ### Re: Change Live Histogram not available during stacking I've done daily tests with SC version 3.1.515.0. On 3 different PC's with the ASI290MM & ASI224MC cameras and the DSO Test Camera. So far the problem has not occurred. I'll keep testing on a daily base. Roel Celestron Nexstar Evolution 8 - Celestron Nexstar 6 SE - OTA 5 SE - StarSense Starlight Xpress Lodestar X2 Mono -- ATIK Infinity Colour -- ZWO ASI290MM -- ZWO ASI224MC Posts: 2142 Joined: Sat Feb 11, 2017 3:52 pm Location: Vale of the White Horse, UK Contact: ### Re: Change Live Histogram not available during stacking Hi Roel, thanks for the update - let's hope the change I made did fix the issue cheers, Robin Dale R. Posts: 7 Joined: Wed Mar 28, 2018 4:35 pm ### Re: Change Live Histogram not available during stacking Sorry it took so long to get back to you...we have this thing called BLIZZARDS in the US, so I have been stuck in the house. I will not go out in the cold until next year. I have the latest version and it works fine again.. Thank YOU!!!! 
Posts: 2142 Joined: Sat Feb 11, 2017 3:52 pm Location: Vale of the White Horse, UK Contact: ### Re: Change Live Histogram not available during stacking That's excellent news. I have an update to this already written as part of SharpCap 3.2 that will make the update more responsive, so that should help too. thanks, Robin ### Who is online Users browsing this forum: No registered users and 2 guests
# Linear Approximation Calculator Linear Approximation Calculator is a free online tool that displays the linear approximation for the given function. BYJU’S online linear approximation calculator tool makes the calculation faster, and it displays the linear approximation in a fraction of seconds. ## How to Use the Linear Approximation Calculator? The procedure to use the linear approximation calculator is as follows: Step 1: Enter the function, variable and its expansion point in the respective input fields Step 2: Now click the button “Find the Linear Approximation” to get the output Step 3: Finally, the linear approximation for the given function will be displayed in the new window ### What is Meant by Linear Approximation? A linear approximation is a method of determining the value of the function f(x), nearer to the point x = a. This method is also known as the tangent line approximation. In other words, the linear approximation is the process of finding the line equation which should be the closest estimate for a function at the given value of x. The linear approximation formula for the function is given by: f(x) ≈ L(x) = f(a) + f’(a)(x-a) ### Example Question: Determine the value of f(3.5) using the linear approximation at a = 3, where the function f(x) is differentiable such that f(3) = 12, and f’(3) = -2. Solution: We know that the linear approximation formula is f(x) ≈ L(x) = f(a) + f’(a)(x-a) Now, substitute the values in the formula, we get L(x) = f(3) + f’(3)(x-3) = 18-2x Hence, f(3.5)= 18-2(3.5) f(3.5)= 18 – 7 f(3.5) = 11
# Identifying structure of student essays ## overview Students must be able to read scientific texts with deep understanding and create coherent explanations that connect causes to events. Can we use Natural Language Processing to evaluate whether they're doing that based on essays they write? ## research questions 1. Can we automatically identify important concepts in students' scientific explanations? 2. Can we identify causal relations in their explanations? 3. Can we use these to identify an essay's causal structure? 4. How many training examples are necessary? 5. Can we automatically assemble training materials for a new topic? ## people • Simon Hughes, PhD defended in 2019. • Clayton Cohn, MS Thesis defended in 2020. • Keith Cochran, PhD student, in progress. • Noriko Tomuro, associate. • M. Anne Britt, NIU, collaborator. ## progress 1. Simon Hughes's dissertation demonstrated $$F_1$$ scores averaging 0.84 for RQ 1 with bi-directional RNN. 2. Simon demonstrated $$F_1$$ scores between .73 and .79 using a bi-directional RNN, and a novel shift-reduce parser. 3. Simon created a re-ranking approach that scored between .75 and .83 on identifying the entire essay structure (Fig. 1). 4. Hastings, et al. showed that 100 annotated essays produced a significant portion of the performance that 1000 essays did. 5. Working on it. ## future work • Application of deep learning transformers like BERT to tasks above, and expanding to other domains. • Developing specialized deep learning methods for inferring text structure. • Exploring ensembling methods.
This function works by bundling source package, and then uploading to https://win-builder.r-project.org/. Once building is complete you'll receive a link to the built package in the email address listed in the maintainer field. It usually takes around 30 minutes. As a side effect, win-build also runs R CMD check on the package, so check_win is also useful to check that your package is ok on windows. check_win_devel( pkg = ".", args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ... ) check_win_release( pkg = ".", args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ... ) check_win_oldrelease( pkg = ".", args = NULL, manual = TRUE, email = NULL, quiet = FALSE, ... ) ## Arguments pkg The package to use, can be a file path to the package or a package object. See as.package() for more information. An optional character vector of additional command line arguments to be passed to R CMD build if binary = FALSE, or R CMD install if binary = TRUE. For source packages: if FALSE, don't build PDF vignettes (--no-build-vignettes) or manual (--no-manual). An alternative email to use, default NULL uses the package Maintainer's email. If TRUE, suppresses output. Additional arguments passed to pkgbuild::build(). ## Functions • check_win_devel: Check package on the development version of R. • check_win_release: Check package on the release version of R. • check_win_oldrelease: Check package on the previous major release version of R. Other build functions: check_rhub()
# Sequences and Series #### Sequences A sequence is an ordered list of numbers such as $a_1, a_2, a_3, \dots$ formed according to a definite rule. Each member in this ordered list is called an "element" or "term" of the sequence. The sequence is defined by the number of terms it contains as either finite or infinite. The list of positive, even numbers $2, 4, 6, 8, \dots$ is a typical example of an infinite sequence. The dots indicate that the sequence continues forever, with no last term. The list of positive, even numbers less than $100$ - $2, 4, 6, 8, \dots , 98$ - is an example of a finite sequence. The first term in this sequence is $2$ and the last term is $98$. ##### Arithmetic sequences An arithmetic sequence is a sequence where each term is obtained from the preceding one by adding a constant amount. This constant is called the common difference and is often represented by the symbol $d$. The value of $d$ can be positive, negative or zero. The sequence is of the form $$a,\; a+d,\; a+2d,\; a+3d, \dots$$ where $a$ is the first term of the sequence. The $n^\text{th}$ term of an arithmetic sequence is given by $$a_n=a+(n-1)d$$. ##### Geometric sequences A geometric sequence is a sequence where each term is obtained from the preceding one by multiplying by a constant amount. This constant is called a common ratio and is often represented by the symbol $r$. The value of $r$ can be positive or negative ($r=0$ gives a trivial sequence with only one term). The terms of a geometric sequence with negative $r$ will oscillate between positive and negative values. The sequence is of the form $$a,\; ar,\; ar^2,\; ar^3, \dots$$. The $n^\text{th}$ term of a geometric sequence is given by $$a_n=ar^{(n-1)}$$. #### Series A series is obtained by adding up the terms of a sequence. If all the terms of a finite sequence are added it is called a finite series. 
For example: If $a_1,\; a_2,\; a_3,\dots$ is an infinite sequence , then the corresponding series is $$a_1+a_2+a_3+\cdots$$ The sum of the first $n$ terms of an infinite series is written as $$S_n=a_1+ a_2+ a_3 +\dots +a_n =\sum_{j=1}^na_j$$ ##### Arithmetic series An arithmetic series is a series where the difference between successive terms is a constant. The sum of the first $n$ terms of an arithmetic series with first term $a_1=a$ and last term $a_n=l$ is given by $$S_n=\frac{n}{2}\left( a+l\right).$$ In terms of the common difference between terms, $d$, (see section on sequences above) this can also be written as $$S_n=\frac{n}{2}\left( 2a+(n-1)d\right).$$ ##### Geometric series A geometric series is a series where each term is obtained from the preceding one by multiplying by a constant. The sum of the the first $n$ terms of a geometric series with first term $a_1=a$ and common ratio $=r$ is given by $$S_n=\frac{a(1-r^n)}{1-r},\text{ for }r\ne 1.$$ In the case where $|r|\lt 1$ it can be shown that this sum will tend to $$S_\infty =\frac{a}{1-r}.$$ ##### Double series A double series is a series depending on two indices such as $$\sum_{i,j}a_{i,j}.$$ If the terms of the series can be written as the product of two single subscript series, that is if $a_{i,j}=x_iy_j$, then a finite double series can be written as a product of series. For example: \begin{align*} \sum_{i=1}^m \sum_{j=1}^n x_iy_j&=x_1y_1+x_1y_2+\cdots +x_1y_n+x_2y_1+x_2y_2+\cdots \\ &=(x_1+x_2+\cdots +x_m)y_1+(x_1+x_2+\cdots +x_m)y_2+\cdots \\ &=\left(\sum_{i=1}^m x_i\right)(y_1+y_2+\cdots +y_n) \\ &=\left(\sum_{i=1}^m x_i\right)\left(\sum_{j=1}^n y_j\right) \end{align*}
# Find the equation of the streamline passing through the point $$(5,3)$$ at t =3 seconds. A velocity field is given by $$V=3yt\hat{i}+5x\hat{j}$$. Find the equation of the streamline passing through the point $$(5,3)$$ at $$t =3$$. Units of $$x,y$$ are in meters and time-$$t$$ is in seconds. Asked on 5th May 2021 in A streamline in a fluid flow Velocity field is $$V=\left ( 3yt\hat{i}+5x\hat{j} \right )$$ In a streamline flow, velocity is tangent to the flow. $$V$$ and $$dr$$ are in the same direction. Therefore, $V\times dr=0$ Since the two vectors are in the same direction, their cross product is $$0$$. $V\times dr = 0$$\Rightarrow \left ( 3yt\hat{i}+5x\hat{j} \right )\times \left ( dx\hat{i}+dy\hat{j} \right )=0$ $\Rightarrow \begin{vmatrix} \hat{i} &\hat{j} &\hat{k} \\ 3yt& 5x &0 \\ dx&dy & 0 \end{vmatrix}=0$ $\Rightarrow \hat{k}\left ( 3ytdy-5xdx \right )=0$ At time $$t = 3$$ $\Rightarrow 3y\left ( 3 \right )dy -5xdx=0$$\Rightarrow 9ydy -5xdx=0$$\Rightarrow 9ydy=5xdx$ On integrating $\Rightarrow \int 9ydy=\int 5xdx$ $\Rightarrow 9\int ydy=5\int xdx$ $\Rightarrow 9\frac{y^{2}}{2}+c_{1}=\frac{5x^{2}}{2}+c_{2}$ $\Rightarrow \frac{9y^{2}+2c_{1}}{2}=\frac{5x^{2}+2c_{2}}{2}$ $\Rightarrow 9y^{2}+2c_{1}=5x^{2}+2c_{2}$ $\Rightarrow 9y^{2}-5x^{2}=2c_{2}-2c_{1}$ $\Rightarrow 9y^{2}-5x^{2}=C$ At the point $$(5,3)$$, since $9y^{2}-5x^{2}=C$$\Rightarrow C= 9\left ( 3 \right )^{2}-5\left ( 5 \right )^{2}$$\Rightarrow C=\left ( 9\times 9 \right )-\left ( 5\times 25 \right )$$\Rightarrow C=81-125=-44$Therefore, the equation of the streamline is, $9y^{2}-5x^{2}=-44$$\Rightarrow 9y^{2}-5x^{2}+44=0$ Answered on 13th May 2021.
Friday, March 21, 2008 ... // Three preprints on cosmoclimatology During the last week, there have been three cosmoclimatological preprints by two teams on the arXiv. Rusov et al. (Ukraine) argue that all observed climate change at the timescale of millennia and millions of years can be explained by two factors, namely the solar output and the galactic cosmic ray flux that determines the cloudiness. Rusov et al. I (PDF) Rusov et al. II (PDF) The first paper studies the cloud condensation in terms of refined equations of the Twomey effect. In the second paper, their bifurcation model is compared with the EPICA data. Contrarians On the other hand, Terry Sloan (Lancaster) and A.W. Wolfendale (Durham) show that in some places, the cloud cover is anticorrelated with the cosmic ray flux relative to what is expected. At the 95% confidence level, they claim that less than 23% of the changes of the cloudiness during the 11-year cycles is caused by cosmic rays. Sloan & Wolfendale (PDF) I personally find all preprints plausible to some extent. It is very conceivable that cloudiness is controlled by a lot of things including largely unpredictable drivers that you might call a noise. A very convincing criticism of the paper by Sloan and Wolfendale was written by Nir Shaviv in April 2008. Hat tip: Physics arXiv blog
## The Annals of Probability ### Continuity of $l^2$-Valued Ornstein-Uhlenbeck Processes #### Abstract A stationary $l^2$-valued Ornstein-Uhlenbeck process is considered which is given formally by $dX_t = -AX_t dt + \sqrt 2a dB_t$, where $A$ is a positive self-adjoint operator on $l^2, B_t$ is a cylindrical Brownian motion on $l^2$ and $a$ is a positive diagonal operator on $l^2$. A simple criterion is given for the almost-sure continuity of $X_t$ in $l^2$ which is shown to be quite sharp. Furthermore, in certain special cases, we obtain simple necessary and sufficient conditions for the almost-sure continuity of $X_t$ in $l^2$. #### Article information Source Ann. Probab., Volume 18, Number 1 (1990), 68-84. Dates First available in Project Euclid: 19 April 2007 https://projecteuclid.org/euclid.aop/1176990938 Digital Object Identifier doi:10.1214/aop/1176990938 Mathematical Reviews number (MathSciNet) MR1043937 Zentralblatt MATH identifier 0699.60052 JSTOR Iscoe, I.; Marcus, M. B.; McDonald, D.; Talagrand, M.; Zinn, J. Continuity of $l^2$-Valued Ornstein-Uhlenbeck Processes. Ann. Probab. 18 (1990), no. 1, 68--84. doi:10.1214/aop/1176990938. https://projecteuclid.org/euclid.aop/1176990938
0 Research Papers # Mesoscopic Investigation of the Heterogeneities Induced by Channel-Die Compression [+] Author and Article Information Michel G. Darrieulat Ecole Nationale Supérieure des Mines de Saint-Etienne, Centre “Sciences des Matériaux et des Structures,” UMR CNRS No. 5146, 158 cours Fauriel, 42023 Saint-Etienne, Cedex 2, Francedarrieulat@emse.fr Khalid Zhani Faculté des Sciences et Techniques de Tanger, Université Abdelmalek Essâadi, B.P. 416, Tanger, Moroccok_zhani@hotmail.com René-Yves Fillit Ecole Nationale Supérieure des Mines de Saint-Etienne, Centre “Sciences des Matériaux et des Structures,” UMR CNRS No. 5146, 158 cours Fauriel, 42023 Saint-Etienne, Cedex 2, Francefillit@emse.fr Ahmed Chenaoui Faculté des Sciences et Techniques de Tanger, Université Abdelmalek Essâadi, B.P. 416, Tanger, Moroccoa_chenaoui@hotmail.com J. Eng. Mater. Technol 131(3), 031005 (May 22, 2009) (9 pages) doi:10.1115/1.3120390 History: Received September 10, 2008; Revised January 21, 2009; Published May 22, 2009 ## Abstract Experimental techniques were applied to study the heterogeneities of deformation of metals at the mesoscopic scale (typically $100 μm$ in the present case). The first are fiducial carbon grids that are transferred on to the surface of the test-pieces. Here, they were used on single and polycrystals deformed in channel-die compression. They prove efficient for strains above 1. They bring out the role of the corners of the samples, which trigger bands of deformation that grow in importance as the compression goes on. They put in evidence the mesoheterogeneities that appear in the mechanical behavior of a few highly symmetric orientations such as cube. The second technique is the use of microfocused X-rays, which give the crystallographic orientation at the same scale of $100 μm$ and can work in the presence of the carbon grids even when there is considerable strain hardening. 
The gradients found in the lattice rotations are far less pronounced than the sharp localizations in the displacement field. This highlights the importance of the rotations due to the activity of the slip systems. <> ## Figures Figure 1 Principle of the channel-die compression test Figure 2 Deformation of the grids and typical defects Figure 3 Sequence of deformation of a polycrystalline sample Figure 4 Deformation of remarkable single crystals Figure 5 Microfocus X-rays on the brass orientation Figure 6 Microfocus X-rays on the U orientation Figure 7 Microfocus X-rays on the copper orientation ## Discussions Some tools below are only available to our subscribers or users with an online account. ### Related Content Customize your page view by dragging and repositioning the boxes below. Related Journal Articles Related Proceedings Articles Related eBook Content Topic Collections
Article Contents Article Contents # Upper risk bounds in internal factor models with constrained specification sets • For the class of (partially specified) internal risk factor models we establish strongly simplified supermodular ordering results in comparison to the case of general risk factor models. This allows us to derive meaningful and improved risk bounds for the joint portfolio in risk factor models with dependence information given by constrained specification sets for the copulas of the risk components and the systemic risk factor. The proof of our main comparison result is not standard. It is based on grid copula approximation of upper products of copulas and on the theory of mass transfers. An application to real market data shows considerable improvement over the standard method. Citation: • [1] Aas, K., C. Czado, A. Frigessi, and H. Bakken. (2009). Pair-copula constructions of multiple dependence, Insur. Math. Econ. 44, no. 2, 182–198. [2] Ansari, J. (2019). Ordering risk bounds in partially specified factor models, University of Freiburg, Dissertation. [3] Ansari, J. and L. Rüschendorf. (2016). Ordering results for risk bounds and cost-efficient payoffs in partially specified risk factor models, Methodol. Comput. Appl. Probab., 1–22. [4] Ansari, J. and L. Rüschendorf. (2018). Ordering risk bounds in factor models, Depend. Model. 6.1, 259– 287. [5] Bäuerle, N. and A. Müller. (2006). Stochastic orders and risk measures: consistency and bounds, Insur.Math. Econ. 38, no. 1, 132–148. [6] Bernard, C. and S. Vanduffel. (2015). A new approach to assessing model risk in high dimensions, J. Bank.Financ. 58, 166–178. [7] Bernard, C., L. Rüschendorf, and S. Vanduffel. (2017a). Value-at-Risk bounds with variance constraints, J. Risk. Insur. 84, no. 3, 923–959. [8] Bernard, C., L. Rüschendorf, S. Vanduffel, and R. Wang. (2017b). Risk bounds for factor models, Financ.Stoch. 21, no. 3, 631–659. [9] Bernard, C., M. Denuit, and S. Vanduffel. (2018). 
Measuring portfolio risk under partial dependence information, J. Risk Insur. 85, no. 3, 843–863. [10] Bignozzi, V., G. Puccetti, and L Rüschendorf. (2015). Reducing model risk via positive and negative dependence assumptions, Insur. Math. Econ. 61, 17–26. [11] Cornilly, D., L. Rüschendorf, and S. Vanduffel. (2018). Upper bounds for strictly concave distortion risk measures on moment spaces, Insur Math Econ. 82, 141–151. [12] de Schepper, A. and B. Heijnen. (2010). How to estimate the Value at Risk under incomplete information, J. Comput. Appl. Math. 233, no. 9, 2213–2226. [13] Demarta, S. and A.J. McNeil. (2005). The t copula and related copulas, Int. Stat. Rev. 73, no. 1, 111–129. [14] Denuit, M., C. Genest, and E. Marceau. (1999). Stochastic bounds on sums of dependent risks, Insur. Math.Econ. 25, no. 1, 85–104. [15] Embrechts, P. and G. Puccetti. (2006). Bounds for functions of dependent risks, Financ. Stoch. 10, no. 3, 341–352. [16] Embrechts, P., G. Puccetti, and L. Rüschendorf. (2013). Model uncertainty and VaR aggregation, J. Banking Financ. 37, no. 8, 2750–2764. [17] Embrechts, P., G. Puccetti, L. Rüschendorf, R. Wang, and A. Beleraj. (2014). An academic response to basel 3.5, Risks 2, no. 1, 25–48. [18] Embrechts, P., B. Wang, and R. Wang. (2015). Aggregation-robustness and model uncertainty of regulatory risk measures, Financ. Stoch. 19, no. 4, 763–790. [19] Föllmer, H. and A. Schied. (2010). Convex and coherent risk measures., Encycl. Quant. Financ., 355–363. [20] Goovaerts, M.J., R. Kaas, and R.J.A. Laeven. (2011). Worst case risk measurement: back to the future?Insur. Math. Econ. 49, no. 3, 380–392. [21] Hürlimann, W. (2002). Analytical bounds for two Value-at-Risk functionals, ASTIN Bull. 32, no. 2, 235–265. [22] Hürlimann, W. (2008). Extremal moment methods and stochastic orders, Bol. Asoc. Mat. Venez. 15, no. 2, 153–301. [23] Kaas, R. and M.J. Goovaerts. (1986). Best bounds for positive distributions with fixed moments, Insur.Math. Econ. 5, 87–95. 
[24] McNeil, A.J., R. Frey, and P. Embrechts. (2015). Quantitative Risk Management. Concepts, Techniques and Tools., second edn, Princeton University Press, Princeton. [25] Müller, A. (1997). Stop-loss order for portfolios of dependent risks, Insur. Math. Econ. 21, no. 3, 219–223. [26] Müller, A. (2013). Duality theory and transfers for stochastic order relations. In: Stochastic orders in reliability and risk, Springer, New York. [27] Müller, A. and M. Scarsini. (2001). Stochastic comparison of random vectors with a common copula, Math. Oper. Res. 26, no. 4, 723–740. [28] Müller, A. and M. Scarsini. (2006). Stochastic order relations and lattices of probability measures, SIAM J. Optim. 16, no. 4, 1024–1043. [29] Müller, A. and D. Stoyan. (2002). Comparison Methods for Stochastic Models and Risks, Wiley, Chichester. [30] Nelsen, R.B. (2006). An introduction to copulas, 2nd ed, Springer, New York. [31] Nelsen, R.B., J.J. Quesada-Molina, J.A. Rodríguez-Lallena, and M. Úbeda-Flores. (2001). Bounds on bivariate distribution functions with given margins and measures of association, Commun. Stat.Theory Methods 30, no. 6, 1155–1162. [32] Puccetti, G. and L. Rüschendorf. (2012a). Bounds for joint portfolios of dependent risks, Stat. Risk. Model.Appl. Financ. Insur. 29, no. 2, 107–132. [33] Puccetti, G. and L. Rüschendorf. (2012b). Computation of sharp bounds on the distribution of a function of dependent risks, J. Comput. Appl. Math 236, no. 7, 1833–1840. [34] Puccetti, G. and L. Rüschendorf. (2013). Sharp bounds for sums of dependent risks, J. Appl. Probab. 50, no. 1, 42–53. [35] Puccetti, G., L. Rüschendorf, D. Small, and S. Vanduffel. (2017). Reduction of Value-at-Risk bounds via independence and variance information, Scand. Actuar. J. 2017, no. 3, 245–266. [36] Rüschendorf, L. (2009). On the distributional transform, Sklar’s theorem, and the empirical copula process, J. Stat. Plann. Inference 139, no. 11, 3921–3927. [37] Rüschendorf, L. (2013). 
Mathematical Risk Analysis, Springer, New York. [38] Rüschendorf, L. (2017a). Improved Hoeffding–Fréchet bounds and applications to VaR estimates. In:Copulas and Dependence Models with Applications. Contributions in Honor of Roger B. Nelsen(M. Úbeda Flores, E. de Amo Artero, F. Durante, and J. Fernández Sánchez, eds.), Springer, Cham. https://doi.org/10.1007/978-3-319-64221-5 12. [39] Rüschendorf, L. (2017b). Risk bounds and partial dependence information. In: From Statistics to Mathematical Finance, Springer, Festschrift in honour of Winfried Stute, Cham. [40] Rüschendorf, L. and J. Witting. (2017). VaR bounds in models with partial dependence information on subgroups, Depend Model 5, 59–74. [41] Shaked, M. and J.G. Shantikumar. (2007). Stochastic Orders, Springer, New York. [42] Tian, R. (2008). Moment problems with applications to Value-at-Risk and portfolio management, Georgia State University, Dissertation.
# Recent questions tagged youngs-double-slit-experiment Questions from: ### In a Young's double slit experiment separation between two consecutive dark. fringes is $1.2 \;mm$, wave length $\lambda$ of light is $600 \;nm$ Distance between source and screen $D= 1 m$, the separation between slits is To see more, click for the full list of questions or popular tags.
# SICP Solutions ### Chapter 4, Metalinguistic Abstraction #### Exercise 4.28 Because operator might be a thunk, or allow me to call it delayed expression! For eg: Consider tha ‘lambda’ passed as an argument. Let’s first view how a map procedure might look: With normal order evaluation, proc is a thunk and it contains thunk (lambda (x) (+ x 1)). Now since it is a thunk, so we need to force its evaluation and need to we call actual-value. Also, actual-value can work in both cases, either we pass a thunk or not. If thunk is passed then it first get the expression inside the thunk and evaluates it else it directly evaluates the expression. So, we can always call actual-value instead of eval, because it will work for both cases either when operator is a thunk or not. #### Note I think it’s better to explain the logical/conceptual reason for why something might work or not instead of explaining that what will happen if that code is executed. It’s better to explain why by saying because it is this thing we need to do this instead of saying because code won’t work correctly in that way. The reason is that latter explains why it won’t work in a certain way but it won’t explain why it would would work in the proposed way. For eg: in the above code, another way to explain is If we had used eval, then this code (proc (car items)) won’t work because evaluating proc would not work because proc contains 'thunk and when we pass this to eval then eval will try to lookup the procedure thunk instead of executing lambda. Notice that this explanation only explained why calling eval won’t work it did not explained why calling actual-value would work. I might have done similar explanation earlier, I hope I will stick to logical way to explain instead of implementation way of explaining things.
# Convex optimization ## Unconstrained 1. Toolbox ⟹ CVX • CVX: package if you to get the solution to such a problem: $\begin{cases} \sup c^T x Ax = b x ≥ 0 \end{cases}$ 2. Ellipsoid • in 1D: dichotomy • in higher dimension: $E_k ≝ \lbrace (x - x_k)^T P_k^{-1} (x - x_k) ≤ 1 \rbrace$ where $P_k$ is positive def. • ex: if $P_k = σ^2 I_n$ ⟶ ball of radius $σ$ Algorithm: make sure that x* ∈ E_k reduce the "size" of E_k f(x) ≥ f(x_k) + f'(x_k)^T (x - x_k) Find E_{k+1} as the minimum volume ellipsoid containing E_k ∩ {(x-x_k)^T f'(x_k) ≤ 0} $d(x_k, x^\ast) = O(\exp(- \frac{k}{12 n^2}))$ NB: contrary to gradient descent, it cannot “get lucky”, in that the $O(⋯)$ is almost equality. 4. Newton Algorithm: • $x_{k+1} = x_k - γ f’(x_k)$ • Choice of $γ$: • constant • line-search • exact: $\inf_{γ≥0} f(x_k - γ f’(x_k))$ • inexact Proposition: Assume • $f$ cvx and $C^2$ • all eigenvalues of $f’‘(x)$ are in $(μ, L)$ for all $x$ (where $μ$ is the smallest eigenvalue and $L$ the largest one) NB: • always $μ ≥ 0$ • $μ > 0 ⟺ f \text{ strongly convex}$ Algorithm: • $x_{k+1} = x_k - γ f’(x_k)$ • if $γ = \frac 1 L$, then $\begin{cases} \Vert x_k - x_\ast \Vert^\ast ≤ (1 - \frac μ L)^k \Vert x_0 - x_\ast \Vert^2 \\ f(x_k) - f(x_\ast) ≤ \frac L k \Vert x_0 - x_\ast \Vert^2 \end{cases}$ Summary: • If $f$ strongly convex, gradient descent is linearly/geometrically convergent (i.e. 
warning: linealry means times $\exp(- \ast k)$ (the number of digits grows linearly)) • $O(n)$ per iteration • if $f$ is not convex, then there’s convergence to a stationary point only ## Newton’s method Idea: optimize local quadratic Taylor expansion • no parameter • quadratically convergent: $c \Vert x_{k+1} - x_\ast \Vert ≤ (c \Vert x_k - x_\ast \Vert)^2$ Disavantages: • Instable far from $x^\ast$ • $O(n^3)$ per iteration: each step is very expensive Ridge regression estimator $\hat{w}$: \hat{w} ≝ argmin \frac{1}{2n} \Vert y - Xw \Vert^2_2 + \frac λ 2 \Vert w \Vert^2_2 In TP, we showed that if $λ = 0$: \hat{w}_0 = (X^T X)^{-1} X^T y Solve first order condition ($\nabla = 0$) $F$ is convex, so $w^\ast$ satisfies $\nabla F(w^\ast) = 0$. \begin{align*} 0 = \nabla F (w) & = \frac{-1}{n} X^T (y - Xw) + λ w \\ ⟹ & \hat{w} = (λn I + X^T X)^{-1} X^T y\\ \end{align*} $f$ is $λ$-strongly cvx: f - \frac λ 2 \Vert \bullet \Vert^2 \text{ is convex } 1. $\Vert w_n - w_{n+1} \Vert ≤ ε$ 2. $\Vert \nabla F\Vert ≤ ε$ 3. $\Vert w_n - w^\ast \Vert$ (kind of cheating: if we already know the gradient) \nabla F (w) = \frac{-1}{n} X^T (y - Xw) + λ w
# How do you test the improper integral int x^-2 dx from [2,oo) and evaluate if possible? Apr 13, 2017 ${\int}_{2}^{\infty} {x}^{-} 2 \mathrm{dx} = \frac{1}{2}$ #### Explanation: We will find the antiderivative of ${x}^{-} 2$ as normal. When we "evaluate" at infinity, we will take the limit of the antiderivative at infinity instead. Note that: $\int {x}^{-} 2 \mathrm{dx} = {x}^{-} \frac{1}{- 1} + C$ $\textcolor{w h i t e}{\int {x}^{-} 2 \mathrm{dx}} = - \frac{1}{x} + C$ So: ${\int}_{2}^{\infty} {x}^{-} 2 \mathrm{dx} = {\left[- \frac{1}{x}\right]}_{2}^{\infty}$ $\textcolor{w h i t e}{{\int}_{2}^{\infty} {x}^{-} 2 \mathrm{dx}} = \left[{\lim}_{x \rightarrow \infty} \left(- \frac{1}{x}\right)\right] - \left(- \frac{1}{2}\right)$ The limit approaches $0$: $\textcolor{w h i t e}{{\int}_{2}^{\infty} {x}^{-} 2 \mathrm{dx}} = 0 - \left(- \frac{1}{2}\right)$ $\textcolor{w h i t e}{{\int}_{2}^{\infty} {x}^{-} 2 \mathrm{dx}} = \frac{1}{2}$ So, the area under $\frac{1}{x} ^ 2$ from $x = 2$ onwards infinitely is only $\frac{1}{2}$.
# DRIVE: Digital Retinal Images for Vessel Extraction Dataset¶ This package is part of the signal-processing and machine learning toolbox Bob. It provides an interface for the DRIVE Dataset. This package does not contain the original data files, which need to be obtained through the link above. The DRIVE database has been established to enable comparative studies on segmentation of blood vessels in retinal images. The set of 40 images has been divided into a training and a test set, both containing 20 images. For the training images, a single manual segmentation of the vasculature is available. For the test cases, two manual segmentations are available; one is used as gold standard, the other one can be used to compare computer generated segmentations with those of an independent human observer. All human observers that manually segmented the vasculature were instructed and trained by an experienced ophthalmologist. They were asked to mark all pixels for which they were for at least 70% certain that they were vessel. If you use this package, please cite the authors of the database: @article{staal:2004-855, author = {J.J. Staal AND M.D. Abramoff AND M. Niemeijer AND M.A. Viergever AND B. van Ginneken}, title = {{Ridge based vessel segmentation in color images of the retina}}, journal = {{IEEE Transactions on Medical Imaging}}, year = {2004}, volume = {23}, number = {4}, pages = {501-509} }
## zmudz one year ago Let $$n$$ be a positive integer. Show that the smallest integer greater than $$(\sqrt{3} + 1)^{2n}$$is divisible by $$2^{n+1}.$$ Hint: Prove that $$\lceil (\sqrt{3}+1)^{2n} \rceil = (\sqrt{3}+1)^{2n} + (\sqrt{3}-1)^{2n}.$$ 1. amilapsn you can use mathematical induction with binomial expansion. 2. anonymous Id probably say make a recurrence relation 3. zzr0ck3r A number can't be equal to itself plus a nonzero number... 4. anonymous Isn't that$\lceil (\sqrt{3}+1)^{2n} \rceil = (\sqrt{3}+1)^{n} + (\sqrt{3}-1)^{n}.$ 5. amilapsn $\exists ! 0.abcd.... ~s.t.$ $\lceil{(\sqrt{3}+1)^{2n}}\rceil=(\sqrt{3}+1)^{2n}+0.abcd.....$ $But~ 0<(\sqrt{3}-1)^{2n}< 1\forall n\in \mathbb{Z}^+$ By binomial expansion: $\small (\sqrt{3}+1)^{2n}=^{2n}C_0 (\sqrt{3})^{2n}+^{2n}C_1 (\sqrt{3})^{2n-1}+\ldots+^{2n}C_{2n-1} (\sqrt{3})^{1}+^{2n}C_{2n} 1 ~~(2n+1~Terms)$ $\small (\sqrt{3}-1)^{2n}=^{2n}C_0 (\sqrt{3})^{2n}-^{2n}C_1 (\sqrt{3})^{2n-1}+\ldots-^{2n}C_{2n-1} (\sqrt{3})^{1}+^{2n}C_{2n} 1 ~~(2n+1~Terms)$ $\small \therefore (\sqrt{3}+1)^{2n}+(\sqrt{3}-1)^{2n}=2 \left[^{2n}C_0 (\sqrt{3})^{2n}+^{2n}C_{2} (\sqrt{3})^{2n-2}+\ldots+^{2n}C_{2n-2} (\sqrt{3})^{2}+^{2n}C_{2n} 1\right]$ Since all the terms have even exponents of $$\sqrt{3}$$ this will be an integer... $\therefore \lceil{(\sqrt{3}+1)^{2n}}\rceil=(\sqrt{3}+1)^{2n}+(\sqrt{3}-1)^{2n}$ By mathematical induction we can show that.. $2^{n+1}|\lceil{(\sqrt{3}+1)^{2n}}\rceil$ 6. zzr0ck3r suppose $$a=a+b$$ where $$b\ne 0$$. Then by cancelation $$b=0$$ a contradiction. :) 7. amilapsn @zzr0ck3r you've forgotten the ceiling... :) for example $$\lceil 2.5\rceil =3$$ 8. zzr0ck3r Ahh, sorry on my phone and I thought it was just $$[$$. lol. I was going nuts...
Refer to the figure shown. An $$E$$-$$V$$ battery and infinitely many solenoids of $$1!$$, $$2!$$, $$3!$$ and so on. The number of loops is connected in parallel. Another circuit has each of the infinite number of corresponding identical solenoids having $$N$$ loops and internal resistance $$R$$ connected in parallel. This circuit is placed near but not in contact with the former circuit. In terms of the variables given, determine the total current induced when the switch is closed. Details and Assumptions: • $$V$$ is read as Volts, the unit of potential difference. • All elements are Ohmic. ×
Q If the same wedge is made rough then the time taken by the body to come down becomes n times more (nt) Then find the Coefficient of Friction between the body and the wedge in terms of n? Views For the below figure The distance S is the same. And using this concept we get $\mu=\tan\theta\left[1-\frac{1}{n^{2}} \right ]$ Where $\mu=$coefficient of friction $\theta=$ Angle of inclination n = an integer Exams Articles Questions
1. Which of the following will come next in the following series? a z a b y a b c x a b c d w a b c d [ A ]    f [ B ]    u [ C ]    a [ D ]    v [ E ]    e Answer : Option E Explanation : The series is built from the groups a z / a b y / a b c x / a b c d w / a b c d e v …; each group adds one more letter counting forward from a, then closes with the next letter counting backward from z. The fifth group so far reads a b c d, so the next letter is e.
# Homework Help: Quantum Harmonic Oscillator ladder operator 1. Apr 2, 2013 ### bobred 1. The problem statement, all variables and given/known data What is the effect of the sequence of ladder operators acting on the ground eigenfunction $\psi_0$ 2. Relevant equations $\hat{A}^\dagger\hat{A}\hat{A}\hat{A}^\dagger\psi_0$ 3. The attempt at a solution I'm not sure if I'm right but wouldn't this sequence of opperators on the ground state result in zero? 2. Apr 2, 2013 ### vela Staff Emeritus Yeah. 3. Apr 2, 2013 ### bobred Thanks, I thought so. This is part of a larger question concerning the expectation value of $\left\langle p^4_x \right\rangle$. We are asked to show that $\left\langle p^4_x \right\rangle=\displaystyle\frac{ \hbar^4}{4 a^4}\left[ \displaystyle\int^\infty _{-\infty} \psi^*_0 \left(\hat{A}\hat{A}\hat{A}^\dagger\hat{A}^\dagger + \hat{A}\hat{A}^\dagger\hat{A}\hat{A}^\dagger + \hat{A}^\dagger\hat{A}\hat{A}\hat{A}^\dagger\right) \psi_0 dx \right]=\displaystyle\frac{3 \hbar^4}{4 a^4}$ Where $\psi_0=\left( \displaystyle\frac{1}{\sqrt{\pi}a} \right)^\frac{1}{2}e^{-x^2/2a^2}$ This is not what I get. Due to $\hat{A}^\dagger\hat{A}\hat{A}\hat{A}^\dagger\psi_0=0$ I get $\displaystyle\frac{2 \hbar^4}{4 a^4}$ am I missing something? Thanks 4. Apr 2, 2013 ### vela Staff Emeritus What are you getting for the other terms? It works out as it should for me. The first term should give you a 2 by itself. 5. Apr 2, 2013 ### bobred As the first two sequences of operators preserve the ground eigenfunction and the last sequence zero, I get $\left\langle p^4_x \right\rangle=\displaystyle\frac{ \hbar^4}{4 a^4}\left[ 2 \displaystyle\int^\infty _{-\infty} \psi^*_0 \psi_0 dx \right]=\displaystyle\frac{2 \hbar^4}{4 a^4}$ Thanks 6. Apr 2, 2013 ### vela Staff Emeritus Remember that the raising and lowering operators don't simply change the state but introduce a multiplicative factor as well. 7. 
Apr 2, 2013 ### bobred Right so I should be looking at $\hat{A}\hat{A}^\dagger-\hat{A}^\dagger\hat{A}=1$? 8. Apr 3, 2013 ### bobred Hi Thanks for your help what I needed was $\sqrt{n}\psi_{n-1}$ and $\sqrt{n+1}\psi_{n+1}$. I now get the correct answer.
# Omit Canary Hosts Predicate¶ This extension may be referenced by the qualified name envoy.retry_host_predicates.omit_canary_hosts Note This extension is intended to be robust against untrusted downstream traffic. It assumes that the upstream is trusted. ## config.retry.omit_canary_hosts.v2.OmitCanaryHostsPredicate¶ [config.retry.omit_canary_hosts.v2.OmitCanaryHostsPredicate proto] {}
1.9 Waves  (Page 3/7) Page 3 / 7 Earthquake waves under Earth’s surface also have both longitudinal and transverse components (called compressional or P-waves and shear or S-waves, respectively). These components have important individual characteristics—they propagate at different speeds, for example. Earthquakes also have surface waves that are similar to surface waves on water. Why is it important to differentiate between longitudinal and transverse waves? In the different types of waves, energy can propagate in a different direction relative to the motion of the wave. This is important to understand how different types of waves affect the materials around them. Phet explorations: wave on a string Watch a string vibrate in slow motion. Wiggle the end of the string and make waves, or adjust the frequency and amplitude of an oscillator. Adjust the damping and tension. The end can be fixed, loose, or open. Section summary • A wave is a disturbance that moves from the point of creation with a wave velocity ${v}_{\text{w}}$ . • A wave has a wavelength $\lambda$ , which is the distance between adjacent identical parts of the wave. • Wave velocity and wavelength are related to the wave’s frequency and period by ${v}_{\text{w}}=\frac{\lambda }{T}$ or ${v}_{\text{w}}=\mathrm{f\lambda }.$ • A transverse wave has a disturbance perpendicular to its direction of propagation, whereas a longitudinal wave has a disturbance parallel to its direction of propagation. Conceptual questions Give one example of a transverse wave and another of a longitudinal wave, being careful to note the relative directions of the disturbance and wave propagation in each. What is the difference between propagation speed and the frequency of a wave? Does one or both affect wavelength? If so, how? Problems&Exercises Storms in the South Pacific can create waves that travel all the way to the California coast, which are 12,000 km away. How long does it take them if they travel at 15.0 m/s? 
$t=9.26~\text{d}$ Waves on a swimming pool propagate at 0.750 m/s. You splash the water at one end of the pool and observe the wave go to the opposite end, reflect, and return in 30.0 s. How far away is the other end of the pool? Wind gusts create ripples on the ocean that have a wavelength of 5.00 cm and propagate at 2.00 m/s. What is their frequency? $f=40.0~\text{Hz}$ How many times a minute does a boat bob up and down on ocean waves that have a wavelength of 40.0 m and a propagation speed of 5.00 m/s? Scouts at a camp shake the rope bridge they have just crossed and observe the wave crests to be 8.00 m apart. If they shake the bridge twice per second, what is the propagation speed of the waves? $v_{\text{w}}=16.0~\text{m/s}$ What is the wavelength of the waves you create in a swimming pool if you splash your hand at a rate of 2.00 Hz and the waves propagate at 0.800 m/s? What is the wavelength of an earthquake that shakes you with a frequency of 10.0 Hz and gets to another city 84.0 km away in 12.0 s? $\lambda =700~\text{m}$ Radio waves transmitted through space at $3.00\times 10^{8}~\text{m/s}$ by the Voyager spacecraft have a wavelength of 0.120 m. What is their frequency? Your ear is capable of differentiating sounds that arrive at the ear just 1.00 ms apart. What is the minimum distance between two speakers that produce sounds that arrive at noticeably different times on a day when the speed of sound is 340 m/s? $d=34.0~\text{cm}$ (a) Seismographs measure the arrival times of earthquakes with a precision of 0.100 s. To get the distance to the epicenter of the quake, they compare the arrival times of S- and P-waves, which travel at different speeds (see [link]). If S- and P-waves travel at 4.00 and 7.20 km/s, respectively, in the region considered, how precisely can the distance to the source of the earthquake be determined?
(b) Seismic waves from underground detonations of nuclear bombs can be used to locate the test site and detect violations of test bans. Discuss whether your answer to (a) implies a serious limit to such detection. (Note also that the uncertainty is greater if there is an uncertainty in the propagation speeds of the S- and P-waves.) what is variations in raman spectra for nanomaterials I only see partial conversation and what's the question here! what about nanotechnology for water purification please someone correct me if I'm wrong but I think one can use nanoparticles, specially silver nanoparticles for water treatment. Damian yes that's correct Professor I think Professor what is the stm is there industrial application of fullrenes. What is the method to prepare fullrene on large scale.? Rafiq industrial application...? mmm I think on the medical side as drug carrier, but you should go deeper on your research, I may be wrong Damian How we are making nano material? what is a peer What is meant by 'nano scale'? What is STMs full form? LITNING scanning tunneling microscope Sahil how nano science is used for hydrophobicity Santosh Do u think that Graphene and Fullrene fiber can be used to make Air Plane body structure the lightest and strongest. Rafiq Rafiq what is differents between GO and RGO? Mahi what is simplest way to understand the applications of nano robots used to detect the cancer affected cell of human body.? How this robot is carried to required site of body cell.? what will be the carrier material and how can be detected that correct delivery of drug is done Rafiq Rafiq what is Nano technology ? write examples of Nano molecule? Bob The nanotechnology is as new science, to scale nanometric brayan nanotechnology is the study, desing, synthesis, manipulation and application of materials and functional systems through control of matter at nanoscale Damian Is there any normative that regulates the use of silver nanoparticles? 
what king of growth are you checking .? Renato What fields keep nano created devices from performing or assimulating ? Magnetic fields ? Are do they assimilate ? why we need to study biomolecules, molecular biology in nanotechnology? ? Kyle yes I'm doing my masters in nanotechnology, we are being studying all these domains as well.. why? what school? Kyle biomolecules are e building blocks of every organics and inorganic materials. Joe anyone know any internet site where one can find nanotechnology papers? research.net kanaga sciencedirect big data base Ernesto Introduction about quantum dots in nanotechnology what does nano mean? nano basically means 10^(-9). nanometer is a unit to measure length. Bharti do you think it's worthwhile in the long term to study the effects and possibilities of nanotechnology on viral treatment? absolutely yes Daniel Got questions? Join the online conversation and get instant answers!
## The difference and sum of projectors.(English)Zbl 1060.15011 The authors give simple proofs, without reference to rank theory for matrices, of some results on the non-singularity of the difference $$P-Q$$ of projections $$P$$ and $$Q$$ obtained by J. Gross and G. Trenkler [SIAM J. Matrix Anal. Appl. 21, No. 2, 390–395 (1999; Zbl 0946.15020)]. In the proofs, the relations among the ranges and the null-spaces of projections $$P$$ and $$Q$$ have been employed. A new characterization of the non-singularity of $$P-Q$$ in terms of the non-singularity of $$P+Q$$ has been obtained. The authors also give necessary and sufficient conditions for the non-singularity of $$P+Q$$ and explicit formulae for the inverse of $$P+Q$$ separately in the case when $$P-Q$$ is non-singular, and when $$P-Q$$ is singular. ### MSC: 15A09 Theory of matrix inversion and generalized inverses 15A24 Matrix equations and identities ### Keywords: projector; direct sum; inverse; non-singularity Zbl 0946.15020 Full Text: ### References: [1] Buckholtz, D, Inverting the difference of Hilbert space projections, Amer. math. monthly, 104, 60-61, (1997) · Zbl 0901.46019 [2] Buckholtz, D, Hilbert space idempotents and involutions, Proc. amer. math. soc., 128, 1415-1418, (2000) · Zbl 0955.46015 [3] Groß, J; Trenkler, G, Nonsingularity of the difference of two oblique projectors, SIAM J. matrix anal. appl., 21, 390-395, (1999) · Zbl 0946.15020 [4] Koliha, J.J, Range projections of idempotents in $$C\^{}\{∗\}$$-algebras, Demonstratio math., 34, 91-103, (2001) · Zbl 0981.46047 [5] Koliha, J.J; Rakočević, V, On the norms of idempotents in $$C\^{}\{∗\}$$-algebras, Rocky mountain J. math., 34, 685-697, (2004) · Zbl 1066.46044 [6] Ljance, V.E, Some properties of idempotent operators (in Russian), Teor. i prikl. mat. 
L’vov, 1, 16-22, (1959) [7] Marsaglia, G; Styan, G.P.H, Equalities and inequalities for the rank of matrices, Linear multilinear algebra, 2, 269-292, (1974) [8] Pták, V, Extremal operators and projectors, Časopis Pěst. mat., 110, 343-350, (1985) · Zbl 0611.47022 [9] Rakočević, V, On the norm of idempotent in a Hilbert space, Amer. math. monthly, 107, 748-750, (2000) · Zbl 0993.47009 [10] Tian, Y; Styan, G.P.H, Rank equalities for idempotent and involutory matrices, Linear algebra appl., 335, 101-117, (2001) · Zbl 0988.15002 [11] Vidav, I, On idempotent operators in a Hilbert space, Publ. inst. math. (beograd), 4, 18, 157-163, (1964) · Zbl 0125.35101 [12] Wimmer, H.K, Canonical angles of unitary spaces and perturbations of direct complements, Linear algebra appl., 287, 373-379, (1999) · Zbl 0937.15002 [13] Wimmer, H.K, Lipschitz continuity of projectors, Proc. amer. math. soc., 128, 873-876, (2000) · Zbl 0935.46014 This reference list is based on information provided by the publisher or from digital mathematics libraries. Its items are heuristically matched to zbMATH identifiers and may contain data conversion errors. It attempts to reflect the references listed in the original paper as accurately as possible without claiming the completeness or perfect precision of the matching.
# xcube=64 ## Simple and best practice solution for xcube=64 equation. Check how easy it is, and learn it for the future. Our solution is simple, and easy to understand, so dont hesitate to use it as a solution of your homework. If it's not what You are looking for type in the equation solver your own equation and let us solve it. ## Solution for xcube=64 equation: Simplifying xcube = 64 Solving bceux = 64 Solving for variable 'b'. Move all terms containing b to the left, all other terms to the right. Divide each side by 'ceux'. b = 64c-1e-1u-1x-1 Simplifying b = 64c-1e-1u-1x-1` ## Related pages 0.075 as a fractionalogxprime factorization 58x 2 sinxsquare root of 67587 roman numeralswhat is the greatest common factor of 45 and 72x 2 12x 4 06y 9 3 2y 38y 2y 3 9canvas.allenisd.orgwhat is 3.5 percent of 250000how much is 30 pounds in nairaconvert 0.65 to a fraction6x 3y 6gcf of 150493.90the prime factorization of 102solving binomial equations calculator650-175106-48derivative of ln cosxgreatest common factor of two monomials calculatorwhat is the derivative of tanxfactoring calculator stepswhat is prime factorization of 49prime factorization of 186factor 4x 2 16x 16percent to a decimal calculatorx2 2x 63derivative 3xgreatest common factor of 72 and 54cscx secx tanxsolution to the system of equations calculatorv lwh calculator6y 9 3 2y 3common multiples of 4 and 8equation calculator & solverwhat is the prime factorization of 14063r2 sinx graphwhat is the prime factorization of 103what is the prime factorization of 142lcm calulatormaths equations calculatoradding fractions calculator with mixed numberswhat is the prime factorization of 594x 2y 8 0logaritme5t lbwhat is the greatest common factor of 72 and 54prime factorization of 1974-25prime factorization of 112254 prime factorizationprime factors of 375derive ln 2xtanx secx 2cosx4x 2y 8 0algebra calculater3y 4x 12log28roman numerals 85derivative of y sinxhow much is 4000 rupees in poundssin 2x differentiatecos2x x0.125 
as fractionderivative of 3xmultiples of 252solve y 2x-63000 dollars to pounds90lb to kgprime factorization of 81sqrt x 2 y 2
Lecture 2: Sampling Triangles (43) shannonhu-144 V dot N is |V||N|cos(theta), where theta is the angle between N and V. If V dot N is greater than 0, then theta must be less than 90, so P must be on the same half-plane as N. If V dot N is 0, then theta is 90, so P must be on the line. If V dot N is negative, then theta is between 90 and 180 (exclusive), so P must be on the opposite half-plane as N.
# 13.4: Accounting for Product Warranties Learning Objectives At the end of this section, students should be able to meet the following objectives: 1. Explain the difference between an embedded and an extended product warranty. 2. Account for the liability and expense incurred by a company that provides its customers with an embedded warranty on a purchased product. 3. Account for the amount received on the sale of an extended warranty and any subsequent cost incurred as a result of this warranty. 4. Compute the average age of accounts payable. Question: FASB Statement Number 5 includes anembedded product warrantyas an example of a contingency. A company sells merchandise such as a car or a microwave and agrees to fix certain problems if they arise within a specified period of time. If the car’s transmission breaks, for example, the seller promises to replace it. Making the sale with a warranty attached is the past event that creates this contingency. However, the item acquired by the customer must break before the company has an actual loss. That outcome is uncertain. In accounting for contingencies, several estimates are required: • The approximate number of claims • The likelihood that claims will result from the warranty • The eventual cost As an example, General Electric reported on its December 31, 2008, balance sheet a liability for product warranties totaling over $1.68 billion. That is certainly not a minor obligation. In the notes to the financial statements, the company explains, “We provide for estimated product warranty expenses when we sell the related products. Because warranty estimates are forecasts that are based on the best available information—mostly historical claims experience—claims costs may differ from amounts provided.How does a company record and report contingencies such as product warranties? 
Answer: In accounting for warranties, cash rebates, the collectability of receivables and other similar contingencies, the likelihood of loss is not an issue. These losses are almost always probable. For the accountant, the challenge is in arriving at a reasonable estimate of that loss. How many microwaves will break and have to be repaired? What percentage of cash rebate coupons will be presented by customers in the allotted time? How often will a transmission need to be replaced? Many companies utilize such programs on an ongoing basis so that data from previous offers will be available to help determine the amount of the expected loss. However, historical trends cannot be followed blindly. Officials still have to be alert for any changes that could impact previous patterns. For example, in bad economic periods, customers are more likely to take the time to complete the paperwork required to receive a cash rebate. Or the terms may vary from one warranty program to the next. Even small changes in the wording of an offer can alter the expected number of claims. To illustrate, assume that a retail store sells ten thousand refrigerators during Year One for$400 cash each. The product is covered by a warranty that extends until the end of Year Three. No claims are made in Year One but similar programs in the past have resulted in repairs being made to 3 percent of the refrigerator at an average cost of $90. Thus, this warranty is expected to cost a total of$27,000 (ten thousand units × 3 percent or three hundred claims × $90 each). Immediate recognition is appropriate because the loss is both probable and subject to reasonable estimation. Although no repairs are made in Year One, the$27,000 is recognized in that period. All requirements for a liability have been met. In addition, the matching principle states that expenses should be recorded in the same period as the revenues they help generate. 
The revenue from the sale of the refrigerators is recognized in Year One so the warranty expense resulting from those revenues is also included at that time. Figure 13.11 Year One—Sale of Ten Thousand Refrigerators for $400 Each Figure 13.12 Year One—Recognize Expected Cost of Warranty Claims This warranty is in effect until the end of Year Three. Assume in the year following the sale (Year Two) that repairs costing$13,000 are made for these customers at no charge. Refrigerators break and are fixed as promised. The expense has already been recognized in the year of sale so the payments made by the company serve to reduce the recorded liability. They have no additional impact on net income. Figure 13.13 Year Two—Payment for Repairs Covered by Warranty At the end of Year Two, the warranty payable T-account in the general ledger holds a balance of $14,000 ($27,000 original estimation less $13,000 payout for repairs to date). Because the warranty has not expired, company officials need to evaluate whether this$14,000 liability is still a reasonable estimation of the remaining costs to be incurred. If so, no further adjustment is made. However, the original $27,000 was an estimate. More information is now available, some of which might suggest that$14,000 is no longer the best number to be utilized for the final period of the warranty. As an illustration, assume that a design flaw has been found in the refrigerators and that $20,000 (rather than$14,000) is now the estimate of the costs to be incurred in the final year of the warranty. The $14,000 is no longer appropriate. The reported figure must be updated to provide a fair presentation of the information that is now available. Estimations should be changed at the point that new data provide a clearer vision of future events. Figure 13.14 December 31, Year Two—Adjust Warranty Liability from$14,000 to Expected $20,000 In this adjusting entry, the change in the expense is not recorded in the period of the sale. 
As discussed earlier, no retroactive changes are made in previously reported figures unless fraud occurred or an estimate was held to be so unreasonable that it was not made in good faith. Question: Not all warranties are built into a sales transaction. Many retailers also provideextended product warrantiesbut for an additional fee. For example, assume a business sells a high-definition television with an automatic one-year warranty. The buyer receives this warranty as part of the purchase price. The accounting for that first year is the same as just demonstrated; an estimated expense and liability are recognized at the time of sale. However, an additional warranty for three more years is also offered at a price of$50. If on January 1, Year One, a customer chooses to acquire this three-year coverage, what recording is made by the seller? Is an extended warranty purchased by a customer reported in the same manner as an automatic warranty embedded within a sales contract? Answer: Extended warranties, which are quite popular in some industries, are simply insurance policies. If the customer buys the coverage, the product is insured against breakage or other harm for the specified period of time. In most cases, the company is making the offer in an attempt to earn extra profit. The seller hopes that the amount received for the extended warranty will outweigh the eventual repair costs. Therefore, the accounting differs here from that demonstrated for an embedded warranty that was provided to encourage the sale of the product. Because of the matching principle, the anticipated expense was recognized in the same period as the revenue generated by the sale of the product. By accepting money for an extended warranty, the seller agrees to provide services in the future. This contract is much like a gift card. The revenue is not earned until the earning process is substantially complete in the future. 
Thus, the $50 received for the extended warranty is initially recorded as “unearned revenue.” This balance is a liability because the company owes a specified service to the customer. As indicated previously, liabilities do not always represent future cash payments. Figure 13.15 January 1, Year One—Sale of Extended Warranty Covering Years 2–4 Note that no expense was estimated and recorded in connection with this warranty. As explained by the matching principle, no expense is recognized until the revenue begins to be reported. Because of the terms specified, this extended warranty does not become active until January 1, Year Two. The television is then covered for a three-year period. The revenue is recognized, most likely on a straight-line basis, over that time. The$50 will be recognized at the rate of 1/3 per year or $16.66. Figure 13.16 December 31, Year Two (Three and Four)—Recognition of Revenue from Extended Warranty In any period in which a repair must be made, the expense is recognized as incurred because revenue from this warranty contract is also being reported. To illustrate, assume that on August 8, Year Two, a slight adjustment must be made to the television at a cost of$9. The product is under warranty so there is no charge to the customer for this service. The expense recognized below is matched with the Year Two revenue recognized above. Figure 13.17 August 8, Year Two—Repair Television under Contract ### Exercise Link to multiple-choice question for practice purposes: http://www.quia.com/quiz/2092999.html Question: Previously, the current ratio (current assets divided by current liabilities) and the amount of (current assets minus current liabilities) were discussed. Are there additional vital signs that relate to current liabilities that should be analyzed when looking at an organization? 
Should decision makers be aware of any specific ratios or amounts in connection with current liabilities that provide especially insightful information about a company’s financial health and operations? Answer: In studying current liabilities, the number of days a business takes to pay its accounts payable is a figure of interest. If a business begins to struggle, the time of payment tends to lengthen because of the difficulty in generating sufficient cash amounts. Therefore, an unexpected jump in this number is often one of the first signs of financial problems and warrants concern. To determine the age of accounts payable (or the number of days in accounts payable), the amount of inventory purchased during the year is first calculated: cost of goods sold = beginning inventory + purchases – ending inventory, Thus, purchases = cost of goods sold – beginning inventory + ending inventory. Using this purchases figure, the number of days that a company takes to pay its accounts payable on the average can be found. Either the average accounts payable for the year can be used below or just the ending balance. purchases/365 = average purchases per day accounts payable/average purchases per day = average age of accounts payable As an illustration, the following information comes from the 2008 financial statements for Safeway Inc. Figure 13.18 Information from 2008 Financial Statements for Safeway Inc. The total of inventory purchases by Safeway during 2008 was over $31 billion: purchases = cost of goods sold – beginning inventory + ending inventory purchases =$31.589 billion – $2.798 billion +$2.591 billion purchases = $31.382 billion. The average purchases amount made each day during 2008 by this company was nearly$86 million: purchases/365 $31.382/365 =$85.978 million. The average age of ending accounts payable for Safeway at this time is between twenty-eight and twenty-nine days: accounts payable/average daily purchases $2.449 billion/$85.978 million = 28.48 days. 
To evaluate that number, a decision maker would need to compare it to previous time periods, the typical payment terms for a business in that industry, and comparable figures from other similar corporations. Interestingly, the same computation for the previous year (2007) showed that Safeway was taking over thirty-four days to pay off its accounts payable during that period. ### Key Takeaway Many companies incur contingent liabilities as a result of product warranties. If the warranty is given to a customer along with a purchased item, an anticipated expense should be recognized at that time as well as the related liability. If the cost of this type of embedded warranty eventually proves to be incorrect, the correction is made when discovered. Companies also sell extended warranties, primarily as a means of increasing profits. These warranties are recorded initially as liabilities and are reclassified to revenue over the time of the obligation. Subsequent costs are expensed as incurred to align with the matching principle. Expenses are not estimated and recorded in advance. Analysts often determine the average age of accounts payable to determine how quickly liabilities are being paid as an indication of an entity’s financial health. #### Talking with a Real Investing Pro (Continued) Following is a continuation of our interview with Kevin G. Burns. Question: Analysts often look closely at current liabilities when evaluating the future prospects of a company. Is there anything in particular that you look for when examining a company and its current liabilities? Kevin Burns: For almost any company, there are a number of things that I look at in connection with current liabilities. I always have several questions where possible answers can concern me. I am interested in the terms of the current liabilities as well as the age of those liabilities. In other words, is the company current with its payments to vendors? 
Does the company have a significant amount of current liabilities but only a small amount of current assets? Or, stated more directly, can these liabilities be paid on time? Have current liabilities been growing while business has remained flat or grown much more slowly? Are any of the current liabilities to organizations controlled by corporate insiders? That always makes me suspicious so that, at the very least, I want more information. In sum, I like balance sheets where there are no potential conflicts of interest and the company is a reasonably fast payer of its debts. ### Video Clip Unnamed Author talks about the five most important points in Chapter 13 “In a Set of Financial Statements, What Information Is Conveyed about Current and Contingent Liabilities?”.
# Length of time in a satellite spends in Earths shadow I have come across a problem that has had me stumped for a while now. A spacecraft is in a Sun-synchronous Earth orbit with a =1.40 R⊕ and e = 0.2. The argument of periapsis is ω = 0◦ , and the Right Ascension of the Ascending Node, Ω, is equal to the Sun’s Right Ascension plus 12 hours. Calculate the length of time per orbit for which the spacecraft is in Earth’s shadow, assuming that the Earth is at an equinox. I'm not fully sure how to interpret this, as everywhere I read, Sun-synchronous orbits are always sunlit? Any help or a point in the right direction would be much appreciated. • How sun-synchronous orbits really work is not a trivial thing to understand, or even explain, especially because of the substantial inclination of the Earth's axis relative to its orbit around the Sun. But I'm sure that nowhere reasonable have you read that "...Sun-synchronous orbits are always sunlit" though they are possible. If you can find a link, or cite a paragraph with block-quotes describing what it is that you've read, maybe we can figure out the disconnect there. A high orbit that follows the terminator (dusk/dawn) can in principle remain in Sunlight essentially all of the time. – uhoh Jan 7 '18 at 2:47 • There may be a very rare eclipse of the Sun by the Moon, but those can be predicted and the orbit planned to avoid them. Take a look here for more info... en.wikipedia.org/wiki/Sun-synchronous_orbit#Technical_details Also check out this video youtu.be/4K5FyNbV0nA It is possible that this simulator (where you can enter your data) can "shed some light" on the problem, but I am not sure how to "turn on" the Sun and visualize daylight with it yet. orbitalmechanics.info – uhoh Jan 7 '18 at 2:51 • Also, Quora; quora.com/What-is-a-Sun-synchronous-orbit and also quora.com/… where a beta-angle of about 90 degrees would put the orbit near the terminator and therefore more likely to remain in sunlight. 
Also this; wmo.int/pages/prog/sat/globalplanning_en.php – uhoh Jan 7 '18 at 2:58 • "the Right Ascension of the Ascending Node, Ω, is equal to the Sun's Right Ascension plus 12 hours." This means that when the satellite crosses the equator going North, it is midnight local time. – Ghedipunk Dec 27 '19 at 16:34 Sun-synchronous orbits are inclined by a few degrees away from going directly over the poles. Because of this, they can go "behind" the Earth at either the North or South Pole, going into shadow. (An equinox has the easiest geometry for this, because the terminator is also aligned with the poles.) The first step for this problem would be to work out the exact inclination needed by the given orbit. The Wikipedia article explains how to do this. Then take that inclined orbit as an ellipse, project it onto the plane perpendicular to the Sun, and see where it goes behind the Earth. as everywhere I read, Sun-synchronous orbits are always sunlit? No. What you probably read is that the only orbit that can be always sunlit is a sun-synchronous orbit. A sun-synchronous orbit has an inclination from Earth's rotational axis, generally about eight degrees, but the precise amount depends on the altitude of the orbit. Because of Earth's equatorial bulge, the orbit precesses at about one degree per day, so it is always over the same local time at the same point of its orbit. If the orbit follows the terminator, that local time happens to be dawn / dusk, and so the satellite is never in the Earth's shadow. But if that local time happens to be noon/midnight, or most any other time, then it will of course be eclipsed once per orbit. It's possible that the moon may eclipse the sun while the satellite is in orbit. This can be planned for and avoided by properly choosing the orbit, or, I imagine, later adjusting it. Or it can just be ignored because it wouldn't last that long for a fast moving satellite and the batteries can carry it through.
• Argh, how can I not embed the youtube video but just provide a link?? – Ross Presser Dec 27 '19 at 16:26 • @RossPresser [link text here](http://example.com)... So in your case, "The video [Space: Prograde, Retrograde, and Sun-Synchronous Orbits](https://youtu.be/4K5FyNbV0nA) explains and visualizes many kinds..." – Ghedipunk Dec 27 '19 at 16:36 • @Ghedipunk Thanks! – Ross Presser Dec 27 '19 at 17:41 A sun-synchronous orbit is not an orbit that is always lit. Always being in sunlight is one potential benefit of a sun-synchronous orbit. These types of sun-synchronous orbits are often used with satellites that use power constantly, such as radar and lidar surveys. The main benefit of a sun-synchronous orbit, though, is that at the same period of your orbit, for every orbit, the sun will be in the same direction. If you're passively imaging the Earth, like the Landsat satellites do, you want shadows to be as shallow as possible, so you will want to have the sun directly above you each and every pass. You'll want as much of your sun-synchronous orbit to go over areas during local Noon as possible... which means that roughly half of your orbit will also be over an area that is in local midnight, as well. This homework question packs in a lot of jargon into a short paragraph. As this question is over a year old, so won't help with anyone's homework tonight, but would help people understand orbits in general, let's break it apart: a = 1.40 R⊕ and e = 0.2. The argument of periapsis is ω = 0◦, and the Right Ascension of the Ascending Node, Ω, is equal to the Sun’s Right Ascension plus 12 hours. Altitude = 1.4 * Earth's radius. (radius = 8,904 km; altitude above sea level = 2,544 km) Eccentricity = 0.2 (significant) Argument of periapsis (how close the periapsis is to the Ascending Node) = 0° Right Ascension of the Ascending Node = Sun's Right Ascension + 12 hours. From the average altitude, we can tell the orbital period: 2h19m47s. 
With an eccentricity of 0.2, periapsis will have a radius at 7,123 km (altitude: 763 km), and apoapsis will have a radius at 10,684 km (altitude: 4,324 km). With an Argument of Periapsis at 0°, our periapsis will occur right at the ascending node. That is, periapsis is right at the point in our orbit where we cross the equator while going north. The Right Ascension is a location relative to "the celestial sphere." Specifically, how far East we are of the westernmost point of the constellation Aries. Right Ascension is measured in time, with the celestial sphere separated into 24 hours, which are divided into 60 minutes, which are divided into 60 seconds. 6 hours = 90°... 12 hours = 180°, etc. Fortunately for us, our orbit is a sun-synchronous orbit. The Right Ascension of any point in our orbit will always have the same offset relative to the sun (plus or minus a little bit of wiggle room, since the Earth's gravity field is a lumpy oblate spheroid (continents are "heavier" than the ocean floor)). The Right Ascension of our Ascending Node is what we care about the most: Since our Right Ascension is measured relative to the Sun, we can know the local time below our craft, just by looking at our offset. In our case, our offset is 12 hours, so our Ascending Node will always be at local midnight (since, by definition, the sun is always located at local noon). We will be passing through the widest part of Earth's shadow. Our speed at periapsis is going to be 8,194 m/s, and at apoapsis will be 5,463 m/s. The circumference of our orbit will be 55,917 km. Since we're not actually launching a satellite, we aren't too worried about how big our batteries will need to be, or how much light our solar panels will need to gather. Our satellite is getting the same treatment as those pesky spherical cows that graze on frictionless, infinite planes. 
We'll handwave away the penumbra and the scattering of light from the atmosphere, and say that we have a binary condition for in light or in shadow, and that condition happens right at the center of the penumbra. (That is, right when the limb of the Earth covers the centerpoint of the sun.) With the Earth's radius at the poles being 6,356 km, we have a shadow 12,712 km wide. Since we're traveling along a curved path, we need to know how much distance we're traveling through that 12,712 km wide shadow... Our adjacent leg of our right triangle is the radius at periapsis: 7123 km. Our opposite leg is the radius of the Earth at its poles: 6356 km... arctan(6356/7123) ≈ 41.8°.
# DMGT ISSN 1234-3099 (print version) ISSN 2083-5892 (electronic version) # IMPACT FACTOR 2018: 0.741 SCImago Journal Rank (SJR) 2018: 0.763 Rejection Rate (2017-2018): c. 84% # Discussiones Mathematicae Graph Theory Article in press Authors: C.J. Jayawardene, D. Narváez, S.P. Radziszowski Title: Star-critical Ramsey numbers for cycles versus $K_4$ Source: Discussiones Mathematicae Graph Theory Received: 2017-09-25, Revised: 2018-11-05, Accepted: 2018-11-05, https://doi.org/10.7151/dmgt.2190 Abstract: Given three graphs $G$, $H$ and $K$ we write $K\rightarrow (G,H)$, if in any red/blue coloring of the edges of $K$ there exists a red copy of $G$ or a blue copy of $H$. The Ramsey number $r(G, H)$ is defined as the smallest natural number $n$ such that $K_n\rightarrow (G,H)$ and the star-critical Ramsey number $r_*(G, H)$ is defined as the smallest positive integer $k$ such that $K_{n-1} \sqcup K_{1,k} \rightarrow (G, H)$, where $n$ is the Ramsey number $r(G,H)$. When $n \geq 3$, we show that $r_*(C_n,K_4)=2n$ except for $r_*(C_3,K_4)=8$ and $r_*(C_4,K_4)=9$. We also characterize all Ramsey critical $r(C_n,K_4)$ graphs. Keywords: Ramsey theory, star-critical Ramsey numbers
### Show Posts This section allows you to view all posts made by this member. Note that you can only see posts made in areas you currently have access to. ### Messages - Junhong Zhou Pages: [1] 1 ##### Quiz 2 / Quiz2-6101 6D « on: October 02, 2020, 02:22:25 PM » Problem(3pt). Find all points of continuity of the given function; $$f(z)= \begin{cases}\frac{z^4-1}{z-i},& z\neq i\\4i, & z=i \end{cases}$$ f(z) is continuous when $z\neq i$. When z = i, then $z^4-1=i^4-1=1-1=0$ $z - i = i-i = 0$ Now use the L'Hospital's Rule we have: \begin{align*} \lim_{z \to i} \frac{z^4-1}{z-i} &= \lim_{z \to i} \frac{4z^3}{1}\\ &= 4i^3\\ &= -4i\\ & \neq 4i \end{align*} Therefore f(z) is not continuous at z = i. 2 ##### Quiz-3 / tut0402 quiz3 « on: October 11, 2019, 02:03:35 PM » Question: Find the Wronskian of the given pair of functions. $$cos^2(x), 1+cos(2x)$$ $$W = \det \begin{vmatrix} cos^2(x) & 1+cos2x \\ -2sin(x)cos(x) & -2sin(2x) \end{vmatrix} = \det \begin{vmatrix} cos^2(x) & 2cos^2(x) \\ -sin(2x) & -2sin(2x) \end{vmatrix}$$ \begin{align} \implies W &= -2cos^2(x)sin2(x)-(-sin(2x))(2cos^2(x))\notag\\ &= 2sin(2x)(-cos^2(x)+cos^2(x))\notag\\ &= 2sin(2x)\cdot0\notag\\ &= 0\notag \end{align} 3 ##### Quiz-2 / TUT0402 quiz2 « on: October 04, 2019, 02:00:07 PM » Question: Find an integrating factor and solve the given equation. $$(3x^2y+2xy+y^3)+(x^2+y^2)y'=0$$ \begin{align} M(x,y)=3x^2y+2xy+y^3 &\implies M_y=3x^2+2x+3y^2\notag\\ N(x,y)=x^2+y^2 &\implies N_x=2x\notag \end{align} Since $M_y \neq N_x$, this implies the given differential equation is not exact, so we need to find $\mu(x,y)$ such that the equation $\mu(3x^2+2xy+y^3)+\mu(x^2+y^2)y'=0$ is exact. 
$$R(x)=\frac{M_y-N_x}{N}=\frac{3x^2+3y^2}{x^2+y^2}=3$$ then we can write $\mu$: $$\mu(x,y)=e^{\int R(x)dx}=e^{\int 3 dx}=e^{3x}$$ multiply the given differential equation by $\mu$: \begin{align} \mu(3x^2y+2xy+y^3)+\mu(x^2+y^2)y' &= 0\notag\\ e^{3x}(3x^2y+2xy+y^3)+e^{3x}(x^2+y^2)y' &= 0\notag \end{align} Which is now an exact differential equation, this implies there exist $\varphi(x,y)$ such that $\varphi_x=M$ and $\varphi_y=N$. \begin{align} \varphi_y(x,y)=e^{3x}(x^2+y^2) &\implies \varphi(x,y) =\int e^{3x}(x^2+y^2)dy\notag\\ &\implies \varphi(x,y) = e^{3x}x^2y+\frac{1}{3}e^{3x}y^3+f(x)\notag \end{align} $$\varphi_x(x,y)=2e^{3x}xy+3e^{3x}x^2y+e^{3x}y^3+f'(x) \implies f'(x)=0 \implies f(x)=C\notag$$ Therefore: $$\varphi(x,y)=e^{3x}x^2y+\frac{1}{3}e^{3x}y^3=C$$ $$e^{3x}x^2y+\frac{1}{3}e^{3x}y^3=C$$ 4 ##### Quiz-1 / TUT0402 quiz1 « on: September 27, 2019, 02:00:07 PM » $$ty'+2y=sin(t)$$ write this into standard equation: $$y'+\frac{2}{t}y=\frac{sin(t)}{t}$$ which here we have $$p(t)=\frac{2}{t}$$ $$\mu(x)=e^{\int p(t)dt}=e^{\int\frac{2}{t}dt}=e^{2\ln{t}}=t^2$$ multiply $\mu(x)$ to both sides of the standard equation we have: \begin{align} (t^2)y'+(t^2)\frac{2}{t}y &= (t^2)\frac{sin(t)}{t}\notag\\ t^2y'+2ty &= tsin(t)\notag\\ \int t^2y'+2tydt &=\int tsin(t)dt\notag\\ t^2y &= \int tsin(t) \notag \end{align} integrate $\int tsin(t)dt$ by parts, let $u=t$ and $dv=sin(t)dt$: \begin{align} \int tsin(t)dt &= t(-cos(t))-\int -cos(t)dt\notag\\ &= -tcos(t)+\int cos(t)dt\notag\\ &= -tcos(t)+sin(t)+C\notag \end{align} so we have: \begin{align} t^2y &= -tcos(t)+sin(t)+C\notag\\ y &= \frac{-tcos(t)+sin(t)+C}{t^2}\notag \end{align} So: $$\lim_{t\to\infty}y=\lim_{t\to\infty}\frac{-tcos(t)+sin(t)+C}{t^2}=0$$ Pages: [1]
# The radio structure of the peculiar narrow-line Seyfert 1 galaxy candidate J1100+4421 [GA] Narrow-line Seyfert 1 galaxies (NLS1) are an intriguing subclass of active galactic nuclei. Their observed properties indicate low central black hole mass and high accretion rate. The extremely radio-loud NLS1 sources often show relativistic beaming and are usually regarded as younger counterparts of blazars. Recently, the object SDSS J110006.07+442144.3 was reported as a candidate NLS1 source. The characteristics of its dramatic optical flare indicated its jet-related origin. The spectral energy distribution of the object was similar to that of the gamma-ray detected radio-loud NLS1, PMN J0948+0022. Our high-resolution European Very Long Baseline Interferometry Network observations at 1.7 and 5 GHz revealed a compact core feature with a brightness temperature of >~ 10^(10) K. Using the lowest brightness temperature value and assuming a moderate Lorentz factor of ~9 the jet viewing angle is <~ 26 deg. Archival Very Large Array data show a large-scale radio structure with a projected linear size of ~150 kpc reminiscent of double-sided morphology. K. Gabanyi, S. Frey, Z. Paragi, et. al. Fri, 22 Sep 17 4/75 Comments: 8 pages, 5 figures. Accepted for publication in MNRAS # On the formation mechanisms of compact elliptical galaxies [GA] In order to investigate the formation mechanisms of the rare compact elliptical galaxies (cE) we have compiled a sample of 25 cEs with good SDSS spectra, covering a range of stellar masses, sizes and environments. They have been visually classified according to the interaction with their host, representing different evolutionary stages. We have included clearly disrupted galaxies, galaxies that despite not showing signs of interaction are located close to a massive neighbor (thus are good candidates for a stripping process), and cEs with no host nearby. 
For the latter, tidal stripping is less likely to have happened and instead they could simply represent the very low-mass, faint end of the ellipticals. We study a set of properties (structural parameters, stellar populations, star formation histories and mass ratios) that can be used to discriminate between an intrinsic or stripped origin. We find that one diagnostic tool alone is inconclusive for the majority of objects. However, if we combine all the tools a clear picture emerges. The most plausible origin, as well as the evolutionary stage and progenitor type, can be then determined. Our results favor the stripping mechanism for those galaxies in groups and clusters that have a plausible host nearby, but favors an intrinsic origin for those rare cEs without a plausible host and that are located in looser environments. A. Ferre-Mateu, D. Forbes, A. Romanowsky, et. al. Fri, 22 Sep 17 6/75 Comments: Accepeted for publication in MNRAS. 24 pages, 21 figures, 5 tables # On the extended stellar structure around NGC 288 [GA] We report on observational evidence of an extra-tidal clumpy structure around NGC 288 from an homogeneous coverage of a large area with the Pan-STARRS PS1 database. The extra-tidal star population has been disentangled from that of the Milky Way field by using a cleaning technique that successfully reproduced the stellar density, luminosity function and colour distributions of MW field stars. We have produced the cluster stellar density radial profile and a stellar density map from independent approaches, from which we found results in excellent agreement : the feature extends up to 3.5 times the cluster tidal radius. Previous works based on shallower photometric data sets have speculated on the existence of several long tidal tails, similar to that found in Pal 5. 
The present outcome shows that NGC 288 could hardly have such tails, but favours the notion that interactions with the MW tidal field have been a relatively inefficient process for stripping stars off the cluster. These results point to the need of a renewed overall study of the external regions of Galactic globular clusters (GGCs) in order to reliably characterise them. Hence, it will be possible to investigate whether there is any connection between detected tidal tails, extra-tidal stellar populations, extended diffuse halo-like structures and the GGCs’ dynamical histories in the Galaxy. A. Piatti Fri, 22 Sep 17 10/75 Comments: 7 pages, 3 figures. Accepted for publication in MNRAS # NuSTAR hard X-ray data and Gemini 3D spectra reveal powerful AGN and outflow histories in two low-redshift Lyman-$α$ blobs [GA] We have shown that Lyman-$\alpha$ blobs (LABs) may still exist even at $z\sim0.3$, about 7 billion years later than most other LABs known (Schirmer et al. 2016). Their luminous Ly$\alpha$ and [OIII] emitters at $z\sim0.3$ offer new insights into the ionization mechanism. This paper focuses on the two X-ray brightest LABs at $z\sim0.3$, SDSS J0113$+$0106 (J0113) and SDSS J1155$-$0147 (J1155), comparable in size and luminosity to B1′, one of the best-studied LABs at $z \gtrsim$ 2. Our NuSTAR hard X-ray (3–30 keV) observations reveal powerful active galactic nuclei (AGN) with $L_{2-10{\;\rm keV}}=(0.5$–$3)\times10^{44}$ erg cm$^{-2}$ s$^{-1}$. J0113 also faded by a factor of $\sim 5$ between 2014 and 2016, emphasizing that variable AGN may cause apparent ionization deficits in LABs. Joint spectral analyses including Chandra data constrain column densities of $N_{\rm H}=5.1^{+3.1}_{-3.3}\times10^{23}$ cm$^{-2}$ (J0113) and $N_{\rm H}=6.0^{+1.4}_{-1.1}\times10^{22}$ cm$^{-2}$ (J1155). J0113 is likely buried in a torus with a narrow ionization cone, but ionizing radiation is also leaking in other directions as revealed by our Gemini/GMOS 3D spectroscopy. 
The latter shows a bipolar outflow over $10$ kpc, with a peculiar velocity profile that is best explained by AGN flickering. X-ray analysis of J1155 reveals a weakly absorbed AGN that may ionize over a wide solid angle, consistent with our 3D spectra. Extinction corrected [OIII] log-luminosities are high, $\sim43.6$. The velocity dispersions are low, $\sim100$–$150$ km s$^{-1}$, even at the AGN positions. We argue that this is a combination of high extinction hiding the turbulent gas, and previous outflows that have cleared the escape paths for their successors. T. Kawamuro, M. Schirmer, J. Turner, et. al. Fri, 22 Sep 17 11/75 Comments: 15 pages, 17 Figures, accepted for publication in ApJ # Modeling $237$ Lyman-$α$ spectra of the MUSE-Wide survey [GA] We compare $237$ Lyman-$\alpha$ (Ly$\alpha$) spectra of the “MUSE-Wide survey” (Herenz et al. 2017) to a suite of radiative transfer simulations consisting of a central luminous source within a concentric, moving shell of neutral gas, and dust. This six parameter shell-model has been used numerously in previous studies, however, on significantly smaller data-sets. We find that the shell-model can reproduce the observed spectral shape very well – better than the also common Gaussian-minus-Gaussian’ model which we also fitted to the dataset. Specifically, we find that $\sim 94\%$ of the fits possess a goodness-of-fit value of $p(\chi^2)>0.1$. The large number of spectra allows us to robustly characterize the shell-model parameter range, and consequently, the spectral shapes typical for realistic spectra. We find that the vast majority of the Ly$\alpha$ spectral shapes require an outflow and only $\sim 5\%$ are well-fitted through an inflowing shell. In addition, we find $\sim 46\%$ of the spectra to be consistent with a neutral hydrogen column density $<10^{17}\,\mathrm{cm}^{-2}$ – suggestive of a non-negligible fraction of continuum leakers in the MUSE-Wide sample. 
Furthermore, we correlate the spectral and the Ly$\alpha$ halo properties against each other but do not find any strong correlation. M. Gronke Fri, 22 Sep 17 19/75 # Chemical enrichment and accretion of nitrogen-loud quasars [GA] We present rest-frame optical spectra of 12 nitrogen-loud quasars at z ~ 2.2, whose rest-frame ultraviolet (UV) spectra show strong nitrogen broad emission lines. To investigate their narrow-line region (NLR) metallicities, we measure the equivalent width (EW) of the [OIII]5007 emission line: if the NLR metallicity is remarkably high as suggested by strong UV nitrogen lines, the [OIII]5007 line flux should be very weak due to the low equilibrium temperature of the ionized gas owing to significant metal cooling. As a result, we found that our spectra show moderate EW of the [OIII]5007 line similar to general quasars. This indicates nitrogen-loud quasars do not have extremely metal-rich gas clouds in NLRs. This suggests that strong nitrogen lines from broad-line regions (BLRs) originate from exceptionally high abundances of nitrogen relative to oxygen without very high BLR metallicities. This result indicates that broad-emission lines of nitrogen are not good indicators of the BLR metallicity in some cases. On the other hand, we also investigate virial black-hole masses and Eddington ratios by using the Hbeta and CIV1549 lines for our sample. As a result, we found that black-hole masses and Eddington ratios of nitrogen-loud quasars tend to be low and high relative to normal quasars, suggesting that nitrogen-loud quasars seem to be in a rapidly-accreting phase. This can be explained in terms of a positive correlation between Eddington ratios and nitrogen abundances of quasars, that is probably caused by the connection between the mass accretion onto black holes and nuclear star formation. K. Matsuoka, T. Nagao, R. Maiolino, et al. 
Fri, 22 Sep 17 20/75 Comments: 11 pages, 7 figures, accepted for publication in A&A # Modeling the Infrared Reverberation Response of the Circumnuclear Dusty Torus in AGN: The Effects of Cloud Orientation and Anisotropic Illumination [GA] The obscuring circumnuclear torus of dusty molecular gas is one of the major components of active galactic nuclei (AGN). The torus can be studied by analyzing the time response of its infrared (IR) dust emission to variations in the AGN continuum luminosity, a technique known as reverberation mapping. The IR response is the convolution of the AGN ultraviolet/optical light curve with a transfer function that contains information about the size, geometry, and structure of the torus. Here, we describe a new computer model that simulates the reverberation response of a clumpy torus. Given an input optical light curve, the code computes the emission of a 3D ensemble of dust clouds as a function of time at selected IR wavelengths, taking into account light travel delays. We present simulated dust emission responses at 3.6, 4.5, and 30 $\mu$m that explore the effects of various geometrical and structural properties, dust cloud orientation, and anisotropy of the illuminating radiation field. We also briefly explore the effects of cloud shadowing (clouds are shielded from the AGN continuum source). Example synthetic light curves have also been generated, using the observed optical light curve of the Seyfert 1 galaxy NGC 6418 as the input. The torus response is strongly wavelength-dependent, due to the gradient in cloud surface temperature within the torus, and because the cloud emission is strongly anisotropic at shorter wavelengths. Anisotropic illumination of the torus also significantly modifies the torus response, reducing the lag between the IR and optical variations. T. Almeyda, A. Robinson, M. Richmond, et. al. Fri, 22 Sep 17 25/75 Comments: 17 pages, 14 figures, published in the Astrophysical Journal (2017 July 1)
# distribution of iid sequence of integrable random variables I came across an interesting problem in Jacod's probability book. But I have no idea how to approach it. Should I approach it using induction? Any ideas? Let $X_1, X_2, \cdots$ be an infinite iid sequence of integrable random variables and let $N$ be a positive, integer-valued integrable random variable which is independent of the sequence. Define $S_n = \sum_{k=1}^{n} X_k$ and assume that $S_0 = 0$. (a) Show that $E[S_N] = E[N]E[X_1]$. (b) Show that the characteristic function of $S_N$ is given by $E[\phi_{X_{1}}(t)^N]$, where $\phi_{X_{1}}$ is the characteristic function of $X_1$. - Incidentally, (a) is a special case of Wald's equation (en.wikipedia.org/wiki/Wald_equation). –  Nate Eldredge Dec 7 '10 at 22:38 For part (a) use $E[S_N]= \sum\nolimits_n {E[S_N |N = n]P(N = n)}$. This leads straightforwardly to the result. For part (b) use $E[e^{itS_N } ] = \sum\nolimits_n {E[e^{itS_N } |N = n]P(N = n)}$. Again, this leads straightforwardly to the result. Note that the key in both parts is "condition on $N$". –  Nate Eldredge Dec 7 '10 at 22:36
## Class files List are basic elements in a document, when used correctly they keep concepts organized and structured. This article explains how to create and modify numbered and unnumbered lists in L a T e X . # Introduction Lists are actually very simple to create. List are really easy to create \begin{itemize} \item One entry in the list \item Another entry in the list \end{itemize} To create a (unordered) list you have to declare the itemize environment and then put the entries inside. # Unordered lists The unordered (unnumbered) lists are produced by the itemize environment. Each entry must be preceded by the control sequence \item . \begin{itemize} \item The individual entries are indicated with a black dot, a so-called bullet. \item The text in the entries may be of any length. \end{itemize} By default the individual entries are indicated with a black dot, so-called bullet. The text in the entries may be of any length. # Ordered lists Ordered list have the same syntax inside a different environment: \begin{enumerate} \item The labels consists of sequential numbers. \item The numbers starts at 1 with every call to the enumerate environment. \end{enumerate} The ordered lists are generated by a \enumerate environment and each entry must be preceded by the control sequence \item , which will automatically generate the number labelling the item. The enumerate labels consists of sequential numbers, these numbers starts at 1 with every call to the enumerate environment. # Nested Lists In L a T e X you can insert a list inside another list. The above lists may be included within one another, either mixed or of one type, to a depth of four levels. \begin{enumerate} \item The labels consists of sequential numbers. \begin{itemize} \item The individual entries are indicated with a black dot, a so-called bullet. \item The text in the entries may be of any length. \end{itemize} \item The numbers starts at 1 with every call to the enumerate environment. 
\end{enumerate} # List styles As many other L a T e X elements, unordered and ordered list styles can be personalized. ## Ordered lists The numbering styles change depending on the depth of the nested lists: \begin{enumerate} \item First level item \item First level item \begin{enumerate} \item Second level item \item Second level item \begin{enumerate} \item Third level item \item Third level item \begin{enumerate} \item Fourth level item \item Fourth level item \end{enumerate} \end{enumerate} \end{enumerate} \end{enumerate} The default numbering scheme is: • Arabic number (1, 2, 3, ...) for Level 1 • Lowercase letter (a, b, c, ...) for Level 2 • Lowercase Roman numeral (i, ii, iii, ...) for Level 3 • Uppercase letter (A, B, C, ...) for Level 4. These numbers can be changed by redefining the commands that typeset the numbers of various list levels. For example: \renewcommand{\labelenumii}{\Roman{enumii}} \begin{enumerate} \item First level item \item First level item \begin{enumerate} \item Second level item \item Second level item \begin{enumerate} \item Third level item \item Third level item \begin{enumerate} \item Fourth level item \item Fourth level item \end{enumerate} \end{enumerate} \end{enumerate} \end{enumerate} The command \renewcommand{\labelenumii}{\Roman{enumii}} changes the second level to upper case Roman numeral. It is possible to change the labels of any level, replace labelenumii for one of the listed below. • \theenumi for Level 1 • \theenumii for Level 2 • \theenumiii for Level 3 • \theenumiv for Level 4 The command must be placed in the preamble to change the labels globally or right before \begin{enumerate} to change labels only in this list. There are some other styles, see the reference guide for a complete list. In numbered lists the counter is incremented by \item before it is printed, and starts from 1,a,i,A,I. 
This can be changed: \renewcommand{\labelenumii}{\Roman{enumii}} \begin{enumerate} \item First level item \item First level item \begin{enumerate} \setcounter{enumii}{4} \item Second level item \item Second level item \begin{enumerate} \item Third level item \item Third level item \begin{enumerate} \item Fourth level item \item Fourth level item \end{enumerate} \end{enumerate} \end{enumerate} \end{enumerate} To change the start number or letter you must use the \setcounter command. In the example, to change the start number of level 2 to V the command \setcounter{enumii}{4} was used. To set the start number to any other counter change enumii for any of these: • enumi for Level 1 • enumii for Level 2 • enumiii for Level 3 • enumiv for Level 4 ## Unordered lists The label scheme of unordered lists also changes depending on the depth of the nested list: \begin{itemize} \item First Level \begin{itemize} \item Second Level \begin{itemize} \item Third Level \begin{itemize} \item Fourth Level \end{itemize} \end{itemize} \end{itemize} \end{itemize} The default label scheme of itemized list for: • Level 1 is \textbullet (•), • Level 2 is \textendash (–) , • Level 3 is \textasteriskcentered (*) • Level 4 is \textperiodcentered (·). These labels can be changed by redefining the commands that typeset them for various list levels. For example, to change Level 1 to black square and Level 2 to white square we'll use : \renewcommand{\labelitemi}{$\blacksquare$} \renewcommand\labelitemii{$\square$} \begin{itemize} \item First Level \begin{itemize} \item Second Level \begin{itemize} \item Third Level \begin{itemize} \item Fourth Level \end{itemize} \end{itemize} \end{itemize} \end{itemize} The mathematical symbols used in the previous example belong to the amssymb package, so you have to add \usepackage{amssymb} to your preamble. 
To redefine the label use one of the next commands, depending on the level of list mark you intend to change: • labelitemi for Level 1 • labelitemii for Level 2 • labelitemiii for Level 3 • labelitemiv for Level 4 You can also change the item label for a specific entry, for example: \begin{itemize} \item Default item label for entry one \item Default item label for entry two \item[$\square$] Custom item label for entry three \end{itemize} All you have to do is pass the desired mark as a parameter inside brackets to the item line. # Reference guide Available styles for numbered lists: Code Description \alph Lowercase letter (a, b, c, ...) \Alph Uppercase letter (A, B, C, ...) \arabic Arabic number (1, 2, 3, ...) \roman Lowercase Roman numeral (i, ii, iii, ...) \Roman Uppercase Roman numeral (I, II, III, ...)
by _Mayday_ Tags: photography PF Gold P: 7,368 Quote by ~christina~ Nice panorama, turbo Thanks, ~~! Next time I get a clear day, I'll take the 5-minute trip over to that hill and shoot it again with manual settings and my new Manfrotto 808RC4 tripod head. I've had a really nice, heavy Bogen tripod sitting around unused for almost 20 years. I robbed the head off it to mount on a Gitzo Pro Studex that I picked up for $75 at an insurance-salvage store and sold the Gitzo when I went digital (back when digi-cams were all tiny). Finally had to spring for a real tripod head instead of messing around with jury-rigged alternatives. PF Gold P: 1,215 Quote by Ms Music Larkspur, I did take the picture the other day under the exact same lighting conditions, but in RAW, and the flowers still turned out very very bright blue. I will post if you want to look at the numbers, but so far I keep forgetting to upload the picture. Did you change the white balance later? I think most Raw files appear with the white balance set to some default level, but allow you to edit it later. PF Gold P: 818 Quote by turbo-1 Thanks, ~~! Next time I get a clear day, I'll take the 5-minute trip over to that hill and shoot it again with manual settings and my new Manfrotto 808RC4 tripod head. I've had a really nice, heavy Bogen tripod sitting around unused for almost 20 years. I robbed the head off it to mount on a Gitzo Pro Studex that I picked up for$75 at an insurance-salvage store and sold the Gitzo when I went digital (back when digi-cams were all tiny). Finally had to spring for a real tripod head instead of messing around with jury-rigged alternatives. Your lucky to have such a view after just taking a 5 min trip. I think you should try making a HDR shot. It would be great. Interesting, I got a tripod for Christmas, and want to use it to take pictures of a sunset. I haven't use it yet because it would look pretty funny to see me at the zoo, carrying a large tripod around as I'm not too tall. 
People already gave me funny looks when they saw me with a 300mm lens on my camera...imagine tripod too? lol PF Gold P: 5,450 A random castle from a 'frog perspective' to make it more interesting. But on the belly again. Anybody care naming the landmark? P: 172 Quote by matthyaouw Did you change the white balance later? I think most Raw files appear with the white balance set to some default level, but allow you to edit it later. I will have to sit down with my book to figure out how to edit the WB. I'll see if I can here soon, as the flowers are starting to get REALLY sad looking if I need to take more pics. My daughter also found a bunch of purple stuff (crayons, markers, a flower, etc) and the pic of even those came out blue. I thought it might be the flowers, but its like my camera can't see the color purple. Lovely castle Andre. I'd love to go to Europe some day and see castles. Admin P: 22,365 I have found this guy accidentally. http://www.panoramio.com/user/109117 As someone said about Larkspur - I would like to live in the world as he sees it. PF Gold P: 818 Quote by Andre A random castle from a 'frog perspective' to make it more interesting. But on the belly again. Wow, I love the bridge there. Nice castle, dirty moat. PF Gold P: 5,450 Quote by ~christina~ Wow, I love the bridge there. Nice castle, dirty moat. Thanks, ~Christina~ about the moat, it's in the eye of the beholder. Actually the water is clear and there is no trace of pollution. What you see is a body of floating water plants, holding an abundance of life. P: 22,365 Quote by Borek I can try to prepare a panorama from my roof one day, but it will be boring flat and full of boring flats OK, so I went up the roof, took the pictures and stiched them: http://www.bpp.com.pl/IMG/panorama_z_dachu.jpg Beware - it is wiiiiiiiiiide It is around sunset, taken at ISO 1600, thus noisy. P: 1,296 Nice birds eye view. Matching up the roof top shingles, will be next to impossible. PF Gold P: 7,368 It IS wiiiide! 
and flat with lots of flats. Admin P: 22,365 When we moved here in 2000 half of the buildings that surround us now didn't exist. These yellow ones with blue roofs, these 'high' with red roofs and buildings on the far right (with a high chimney behind) were all built since. PF Gold P: 818 Quote by Andre Thanks, ~Christina~ about the moat, it's in the eye of the beholder. Actually the water is clear and there is no trace of pollution. What you see is a body of floating water plants, holding an abundance of life. Ah okay. Let me rephrase that sentence. There's a lot of algae in the water. But then again, you'd think they'd clean that plant life up when they can afford to own a castle... PF Gold P: 5,450 Quote by ~christina~ Ah okay. Let me rephrase that sentence. There's a lot of algae in the water. But then again, you'd think they'd clean that plant life up when they can afford to own a castle... It's not algae but a variety of water plants. The water is clear. But would you really want to destroy this little critter's biotope? (image Wikipedia) Emeritus Sci Advisor PF Gold P: 10,424 Andre's... critter... sure is a hard act to follow, but, uh, please allow me to introduce my girlfriend, Jessica. - Warren
## Thursday, 6 September 2012 ### A clarifying view of Weyl's criterion Consider an arbitrary sequence $(x_n)$ on the circle $\mathbf{T} = \mathbf{R}/\mathbf{Z}$. If $(x_n)$ is sufficiently well behaved, then often it will have a sort of limiting distribution, which would be usefully represented by a measure $\mu$. For instance, if $x_n$ alternates between $0$ and $1/2$ somewhat regularly, then this limiting distribution $\mu$ is an atomic measure localised at $0$ and $1/2$. Other sequences, such as $\alpha n \bmod 1$ with $\alpha$ irrational, are equidistributed, by which I mean that the limiting distribution $\mu$ is just the uniform measure. Finally, some sequences do not have a well defined limiting distribution at all, e.g. the sequence which spends 1 year at home, then 10 years at its summer home, then 100 years at home, then 1000 years at the summer home, etc. There are two ways to make this rigorous: 1. We say $(x_n)$ has a limiting distribution if the limit $$D(f) = \lim_{N\to\infty} \frac{1}{N}\sum_{n=1}^N f(x_n)$$exists for every continuous function $f:\mathbf{T}\to\mathbf{C}$. In this case $D$ defines a linear functional on $C(\mathbf{T})$ such that $D(1)=1$ and $f\geq 0$ implies $D(f)\geq 0$, so Riesz's representation theorem implies that $D$ is represented by a probability measure $\mu$: $$D(f) = \int_\mathbf{T} f\, d\mu.$$ 2. There's another way of saying (pretty much) the same thing, in case you don't like Riesz. Namely, for each $N$ consider the atomic measure $\mu_N$ which gives a mass $1/N$ to each of $x_1, ..., x_N$, and then take a weak limit of some subsequence of $(\mu_N)$ (recall that the space of regular Borel measures on $\mathbf{T}$ is weakly compact). We say that $(x_n)$ has a limiting distribution if this weak limit is independent of subsequence, i.e. if $(\mu_N)$ weakly converges. Next we say that $(x_n)$ is equidistributed if it has the uniform distribution as its limiting distribution. 
Now a probability measure on $\mathbf{T}$ is the uniform distribution iff all its nonzero Fourier coefficients vanish. But if $\mu$ is the limiting distribution of $(x_n)$ then $$\hat{\mu}(k) = \int_0^1 e^{-i2\pi k x} d\mu(x) = \lim_{N\to\infty} \frac{1}{N}\sum_{n=1}^N e^{-i2\pi k x_n},$$so this comes down to exactly Weyl's criterion. Taking the weak limit point of view rather than Riesz's, we can additionally conclude the existence of the limiting distribution from Weyl's criterion: if you take the weak limit $\mu$ of some subsequence, Weyl's criterion implies that this $\mu$ is uniform. Since all convergent subsequences then converge to the same limit, the sequence is actually convergent. In fact, now that I'm thinking about it, this reasoning gives a sort of generalized Weyl's criterion for the existence of a limiting distribution. Namely, the "Fourier coefficients" $$\lim_{N\to\infty}\frac{1}{N}\sum_{n=1}^N e^{-i2\pi k x_n}$$all exist (they need not be $0$) iff $(x_n)$ has a limiting distribution. #### 1 comment: 1. See http://www.echolalie.org/curves for an illustration.
Krishna 0 Step 1: Use the relationship between circumference and radius to find the radius of the circle. NOTE: Circumference C = 2 \pi r r = \frac{C}{2*\pi} Find the radius of the circle EXAMPLE: r = \frac{C}{2*\pi} r = \frac{22}{2*\pi} (taking \pi \approx \frac{22}{7} \approx 3.14) r = \frac{7}{2} Step 2: Calculate the area of the circle. Total area of the circle = \pi r^2 Step 3: Set up a formula for the sector area NOTE: A ratio will need to be constructed. Recall that a circle is composed of 360 degrees. Therefore, the following ratio can be made, \frac{\theta}{360} = \frac{\text{sector area} (A_C)}{\text{Total area} (A_T)} where, \theta = Central angle Step 4: Plug the sector’s central angle measurement into the formula. Step 5: Plug the given or calculated area measurement into the formula. EXAMPLE: \frac{26}{360} = \frac{\text{sector area} (A_C)}{\text{46} (A_T)} Step 6: Solve for the sector area. EXAMPLE: \frac{\theta}{360}=\frac{Sector\ area\ \left(A_C\right)}{Total\ area\ \left(A_T\right)} \frac{26}{360} = \frac{\text{sector area} (A_C)}{\text{46} (A_T)} \frac{26}{360} * 46 = \text{Sector area} (A_C) sector area = 3.32
# Math Help - Test 1. ## Test . . $\Large\begin{array}{c} \curlyvee\!\! \curlyvee\!\! \curlyvee\! \curlyvee \\ [-3.6mm] \curlywedge\!\! \curlywedge\!\! \curlywedge\! \curlywedge \\ [-3.3mm] \curlyvee\!\! \curlyvee\!\! \curlyvee\! \curlyvee \\ [-3.6mm] \curlywedge\!\! \curlywedge\!\! \curlywedge\! \curlywedge \end{array}$ . . . $\huge\begin{array}{c}\top\!\!\!\! \dashv \\ [-6.5mm] \vdash\:\!\!\!\!\! \bot \end{array}$ . . $\large \begin{array}{cc}\backslash\text{curlyvee} & \curlyvee \\ \backslash\text{curlywedge} & \curlywedge \\ \backslash\text{top} & \top \\ \backslash\text{bot} & \bot \\ \backslash\text{vdash} & \vdash \\ \backslash\text{dashv} & \dashv \\ \backslash\text{maltese} & \maltese \\ \backslash\text{multimap} & \multimap \\ \backslash\text{circledcirc} & \circledcirc \\ \backslash\text{boxdot} & \boxdot \\ \backslash\text{boxplus} & \boxplus \\ \backslash\text{boxtimes} & \boxtimes \end{array} \qquad\begin{array}{cc} \backslash\text{varkappa} & \varkappa \\ \backslash\text{Cup} & \Cup \\ \backslash\text{Cap} & \Cap \\ \backslash\text{Subset} & \Subset \\ \backslash\text{Supset} & \Supset \\ \backslash S & \S \\ \backslash\text{between} & \between \\ \backslash\text{checkmark} & \checkmark \\ \backslash\text{bowtie} & \bowtie \\ \backslash\text{divideontimes} & \divideontimes \\ \backslash\text{Rrightarrow} & \Rrightarrow \\ \backslash\text{Lleftarrow} & \Lleftarrow \end{array}$ 2. ## Re: Test $\frac { 5(x^2 y^3 z ^{1/2})^{-4} }{20(x^3 y^{-2} z^1)^5}$ $= \frac { 1 x^{-8}y^{-12}z^{-2}}{4x^{15}y^{-10}z^5}$ $= \frac {1}{4}x^{-8-15}y^{-12-(-10)}z^{-2-5}$ $= \frac {1}{4}x^{-23}y^{-2}z^{-7}$ $= \frac {1}{4x^{23}y^2z^7}$ 3. 
## Re: Test TEST I try to do the following integral $\int_0^t v(t)\,dt = \int_0^t \left[ v_0 + v_c \{\sin(\omega t + \phi) - \sin(\phi)\} \right] dt$ I end up with $x(t) = x_0 + (\frac{v_c}{\omega}) \{\cos \phi - \cos(\omega t + \phi)\}$ $x(t) = x_0 + (\frac{v_c}{\omega}) \left[\{(\frac{v_o}{v_c}) - \sin \phi\} \omega t + \cos \phi - \cos(\omega t + \phi)\right]$ $\{(\frac{v_o}{v_c}) - \sin \phi\} \omega t$
# Sum of Products Help 1. Sep 3, 2009 ### tstuddud I don't quite understand the method to solve this type of question. Let x=(-3,2,5), y=(2,4,-5), and z=(1,6,7). Calculate: File size: 1.7 KB Views: 59 File size: 1.7 KB Views: 58 2. Sep 3, 2009 ### NJunJie I view such qns playing with 'arrays' and 'substitution'. Generally, i will view it this way:- x is an array of (-3,2,5) y is an array of (2,4,-5) first part is u do the summation first - i call it inner. Inner: (-3)(2) + (2)(4) Then you do the Products - i call it outer. But is your question complete? There's no 'j' in your formulae pasted. 3. Sep 3, 2009 ### HallsofIvy Staff Emeritus What you have written, $$\prod_{j= 1}^3\sum_{i=1}^3 x_iy_i$$ and $$\sum_{j=1}^3\prod_{i=1}^2 x_iy_i$$ are just $$\prod_{j=1}^3(x_1y_1+ x_2y_2+ x_3y_3)= \prod_{j=1}^3((-3)(2)+ (2)(4)+ (5)(-5))= \prod_{j=1}^3(-6+ 8- 25)= (-23)^3= -12167$$ and $$\sum_{j=1}^3((x_1y_1)(x_2y_2))= \sum_{j=1}^3 (-3)(2)(2)(4)= \sum_{j=1}^3 (-48)= 3(-48)= -144$$ But I suspect you meant $$\prod_{j=1}^3\sum_{i= 1}^2 x_iy_j$$ and $$\sum_{j=1}^3\prod_{i=1}^2 x_iy_j$$ The first of those is $$\prod_{j=1}^3(x_1+ x_2)y_j= (x_1+ x_2)^3\prod_{j=1}^3y_j= (x_1+ x_2)^3(y_1y_2y_3)$$ surely you can do that arithmetic yourself.
# Dumb conversion question I'm sorry for posting this, but I'm really confused by the following. The problem states to use units of 1000 TOE ("tons of oil equivalent", where 1 ton = 1000 kg) So if I'm given 1698 kg oil equivalent, I need to divide by 1000 twice to get to the units of 1000 TOE? Thanks.
# Ampersand should be escaped on export Bug #533726 reported by arno_b on 2010-03-07 This bug affects 1 person Affects Status Importance Assigned to Milestone Medium ### Bug Description Ampersand characters (&) are not escaped in the generated Latex code, this has to be done manually. arno_b (arno.b) on 2010-03-07 Changed in latexdraw: assignee: nobody → arno_b (arno.b) arno_b (arno.b) wrote on 2010-03-07: #1 In fact, I will not correct that: if I do it, a latexdraw user will no more be able to create a table (using tabular) since the format of a table uses &; they will be replaced by \& and the table will not compile. More generally, latexdraw is not an interface to latex as latex is: a text in latexdraw *must* be formatted in latex as if you write a latex document. Thus, I will not add a \ character when a text contains a &. Changed in latexdraw: status: Triaged → Invalid milestone: 2.0.7 → none
# Data Misfit¶ The data misfit using an l_2 norm is: $\mu_\text{data} = {1\over 2}\left| \mathbf{W}_d (\mathbf{d}_\text{pred} - \mathbf{d}_\text{obs}) \right|_2^2$ If the field, u, is provided, the calculation of the data is fast: \begin{align}\begin{aligned}\mathbf{d}_\text{pred} = \mathbf{Pu(m)}\\\mathbf{R} = \mathbf{W}_d (\mathbf{d}_\text{pred} - \mathbf{d}_\text{obs})\end{aligned}\end{align} Where P is a projection matrix that brings the field on the full domain to the data measurement locations; u is the field of interest; d_obs is the observed data; and $$\mathbf{W}_d$$ is the weighting matrix. The derivative of this, with respect to the model, is: $\frac{\partial \mu_\text{data}}{\partial \mathbf{m}} = \mathbf{J}^\top \mathbf{W}_d \mathbf{R}$ The second derivative is: $\frac{\partial^2 \mu_\text{data}}{\partial^2 \mathbf{m}} = \mathbf{J}^\top \mathbf{W}_d \mathbf{W}_d \mathbf{J}$ ## The API¶ class SimPEG.DataMisfit.BaseDataMisfit(survey, **kwargs)[source] Note You should inherit from this class to create your own data misfit term. debug = False Print debugging information counter = None Set this to a SimPEG.Utils.Counter() if you want to count things eval(m, f=None)[source] Parameters: m (numpy.array) – geophysical model f (Fields) – fields float data misfit evalDeriv(m, f=None)[source] Parameters: m (numpy.array) – geophysical model f (Fields) – fields numpy.array data misfit derivative eval2Deriv(m, v, f=None)[source] Parameters: m (numpy.array) – geophysical model v (numpy.array) – vector to multiply f (Fields) – fields numpy.array data misfit derivative ## Common Data Misfits¶ ### l2 norm¶ class SimPEG.DataMisfit.l2_DataMisfit(survey, **kwargs)[source] The data misfit with an l_2 norm: $\mu_\text{data} = {1\over 2}\left| \mathbf{W}_d (\mathbf{d}_\text{pred} - \mathbf{d}_\text{obs}) \right|_2^2$ Wd getWd(survey) The data weighting matrix. The default is based on the norm of the data plus a noise floor. 
Return type: scipy.sparse.csr_matrix Wd eval(m, f=None)[source] evalDeriv(m, f=None)[source] eval2Deriv(m, v, f=None)[source]
Implied volatility: general properties and asymptotics open access Abstract This thesis investigates implied volatility in general classes of stock price models. To begin with, we take a very general view. We find that implied volatility is always, everywhere, and for every expiry well-defined only if the stock price is a non-negative martingale. We also derive sufficient and close to necessary conditions for an implied volatility surface to be free from static arbitrage. In this context, free from static arbitrage means that the call price surface generated by the implied volatility surface is free from static arbitrage. We also investigate the small time to expiry behaviour of implied volatility. We do this in almost complete generality, assuming only that the call price surface is non-decreasing and right continuous in time to expiry and that the call surface satisfies the no-arbitrage bounds (S-K)^+ ≤ C(K, τ) ≤ S. We used S to denote the current stock price, K to be an option strike price, τ denotes time to expiry, and C(K, τ) the price of the K strike option expiring in τ time units. Under these weak assumptions, we obtain exact asymptotic formulae relating the call price surface and the implied volatility surface close to expiry. We apply our general asymptotic formulae to determining the small time to expiry behaviour of implied volatility in a variety of models. We consider exponential Lévy models, obtaining new and somewhat surprising results. We then investigate the behaviour close to expiry of stochastic volatility models in the at-the-money case. Our results generalise what is already known and by a novel method of proof. In the not at-the-money case, we consider local volatility models using classical results of Varadhan. In obtaining the asymptotics for local volatility models, we use a representation of the European call as an integral over time to expiry. 
We devote an entire chapter to representations of the European call option; a key role is played by local time and the argument of Klebaner. A novel alternative that is especially useful in the local volatility case is also presented. Author(s) Roper, Michael Paul Veran Goldys, Ben Rutkowski, Marek 2009 Thesis PhD Doctorate Files whole.pdf 793.93 KB Adobe Portable Document Format