repo (string, 3-91 chars) | file (string, 16-152 chars) | code (string, 0-3.77M chars) | file_length (int64, 0-3.77M) | avg_line_length (float64, 0-16k) | max_line_length (int64, 0-273k) | extension_type (string, 1 class)
|---|---|---|---|---|---|---|
monodle | monodle-main/tools/loc_error_by_shifting.py | import numpy as np
from PIL import Image
from lib.datasets.kitti.kitti_utils import Calibration
image_file = '../data/KITTI/object/training/image_2/000000.png'
image = Image.open(image_file)
calib_file = '../data/KITTI/object/training/calib/000000.txt'
calib = Calibration(calib_file)
img_w, img_h = image.size[0], im... | 869 | 27.064516 | 65 | py |
monodle | monodle-main/lib/helpers/decode_helper.py | import numpy as np
import torch
import torch.nn as nn
from lib.datasets.utils import class2angle
def decode_detections(dets, info, calibs, cls_mean_size, threshold):
'''
NOTE: THIS IS A NUMPY FUNCTION
input: dets, numpy array, shape in [batch x max_dets x dim]
input: img_info, dict, necessary informat... | 7,269 | 34.637255 | 117 | py |
monodle | monodle-main/lib/helpers/scheduler_helper.py | import torch.nn as nn
import torch.optim.lr_scheduler as lr_sched
import math
def build_lr_scheduler(cfg, optimizer, last_epoch):
def lr_lbmd(cur_epoch):
cur_decay = 1
for decay_step in cfg['decay_list']:
if cur_epoch >= decay_step:
cur_decay = cur_decay * cfg['decay_ra... | 3,831 | 29.903226 | 100 | py |
monodle | monodle-main/lib/helpers/trainer_helper.py | import os
import tqdm
import torch
import numpy as np
import torch.nn as nn
from lib.helpers.save_helper import get_checkpoint_state
from lib.helpers.save_helper import load_checkpoint
from lib.helpers.save_helper import save_checkpoint
from lib.losses.centernet_loss import compute_centernet3d_loss
class Trainer(ob... | 3,871 | 34.522936 | 123 | py |
monodle | monodle-main/lib/helpers/save_helper.py | import os
import torch
import torch.nn as nn
def model_state_to_cpu(model_state):
model_state_cpu = type(model_state)() # ordered dict
for key, val in model_state.items():
model_state_cpu[key] = val.cpu()
return model_state_cpu
def get_checkpoint_state(model=None, optimizer=None, epoch=None):
... | 1,501 | 33.136364 | 87 | py |
monodle | monodle-main/lib/helpers/optimizer_helper.py | import math
import torch
import torch.optim as optim
from torch.optim.optimizer import Optimizer
def build_optimizer(cfg_optimizer, model):
weights, biases = [], []
for name, param in model.named_parameters():
if 'bias' in name:
biases += [param]
else:
weights += [param]... | 5,576 | 41.9 | 116 | py |
monodle | monodle-main/lib/helpers/model_helper.py | from lib.models.centernet3d import CenterNet3D
def build_model(cfg):
if cfg['type'] == 'centernet3d':
return CenterNet3D(backbone=cfg['backbone'], neck=cfg['neck'], num_class=cfg['num_class'])
else:
raise NotImplementedError("%s model is not supported" % cfg['type'])
| 296 | 26 | 98 | py |
monodle | monodle-main/lib/helpers/dataloader_helper.py | import torch
import numpy as np
from torch.utils.data import DataLoader
from lib.datasets.kitti.kitti_dataset import KITTI_Dataset
# init datasets and dataloaders
def my_worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
def build_dataloader(cfg, workers=4):
# perpare dataset... | 1,336 | 35.135135 | 78 | py |
monodle | monodle-main/lib/helpers/tester_helper.py | import os
import tqdm
import torch
from lib.helpers.save_helper import load_checkpoint
from lib.helpers.decode_helper import extract_dets_from_outputs
from lib.helpers.decode_helper import decode_detections
class Tester(object):
def __init__(self, cfg, model, dataloader, logger, eval=False):
self.cfg =... | 4,732 | 38.441667 | 121 | py |
monodle | monodle-main/lib/helpers/utils_helper.py | import torch
import numpy as np
import logging
import random
def create_logger(log_file, rank=0):
log_format = '%(asctime)s %(levelname)5s %(message)s'
logging.basicConfig(level=logging.INFO if rank == 0 else 'ERROR',
format=log_format,
filename=log_file)
c... | 844 | 31.5 | 69 | py |
monodle | monodle-main/lib/models/centernet3d.py | import os
import cv2
import torch
import torch.nn as nn
import numpy as np
from lib.backbones import dla
from lib.backbones.dlaup import DLAUp
from lib.backbones.hourglass import get_large_hourglass_net
from lib.backbones.hourglass import load_pretrian_model
class CenterNet3D(nn.Module):
def __init__(self, back... | 2,712 | 32.085366 | 129 | py |
monodle | monodle-main/lib/datasets/utils.py | ''' some auxiliary functions for all datasets '''
import numpy as np
import cv2
num_heading_bin = 12 # hyper param
def angle2class(angle):
''' Convert continuous angle to discrete class and residual. '''
angle = angle % (2 * np.pi)
assert (angle >= 0 and angle <= 2 * np.pi)
angle_per_class = 2 * np.... | 4,405 | 34.532258 | 135 | py |
monodle | monodle-main/lib/datasets/kitti/kitti_dataset.py | import os
import numpy as np
import torch.utils.data as data
from PIL import Image
from lib.datasets.utils import angle2class
from lib.datasets.utils import gaussian_radius
from lib.datasets.utils import draw_umich_gaussian
from lib.datasets.kitti.kitti_utils import get_objects_from_label
from lib.datasets.kitti.kitti... | 13,676 | 42.419048 | 136 | py |
monodle | monodle-main/lib/datasets/kitti/kitti_utils.py | ''' some auxiliary functions for KITTI dataset '''
import numpy as np
import cv2
################ Object3D ##################
def get_objects_from_label(label_file):
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
class Object3... | 14,050 | 36.073879 | 125 | py |
monodle | monodle-main/lib/datasets/kitti/kitti_eval_python/rotate_iou.py | #####################
# Based on https://github.com/hongzhenwang/RRPN-revise
# Licensed under The MIT License
# Author: yanyan, scrin@foxmail.com
#####################
import math
import numba
import numpy as np
from numba import cuda
@numba.jit(nopython=True)
def div_up(m, n):
return m // n + (m % n > 0)
@cuda... | 11,552 | 33.903323 | 95 | py |
monodle | monodle-main/lib/datasets/kitti/kitti_eval_python/evaluate.py | import time
import fire
import .kitti_common as kitti
from .eval import get_coco_eval_result, get_official_eval_result
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def evaluate(label_path,
result_path,
... | 908 | 26.545455 | 74 | py |
monodle | monodle-main/lib/datasets/kitti/kitti_eval_python/kitti_common.py | import concurrent.futures as futures
import os
import pathlib
import re
from collections import OrderedDict
import numpy as np
from skimage import io
def get_image_index_str(img_idx):
return "{:06d}".format(img_idx)
def get_kitti_info_path(idx,
prefix,
info_type=... | 15,309 | 36.070218 | 79 | py |
monodle | monodle-main/lib/datasets/kitti/kitti_eval_python/eval.py | import numpy as np
import numba
import io as sysio
from .rotate_iou import rotate_iou_gpu_eval
DISTANCE_COVER = False
@numba.jit
def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
scores.sort()
scores = scores[::-1]
current_recall = 0
thresholds = []
for i, score in enumerate(scor... | 42,956 | 42 | 118 | py |
monodle | monodle-main/lib/losses/uncertainty_loss.py | import numpy as np
import torch
def laplacian_aleatoric_uncertainty_loss(input, target, log_variance, reduction='mean'):
'''
References:
MonoPair: Monocular 3D Object Detection Using Pairwise Spatial Relationships, CVPR'20
Geometry and Uncertainty in Deep Learning for Computer Vision, Universi... | 1,082 | 35.1 | 95 | py |
monodle | monodle-main/lib/losses/dim_aware_loss.py | import torch
import torch.nn.functional as F
def dim_aware_l1_loss(input, target, dimension):
dimension = dimension.clone().detach()
loss = torch.abs(input - target)
loss /= dimension
with torch.no_grad():
compensation_weight = F.l1_loss(input, target) / loss.mean()
loss *= compensation_w... | 521 | 22.727273 | 68 | py |
monodle | monodle-main/lib/losses/focal_loss.py | import torch
import torch.nn as nn
def focal_loss(input, target, alpha=0.25, gamma=2.):
'''
Args:
input: prediction, 'batch x c x h x w'
target: ground truth, 'batch x c x h x w'
alpha: hyper param, default in 0.25
gamma: hyper param, default in 2.0
Reference: Focal Loss ... | 1,687 | 24.19403 | 86 | py |
monodle | monodle-main/lib/losses/centernet_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.helpers.decode_helper import _transpose_and_gather_feat
from lib.losses.focal_loss import focal_loss_cornernet
from lib.losses.uncertainty_loss import laplacian_aleatoric_uncertainty_loss
from lib.losses.dim_aware_loss import dim_aware_l1_loss... | 5,440 | 40.853846 | 131 | py |
monodle | monodle-main/lib/backbones/dla.py | import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
BatchNorm = nn.BatchNorm2d
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return os.path.join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x... | 14,554 | 34.413625 | 91 | py |
monodle | monodle-main/lib/backbones/dlaup.py | import os, sys
import math
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
sys.path.append(ROOT_DIR)
import numpy as np
import torch
import torch.nn as nn
class Conv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernal_szie=3, stride=1, bias=... | 8,927 | 36.991489 | 104 | py |
monodle | monodle-main/lib/backbones/hourglass.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
class convolution(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(convolution, self).__init__()
pa... | 11,068 | 31.365497 | 118 | py |
PalmTree | PalmTree-master/src/config.py | """
Configuration file.
"""
VOCAB_SIZE = 10000
USE_CUDA = True
DEVICES = [0]
CUDA_DEVICE = DEVICES[0]
VERSION = 1
MAXLEN = 10
| 127 | 10.636364 | 24 | py |
PalmTree | PalmTree-master/src/train_palmtree.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.autograd import Variable
from config import *
import numpy as np
import palmtree
from palmtree import dataset
from palmtree import trainer
import pickle as pkl
print(palmtree.__file__)
vocab_path = "cd... | 2,202 | 32.378788 | 117 | py |
PalmTree | PalmTree-master/src/palmtree/__init__.py | from .model import BERT
| 24 | 11.5 | 23 | py |
PalmTree | PalmTree-master/src/palmtree/trainer/pretrain.py | import torch
import torch.nn as nn
from torch.optim import Adam, AdamW
from torch.utils.data import DataLoader
from ..model import BERTLM, BERT
from .optim_schedule import ScheduledOptim
import tqdm
class BERTTrainer:
"""
BERTTrainer make the pretrained BERT model with two LM training method.
1. Ma... | 6,299 | 39.645161 | 192 | py |
PalmTree | PalmTree-master/src/palmtree/trainer/__init__.py | from .pretrain import BERTTrainer
| 34 | 16.5 | 33 | py |
PalmTree | PalmTree-master/src/palmtree/trainer/optim_schedule.py | '''A wrapper class for optimizer '''
import numpy as np
class ScheduledOptim():
'''A simple wrapper class for learning rate scheduling'''
def __init__(self, optimizer, d_model, n_warmup_steps):
self._optimizer = optimizer
self.n_warmup_steps = n_warmup_steps
self.n_current_steps = 0
... | 1,069 | 28.722222 | 72 | py |
PalmTree | PalmTree-master/src/palmtree/dataset/dataset.py | from torch.utils.data import Dataset
import tqdm
import torch
import random
import pickle as pkl
class BERTDataset(Dataset):
def __init__(self, dfg_corpus_path, cfg_corpus_path, vocab, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True):
self.vocab = vocab
self.seq_len = seq_len
... | 8,441 | 36.189427 | 135 | py |
PalmTree | PalmTree-master/src/palmtree/dataset/vocab.py | import pickle
import tqdm
from collections import Counter
class TorchVocab(object):
"""Defines a vocabulary object that will be used to numericalize a field.
Attributes:
freqs: A collections.Counter object holding the frequencies of tokens
in the data used to build the Vocab.
stoi:... | 6,797 | 35.352941 | 93 | py |
PalmTree | PalmTree-master/src/palmtree/dataset/__init__.py | from .dataset import BERTDataset
from .vocab import WordVocab
| 62 | 20 | 32 | py |
PalmTree | PalmTree-master/src/palmtree/model/bert.py | import torch.nn as nn
from .transformer import TransformerBlock
from .embedding import BERTEmbedding
class BERT(nn.Module):
"""
BERT model : Bidirectional Encoder Representations from Transformers.
"""
def __init__(self, vocab_size, hidden=768, n_layers=12, attn_heads=12, dropout=0.1):
"""
... | 2,088 | 29.720588 | 97 | py |
PalmTree | PalmTree-master/src/palmtree/model/transformer.py | import torch.nn as nn
from .attention import MultiHeadedAttention
from .utils import SublayerConnection, PositionwiseFeedForward
class TransformerBlock(nn.Module):
"""
Bidirectional Encoder = Transformer (self-attention)
Transformer = MultiHead_Attention + Feed_Forward with sublayer connection
"""
... | 1,276 | 38.90625 | 110 | py |
PalmTree | PalmTree-master/src/palmtree/model/__init__.py | from .bert import BERT
from .language_model import BERTLM
| 58 | 18.666667 | 34 | py |
PalmTree | PalmTree-master/src/palmtree/model/language_model.py | import torch.nn as nn
import torch
from .bert import BERT
class BERTLM(nn.Module):
"""
BERT Language Model
Next Sentence Prediction Model + Masked Language Model
"""
def __init__(self, bert: BERT, vocab_size):
"""
:param bert: BERT model which should be trained
:param voc... | 1,740 | 24.602941 | 68 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/bert.py | import torch.nn as nn
from .token import TokenEmbedding
from .position import PositionalEmbedding
from .segment import SegmentEmbedding
class BERTEmbedding(nn.Module):
"""
BERT Embedding which is consisted with under features
1. TokenEmbedding : normal embedding matrix
2. PositionalEmbedding :... | 1,261 | 37.242424 | 88 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/position.py | import torch.nn as nn
import torch
import math
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=512):
super().__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
pe.require_grad = False
posi... | 710 | 26.346154 | 95 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/segment.py | import torch.nn as nn
class SegmentEmbedding(nn.Embedding):
def __init__(self, embed_size=512):
super().__init__(3, embed_size, padding_idx=0)
| 157 | 21.571429 | 54 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/token.py | import torch.nn as nn
class TokenEmbedding(nn.Embedding):
def __init__(self, vocab_size, embed_size=512):
super().__init__(vocab_size, embed_size, padding_idx=0)
| 176 | 24.285714 | 63 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/__init__.py | from .bert import BERTEmbedding
| 32 | 15.5 | 31 | py |
PalmTree | PalmTree-master/src/palmtree/model/attention/multi_head.py | import torch.nn as nn
from .single import Attention
class MultiHeadedAttention(nn.Module):
"""
Take in model size and number of heads.
"""
def __init__(self, h, d_model, dropout=0.1):
super().__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_... | 1,268 | 32.394737 | 91 | py |
PalmTree | PalmTree-master/src/palmtree/model/attention/__init__.py | from .multi_head import MultiHeadedAttention
from .single import Attention
| 75 | 24.333333 | 44 | py |
PalmTree | PalmTree-master/src/palmtree/model/attention/single.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import math
class Attention(nn.Module):
"""
Compute 'Scaled Dot Product Attention
"""
def forward(self, query, key, value, mask=None, dropout=None):
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ mat... | 596 | 21.961538 | 66 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/gelu.py | import torch.nn as nn
import torch
import math
class GELU(nn.Module):
"""
Paper Section 3.4, last paragraph notice that BERT used the GELU instead of RELU
"""
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
| 301 | 22.230769 | 100 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/feed_forward.py | import torch.nn as nn
from .gelu import GELU
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
... | 488 | 27.764706 | 67 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/sublayer.py | import torch.nn as nn
from .layer_norm import LayerNorm
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()... | 565 | 28.789474 | 71 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/__init__.py | from .feed_forward import PositionwiseFeedForward
from .layer_norm import LayerNorm
from .sublayer import SublayerConnection
from .gelu import GELU
| 148 | 28.8 | 49 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/layer_norm.py | import torch.nn as nn
import torch
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(feature... | 519 | 27.888889 | 66 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/registers.py |
"""
The Design of Instruction Decoder
The reference come from :
1. Intel® 64 and IA-32 Architectures Software Developer’s Manual: https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
2. x64_cheatsheet: https://cs.bro... | 4,474 | 18.123932 | 220 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/data_loader.py | """"
Here we implement a class for loading data.
"""
import torch
from torch.autograd import Variable
from vocab import *
from config import *
import numpy as np
import random
import re
np.random.seed(0)
class DataLoader:
EOS = 0 # to mean end of sentence
UNK = 1 # to mean unknown token
maxlen = MAXL... | 3,295 | 30.390476 | 113 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/gemini_feature_extraction_palmtree.py | import glob
import pickle
import queue
import time
import re
import os
import numpy as np
import eval_utils as utils
import binaryninja as binja
from obj import Obj
CALL_INST = {binja.LowLevelILOperation.LLIL_CALL, binja.LowLevelILOperation.LLIL_CALL_PARAM,
binja.LowLevelILOperation.LLIL_CALL_OUTPUT_SSA... | 7,463 | 34.542857 | 135 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/obj.py | class Obj:
pass
| 20 | 6 | 10 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/vocab.py | """
This code has been taken and modified from https://github.com/ryankiros/skip-thoughts
Constructing and loading dictionaries
"""
import _pickle as pkl
from collections import OrderedDict
import argparse
import re
def build_dictionary(text):
"""
Build a dictionary
text: list of sentences (pre-tokenized)... | 2,386 | 26.436782 | 91 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/config.py | """
Configuration file.
"""
VOCAB_SIZE = 10000
USE_CUDA = True
DEVICES = [0]
CUDA_DEVICE = DEVICES[0]
VERSION = 1
MAXLEN = 10
LEARNING_RATE=1e-5
| 147 | 10.384615 | 24 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/eval_utils.py | from model import UniSkip, Encoder
from data_loader import DataLoader
from vocab import load_dictionary
from config import *
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch
import re
import numpy as np
import pickle
class UsableTransformer:
# @profile
def... | 1,852 | 28.412698 | 63 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/embedding.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# to use tfdbg
# wrap session object with debugger wrapper
from tensorflow.python import debug as tf_debug
from random import shuffle
from scipy.linalg import block_diag
import tensorflow as tf
import numpy as... | 9,614 | 30.628289 | 134 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/siamese_emb.py | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
flags = tf.app.flags
FLAGS = flags.FLAGS
class Siamese:
#calculate embedding
def emb_generation(self, x, n):
mul_mat = x[:, FLAGS.vector_size:]
x = x[:, :FLAGS.vector_size]
# tf.reset_default_grap... | 8,987 | 43.49505 | 145 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/dataset.py | import glob
import random
from collections import defaultdict
import tensorflow as tf
import numpy as np
import pickle as p
from numpy.random import choice, permutation
from itertools import combinations
import util
import os
import sys
import re
import operator
from functools import reduce
sys.path.append(os.path.j... | 8,285 | 35.342105 | 124 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/obj.py | class Obj:
pass
| 20 | 6 | 10 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/util.py | import os
import sys
#getting all file list in a directory
def get_files(directory):
file_list = []
for root, dirc, files in os.walk(directory):
for file in files:
file_list.append(os.path.join(root, file))
return file_list | 234 | 18.583333 | 45 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/__init__.py | 0 | 0 | 0 | py | |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/emb_train.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Siamese graph embedding implementaition using tensorflow
By:
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle as pkl
import time
import random
import nltk
os.environ["CUDA_VISIBLE... | 13,215 | 38.687688 | 188 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/embedding/save_embeddings.py | #!/usr/bin/python
import argparse
import os
import pickle
import sqlite3
import sys
import base64
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec
embeddings = {}
def get_config():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--embed_pickle_path', dest='embed_p... | 3,548 | 30.6875 | 183 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/embedding/insn_int.py | '''
transfer the instructions to integer
or transfer the integer to instructions
input: int list
output: one integer
'''
def insn2int_inverse(insn_list):
'''
transfer the instruction to integer with inverse order
example:
[72,137,229] ==>15042888 (72+137*256+229*256*256)
[243, 15, 16, 13, 205, 0, ... | 1,096 | 28.648649 | 134 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/embedding/prep_embed_input.py | '''
Input: the whole dataset (in order to get the whole vocabulary)
Output:
output_path: the input for embedding model
error_path: save all the error information in (especially when two distinct instructions map to same integer)
int2insn_map_path: the map information(int -> insn (int list))
'''
import pickle
import ar... | 4,326 | 39.064815 | 205 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/embedding/train_embed.py | '''
train the embedding model in order to get the vectors representing each instructions
the vectors embed the semantic information of instruction inside
the input: the output of prep_embed_input (output_path)
the output: the embedding model and mapping between the integer to vectors
'''
import os
import sys
import a... | 10,473 | 40.729084 | 181 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/test/dataset.py | import pickle
import os
import numpy as np
from multiprocessing import Pool
embed_info = {}
type_info = {
'char': 0,
'int': 1,
'float': 2,
'pointer': 3,
'enum': 4,
'struct': 5,
'union': 6
}
def approximate_type(type_str):
int_list = ['_Bool', 'unsigned int', 'int', 'long long int', '... | 9,019 | 38.911504 | 136 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/test/eval.py | import tensorflow as tf
import dataset
import dataset_caller
import os
import sys
import argparse
import functools
import pickle
import inspect
def lazy_property(function):
attribute = '_' + function.__name__
@property
@functools.wraps(function)
def wrapper(self):
if not hasattr(self, attrib... | 11,011 | 38.188612 | 176 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/test/dataset_caller.py | import pickle
import os
import numpy as np
from multiprocessing import Pool
embed_info = {}
type_info = {
'char': 0,
'int': 1,
'float': 2,
'pointer': 3,
'enum': 4,
'struct': 5,
'union': 6
}
def approximate_type(type_str):
int_list = ['_Bool', 'unsigned int', 'int', 'long long int', 'l... | 9,864 | 40.104167 | 118 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/split_function_path_gen.py | import os
import pickle
import random
import time
def get_file_path(folder_path, tag):
path_list=[]
file_list=os.listdir(folder_path)
'''initial path list'''
for file_name in file_list:
path_list.append(os.path.join(folder_path, file_name))
final_path_list=[]
tag_len = len(tag)
'''... | 1,570 | 29.211538 | 115 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/train_rnn.py | import argparse
import functools
import inspect
import os
import sys
import pickle
import dataset
import dataset_caller
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
def lazy_property(function):
attribute = '_' + function.__name__
@property
@functools.wraps(fun... | 17,058 | 39.811005 | 176 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/data_loader.py | """"
Here we implement a class for loading data.
"""
import torch
from torch.autograd import Variable
from vocab import *
from config import *
import numpy as np
import random
import re
np.random.seed(0)
class DataLoader:
EOS = 0 # to mean end of sentence
UNK = 1 # to mean unknown token
maxlen = MAXL... | 3,294 | 30.682692 | 113 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/model.py | """
This file implements the Skip-Thought architecture.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from config import *
import math
import numpy as np
class Encoder(nn.Module):
thought_size = 128
word_size = 256
@staticmethod
def reverse... | 13,287 | 42.710526 | 180 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/dataset.py | import pickle
import os
import numpy as np
import re
from multiprocessing import Pool
import instruction2vec
import eval_utils as utils
from collections import Counter
embed_info = {}
type_info = {
'char': 0,
'int': 1,
'float': 2,
'pointer': 3,
'enum': 4,
'struct': 5,
'union': 6
}
def app... | 12,586 | 40.268852 | 165 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/vocab.py | """
This code has been taken and modified from https://github.com/ryankiros/skip-thoughts
Constructing and loading dictionaries
"""
import pickle as pkl
from collections import OrderedDict
import argparse
import re
def build_dictionary(text):
"""
Build a dictionary
text: list of sentences (pre-tokenized)... | 2,454 | 27.218391 | 91 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/config.py | """
Configuration file.
"""
VOCAB_SIZE = 5000
USE_CUDA = False
DEVICES = [0]
CUDA_DEVICE = DEVICES[0]
VERSION = 1
MAXLEN = 10
LEARNING_RATE=1e-5
| 147 | 10.384615 | 24 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.autograd import Variable
from config import *
import numpy as np
import bert_pytorch
from bert_pytorch import dataset
from bert_pytorch import trainer
import pickle as pkl
vocab_path = "data/test_vocab... | 1,971 | 31.866667 | 117 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/dataset_caller.py | import pickle
import os
import numpy as np
from multiprocessing import Pool
embed_info = {}
type_info = {
'char': 0,
'int': 1,
'float': 2,
'pointer': 3,
'enum': 4,
'struct': 5,
'union': 6
}
def approximate_type(type_str):
int_list = ['_Bool', 'unsigned int', 'int', 'long long int', 'l... | 10,646 | 40.589844 | 118 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/train.py | import os
try:
os.chdir(os.path.join(os.getcwd(), 'src/skip-thoughts'))
print(os.getcwd())
except:
pass
import torch
from torch import nn
from torch.autograd import Variable
import re
import pickle
import random
import numpy as np
from data_loader import DataLoader
from model import UniSkip
from config import *
from... | 3,112 | 27.824074 | 92 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/eval_utils.py | # from model import UniSkip, Encoder
from data_loader import DataLoader
from vocab import load_dictionary
from config import *
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch
import re
import numpy as np
import pickle
class UsableTransformer:
# @profile
d... | 1,766 | 29.465517 | 63 | py |
PalmTree | PalmTree-master/src/data_generator/dataflow_gen.py | from binaryninja import *
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from itertools import product
from sklearn.decomposition import PCA
import random
import os
import re
import tqdm
import pickle
from collections import Counter
... | 4,212 | 29.092857 | 92 | py |
PalmTree | PalmTree-master/src/data_generator/control_flow_gen.py | from binaryninja import *
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from itertools import product
from sklearn.decomposition import PCA
from collections import Counter
import random
import os
import re
import pickle
import math
... | 3,984 | 31.663934 | 96 | py |
PalmTree | PalmTree-master/pre-trained_model/vocab.py | import pickle
import tqdm
from collections import Counter
class TorchVocab(object):
"""Defines a vocabulary object that will be used to numericalize a field.
Attributes:
freqs: A collections.Counter object holding the frequencies of tokens
in the data used to build the Vocab.
stoi:... | 6,753 | 35.311828 | 93 | py |
PalmTree | PalmTree-master/pre-trained_model/config.py | """
Configuration file.
"""
VOCAB_SIZE = 5000
USE_CUDA = True
DEVICES = [0]
CUDA_DEVICE = DEVICES[0]
VERSION = 1
MAXLEN = 10
LEARNING_RATE=1e-5
| 146 | 10.307692 | 24 | py |
PalmTree | PalmTree-master/pre-trained_model/how2use.py | import os
from config import *
from torch import nn
from scipy.ndimage.filters import gaussian_filter1d
from torch.autograd import Variable
import torch
import numpy as np
import eval_utils as utils
palmtree = utils.UsableTransformer(model_path="./palmtree/transformer.ep19", vocab_path="./palmtree/vocab")
# tokens h... | 736 | 26.296296 | 107 | py |
PalmTree | PalmTree-master/pre-trained_model/eval_utils.py | from torch.autograd import Variable
import torch
import re
import numpy
from torch import nn
import torch.nn.functional as F
from config import *
import vocab
# this function is how I parse and pre-pocess instructions for palmtree. It is very simple and based on regular expressions.
# If I use IDA pro or angr inst... | 3,712 | 34.361905 | 125 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/main.py | import sys
from timeit import default_timer as timer
from utils.cli_parser import parse_cli_overides
from utils.config import get_dataset
from learning.preprocess import Preprocess
from utils.ddp_init import cleanup, spawn_nproc, setup
import torch
from utils.common import prepare_train_id
from learning import initiali... | 2,506 | 33.342466 | 104 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/compute_seld_metrics.py | import os
from methods.utils.SELD_metrics import SELDMetrics
from methods.utils.data_utilities import *
from pathlib import Path
from ruamel.yaml import YAML
import argparse
from scipy import stats
import re
def jackknife_estimation(global_value, partial_estimates, significance_level=0.05):
"""
Compute jackk... | 15,630 | 50.587459 | 242 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/inference.py | class BaseInferer:
""" Base inferer class
"""
def infer(self, *args, **kwargs):
""" Perform an inference on test data.
"""
raise NotImplementedError
def fusion(self, submissions_dir, preds):
""" Ensamble predictions.
"""
raise NotImplementedError ... | 336 | 15.047619 | 46 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/training.py | class BaseTrainer:
""" Base trainer class
"""
def train_step(self, *args, **kwargs):
""" Perform a training step.
"""
raise NotImplementedError
def validate_step(self, *args, **kwargs):
""" Perform a validation step
"""
raise NotImplementedError
| 317 | 15.736842 | 45 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/data.py | from pathlib import Path
import os
import pandas as pd
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
from utils.common import int16_samples_to_float32
class BaseDataset(Dataset):
""" User defined datset
"""
def __init__(self, args, cfg, dataset):
"""
Ar... | 3,767 | 34.54717 | 126 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/metrics.py | from methods.utils.SELD_metrics import *
from utils.ddp_init import reduce_value
class Metrics(object):
"""Metrics for evaluation
"""
def __init__(self, dataset):
# self.metrics = []
self.names = ['ER_macro', 'F_macro', 'LE_macro', 'LR_macro', 'SELD_scr_macro', 'ER_micro', 'F_micro', 'LE_... | 1,807 | 35.897959 | 151 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/__init__.py | 0 | 0 | 0 | py | |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/feature.py | import torch
import torch.nn as nn
import librosa
import numpy as np
from methods.utils.stft import (STFT, LogmelFilterBank, intensityvector,
spectrogram_STFTInput)
import math
def nCr(n, r):
return math.factorial(n) // math.factorial(r) // math.factorial(n-r)
class LogmelIntensity... | 7,445 | 42.040462 | 127 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/stft.py | import math
import librosa
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from librosa import ParameterError
from torch.nn.parameter import Parameter
eps = torch.finfo(torch.float32).eps
class DFTBase(nn.Module):
def __init__(self):
"""Base class for DFT and IDFT ma... | 31,480 | 34.174302 | 107 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/model_utilities.py | import numpy as np
import torch
import torch.nn as nn
def init_layer(layer, nonlinearity='leaky_relu'):
'''
Initialize a layer
'''
classname = layer.__class__.__name__
if (classname.find('Conv') != -1) or (classname.find('Linear') != -1):
nn.init.kaiming_uniform_(layer.weight, nonlinearity... | 3,157 | 33.703297 | 96 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/loss_utilities.py | import torch
import torch.nn as nn
import torch.nn.functional as F
eps = torch.finfo(torch.float32).eps
class MSELoss:
def __init__(self, reduction='mean'):
self.reduction = reduction
self.name = 'loss_MSE'
if self.reduction != 'PIT':
self.loss = nn.MSELoss(reduction='mean')
... | 1,222 | 31.184211 | 93 | py |