id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
13,415
def init_location(cam, theta, r):
    """Place camera `cam` on a circle of radius `r` in the XZ plane.

    theta: angle in radians (callers pass a negated angle — see the original
    note "Originally, theta is negated").  r is typically drawn from
    np.random.normal(8, 1).  Returns the same camera object.
    NOTE(review): the original comment says the circle centre is (-1, 0),
    but the code below orbits the origin — confirm against callers.
    """
    # Cartesian position on the circle; height is fixed at y = -2.
    cam_x = math.cos(theta) * r
    cam_z = math.sin(theta) * r
    cam.location = Vector((cam_x, -2, cam_z))
    # Slight downward pitch; yaw tracks the orbit angle.
    cam.rotation_euler = Euler((-np.pi / 20, -np.pi / 2 - theta, 0))
    # Negative scale mirrors the camera on all axes — presumably to match
    # the imported model's axis convention; TODO confirm.
    cam.scale = Vector((-1, -1, -1))
    return cam
null
13,416
def deg2rad(angle):
    """Map an angle in degrees to the negated, 90-degree-offset radian
    convention used by init_scene: -pi * (angle + 90) / 180."""
    return -np.pi * (angle + 90) / 180.


def init_scene(scene, params, gender='male', angle=0):
    """Import the SMPL FBX body model and configure material, camera and
    render passes for a SURREAL-style render.

    Parameters:
        scene: bpy scene used for the render-layer pass flags.
        params: dict with at least 'smpl_data_folder' (and
            'camera_distance' for the commented-out camera placement).
        gender: 'male' or 'female'; only the first letter is used in names.
        angle: camera angle in degrees, converted via deg2rad.

    Returns (ob, obname, arm_ob, cam_ob): mesh object, its name, the
    armature, and the camera.

    FIX: the original assigned `use_pass_emit = True` twice; the duplicate
    (idempotent) assignment has been removed.
    """
    # load fbx model
    bpy.ops.import_scene.fbx(
        filepath=join(params['smpl_data_folder'],
                      'basicModel_%s_lbs_10_207_0_v1.0.2.fbx' % gender[0]),
        axis_forward='-Y', axis_up='-Z', global_scale=100)
    print('success load')
    obname = '%s_avg' % gender[0]
    ob = bpy.data.objects[obname]
    ob.data.use_auto_smooth = False  # autosmooth creates artifacts
    # assign the existing spherical harmonics material
    ob.active_material = bpy.data.materials['Material']
    # delete the default cube (which held the material)
    bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects['Cube'].select = True
    bpy.ops.object.delete(use_global=False)
    # set camera properties and initial position
    bpy.ops.object.select_all(action='DESELECT')
    cam_ob = bpy.data.objects['Camera']
    scn = bpy.context.scene
    scn.objects.active = cam_ob
    # th is currently unused because the placement call below is disabled.
    th = deg2rad(angle)
    # cam_ob = init_location(cam_ob, th, params['camera_distance'])
    '''
    cam_ob.matrix_world = Matrix(((0., 0., 1, params['camera_distance']+dis),
                                  (0., -1, 0., -1.0),
                                  (-1., 0., 0., 0.),
                                  (0.0, 0.0, 0.0, 1.0)))
    '''
    cam_ob.data.angle = math.radians(60)
    cam_ob.data.lens = 60
    cam_ob.data.clip_start = 0.1
    cam_ob.data.sensor_width = 32
    # setup render passes (film transparency + vector/normal/emit/index)
    scn.cycles.film_transparent = True
    scn.render.layers["RenderLayer"].use_pass_vector = True
    scn.render.layers["RenderLayer"].use_pass_normal = True
    scene.render.layers['RenderLayer'].use_pass_emit = True
    scene.render.layers['RenderLayer'].use_pass_material_index = True
    # set render size
    # scn.render.resolution_x = params['resy']
    # scn.render.resolution_y = params['resx']
    scn.render.resolution_percentage = 100
    scn.render.image_settings.file_format = 'PNG'
    # clear existing animation data
    ob.data.shape_keys.animation_data_clear()
    arm_ob = bpy.data.objects['Armature']
    arm_ob.animation_data_clear()
    return (ob, obname, arm_ob, cam_ob)
cam_ob.matrix_world = Matrix(((0., 0., 1, params['camera_distance']+dis), (0., -1, 0., -1.0), (-1., 0., 0., 0.), (0.0, 0.0, 0.0, 1.0)))
13,417
def setState0():
    """Deselect every object in the scene and clear the active object.

    NOTE(review): uses the pre-2.8 style Blender API (`ob.select`,
    `scene.objects.active`) — presumably targets Blender 2.7x; confirm.
    """
    for obj in bpy.data.objects.values():
        obj.select = False
    bpy.context.scene.objects.active = None
null
13,418
# SMPL armature bone name -> joint name lookup (root plus 24 SMPL joints).
part_match = {
    'root': 'root',
    'bone_00': 'Pelvis',  'bone_01': 'L_Hip',      'bone_02': 'R_Hip',
    'bone_03': 'Spine1',  'bone_04': 'L_Knee',     'bone_05': 'R_Knee',
    'bone_06': 'Spine2',  'bone_07': 'L_Ankle',    'bone_08': 'R_Ankle',
    'bone_09': 'Spine3',  'bone_10': 'L_Foot',     'bone_11': 'R_Foot',
    'bone_12': 'Neck',    'bone_13': 'L_Collar',   'bone_14': 'R_Collar',
    'bone_15': 'Head',    'bone_16': 'L_Shoulder', 'bone_17': 'R_Shoulder',
    'bone_18': 'L_Elbow', 'bone_19': 'R_Elbow',    'bone_20': 'L_Wrist',
    'bone_21': 'R_Wrist', 'bone_22': 'L_Hand',     'bone_23': 'R_Hand',
}


def rodrigues2bshapes(pose):
    """Turn a (24*3,) axis-angle pose into per-joint rotation matrices and
    the flattened (R - I) pose-blendshape vector for joints 1..23.

    NOTE(review): relies on a `Rodrigues` helper defined elsewhere in the
    file (not visible in this chunk).
    """
    axis_angles = np.asarray(pose).reshape(24, 3)
    mat_rots = [Rodrigues(aa) for aa in axis_angles]
    bshapes = np.concatenate([(rot - np.eye(3)).ravel()
                              for rot in mat_rots[1:]])
    return (mat_rots, bshapes)


def apply_trans_pose_shape(trans, pose, shape, ob, arm_ob, obname, scene,
                           cam_ob, frame=None):
    """Apply translation, pose and shape to the SMPL mesh + armature,
    optionally inserting keyframes at `frame`.
    """
    # transform pose into rotation matrices and pose blendshape weights
    mrots, bsh = rodrigues2bshapes(pose)

    # root/pelvis translation (the root bone location is always keyframed)
    arm_ob.pose.bones[obname + '_Pelvis'].location = trans
    arm_ob.pose.bones[obname + '_root'].location = trans
    arm_ob.pose.bones[obname + '_root'].keyframe_insert('location', frame=frame)

    # per-bone rotations, as quaternions
    for ibone, mrot in enumerate(mrots):
        joint = part_match['bone_%02d' % ibone]
        bone = arm_ob.pose.bones[obname + '_' + joint]
        bone.rotation_quaternion = Matrix(mrot).to_quaternion()
        if frame is not None:
            bone.keyframe_insert('rotation_quaternion', frame=frame)
            bone.keyframe_insert('location', frame=frame)

    # pose blendshapes
    for ibshape, bshape in enumerate(bsh):
        key = ob.data.shape_keys.key_blocks['Pose%03d' % ibshape]
        key.value = bshape
        if frame is not None:
            key.keyframe_insert('value', index=-1, frame=frame)

    # shape blendshapes
    for ibshape, shape_elem in enumerate(shape):
        key = ob.data.shape_keys.key_blocks['Shape%03d' % ibshape]
        key.value = shape_elem
        if frame is not None:
            key.keyframe_insert('value', index=-1, frame=frame)
null
13,419
def load_motions(path):
    """Load per-person SMPL motion sequences from the JSON files in `path`.

    Groups frames by person id, merges each person's frames with
    `merge_params`, then overwrites the first 3 pose coefficients (global
    orientation) with Rh.  Returns {pid: merged_params}.

    Fixes over the original: removed the unused `keys` local and replaced
    the `if pid not in motions.keys()` double lookup with dict.setdefault.
    NOTE(review): `read_smpl` and `merge_params` are defined elsewhere in
    the file (not visible in this chunk).
    """
    from glob import glob
    filenames = sorted(glob(join(path, '*.json')))
    print(filenames)
    motions = {}
    # for filename in filenames[300:900]:
    for filename in filenames:
        for data in read_smpl(filename):
            motions.setdefault(data['id'], []).append(data)
    # BUG: not strictly equal: (Rh, Th, poses) != (Th, (Rh, poses))
    for pid in motions:
        motions[pid] = merge_params(motions[pid])
        # global orientation slot of the pose is replaced by Rh
        motions[pid]['poses'][:, :3] = motions[pid]['Rh']
    return motions


def load_smpl_params(datapath):
    """Thin wrapper around load_motions, kept for API compatibility."""
    return load_motions(datapath)
null
13,420
import numpy as np
import pandas as pd
import os
import torch
from torch import nn
from src.models.patchTST import PatchTST
from src.learner import Learner, transfer_weights
from src.callback.tracking import *
from src.callback.patch_mask import *
from src.callback.transforms import *
from src.metrics import *
from src.basics import set_device
from datautils import *
import argparse

# NOTE(review): `parser` is never defined in this snippet — an
# argparse.ArgumentParser setup appears to have been elided upstream.
args = parser.parse_args()
print('args:', args)
# File name / directory for the pretrained checkpoint, built from the CLI args.
args.save_pretrained_model = 'patchtst_pretrained_cw'+str(args.context_points)+'_patch'+str(args.patch_len) + '_stride'+str(args.stride) + '_epochs-pretrain' + str(args.n_epochs_pretrain) + '_mask' + str(args.mask_ratio) + '_model' + str(args.pretrained_model_id)
args.save_path = 'saved_models/' + args.dset_pretrain + '/masked_patchtst/' + args.model_type + '/'


def get_model(c_in, args):
    """Build a PatchTST model in masked-pretraining configuration.

    c_in: number of variables (input channels).
    """
    # get number of patches covered by the context window
    num_patch = (max(args.context_points, args.patch_len)-args.patch_len) // args.stride + 1
    print('number of patches:', num_patch)
    # get model
    model = PatchTST(c_in=c_in,
                     target_dim=args.target_points,
                     patch_len=args.patch_len,
                     stride=args.stride,
                     num_patch=num_patch,
                     n_layers=args.n_layers,
                     n_heads=args.n_heads,
                     d_model=args.d_model,
                     shared_embedding=True,
                     d_ff=args.d_ff,
                     dropout=args.dropout,
                     head_dropout=args.head_dropout,
                     act='relu',
                     head_type='pretrain',
                     res_attention=False
                     )
    # print out the model size
    print('number of model params', sum(p.numel() for p in model.parameters() if p.requires_grad))
    return model


class Learner(GetAttr):
    """Minimal fastai-style training loop: callbacks, fit/one-cycle,
    predict/test, freeze/unfreeze, lr finder, save/load.

    NOTE(review): `GetAttr`, `Adam`, `List` and the callback helpers come
    from the star-imports above — presumably fastcore/fastai-style; verify.
    """

    def __init__(self, dls, model, loss_func=None, lr=1e-3, cbs=None, metrics=None, opt_func=Adam, **kwargs):
        self.model, self.dls, self.loss_func, self.lr = model, dls, loss_func, lr
        self.opt_func = opt_func
        #self.opt = self.opt_func(self.model.parameters(), self.lr)
        self.set_opt()
        self.metrics = metrics
        self.n_inp = 2
        # self.n_inp = self.dls.train.dataset.n_inp if self.dls else 0
        # Initialize callbacks
        if cbs and not isinstance(cbs, List): cbs = [cbs]
        self.initialize_callbacks(cbs)
        # Indicator of running lr_finder
        self.run_finder = False

    def set_opt(self):
        # (Re)create the optimizer over the current model parameters.
        if self.model:
            self.opt = self.opt_func(self.model.parameters(), self.lr)
        else:
            self.opt = None

    def default_callback(self):
        "get a set of default callbacks"
        default_cbs = [SetupLearnerCB(), TrackTimerCB(), TrackTrainingCB(train_metrics=False, valid_metrics=True)]
        return default_cbs

    def initialize_callbacks(self, cbs):
        # Merge user callbacks into the defaults, attach each to this learner.
        default_cbs = self.default_callback()
        self.cbs = update_callbacks(cbs, default_cbs) if cbs else default_cbs
        # add print CB
        self.cbs += [PrintResultsCB()]
        for cb in self.cbs: cb.learner = self
        self('init_cb')

    def add_callback(self, cb):
        if not cb: return
        cb.learner = self
        self.cbs = update_callback(cb, self.cbs)

    def add_callbacks(self, cbs):
        if not isinstance(cbs, list): cbs = [cbs]
        for cb in cbs: self.add_callback(cb)

    def remove_callback(self, cb):
        cb.learn = None
        self.cbs, removed_cb = remove_callback(cb, self.cbs)
        return removed_cb

    def remove_callbacks(self, cb_list):
        for cb in cb_list: self.remove_callback(cb)

    def fit(self, n_epochs, lr=None, cbs=None, do_valid=True):
        " fit the model "
        self.n_epochs = n_epochs
        if not self.dls.valid: do_valid = False
        if cbs: self.add_callbacks(cbs)
        if lr: self.opt = self.opt_func(self.model.parameters(), lr)
        self('before_fit')
        try:
            for self.epoch in range(n_epochs):
                self('before_epoch')
                self.one_epoch(train=True)
                # if self.dls.valid:
                if do_valid: self.one_epoch(train=False)
                self('after_epoch')
        # Ctrl-C stops training cleanly; 'after_fit' callbacks still run.
        except KeyboardInterrupt: pass
        self('after_fit')

    def fit_one_cycle(self, n_epochs, lr_max=None, pct_start=0.3):
        # fit() with a one-cycle LR schedule attached as a callback
        self.n_epochs = n_epochs
        self.lr_max = lr_max if lr_max else self.lr
        cb = OneCycleLR(lr_max=self.lr_max, pct_start=pct_start)
        self.fit(self.n_epochs, cbs=cb)

    def one_epoch(self, train):
        self.epoch_train() if train else self.epoch_validate()

    def epoch_train(self):
        self('before_epoch_train')
        self.model.train()
        self.dl = self.dls.train
        self.all_batches('train')
        self('after_epoch_train')

    def epoch_validate(self, dl=None):
        self('before_epoch_valid')
        # model at evaluation mode
        self.model.eval()
        self.dl = dl if dl else self.dls.valid
        if self.dl:
            with torch.no_grad():
                self.all_batches('valid')
        self('after_epoch_valid')

    def all_batches(self, type_):
        # Dispatch every batch of self.dl to the handler for `type_`.
        # for self.num,self.batch in enumerate(progress_bar(dl, leave=False)):
        for num, batch in enumerate(self.dl):
            self.iter, self.batch = num, batch
            if type_ == 'train': self.batch_train()
            elif type_ == 'valid': self.batch_validate()
            elif type_ == 'predict': self.batch_predict()
            elif type_ == 'test': self.batch_test()

    def batch_train(self):
        self('before_batch_train')
        self._do_batch_train()
        self('after_batch_train')

    def batch_validate(self):
        self('before_batch_valid')
        self._do_batch_validate()
        self('after_batch_valid')

    def batch_predict(self):
        self('before_batch_predict')
        self._do_batch_predict()
        self('after_batch_predict')

    def batch_test(self):
        self('before_batch_test')
        self._do_batch_test()
        self('after_batch_test')

    def _do_batch_train(self):
        # forward + get loss + backward + optimize
        self.pred, self.loss = self.train_step(self.batch)
        # zero the parameter gradients
        self.opt.zero_grad()
        # gradient
        self.loss.backward()
        # update weights
        self.opt.step()

    def train_step(self, batch):
        # get the inputs
        self.xb, self.yb = batch
        # forward
        pred = self.model_forward()
        # compute loss
        loss = self.loss_func(pred, self.yb)
        return pred, loss

    def model_forward(self):
        self('before_forward')
        self.pred = self.model(self.xb)
        self('after_forward')
        return self.pred

    def _do_batch_validate(self):
        # forward + calculate loss
        self.pred, self.loss = self.valid_step(self.batch)

    def valid_step(self, batch):
        # get the inputs
        self.xb, self.yb = batch
        # forward
        pred = self.model_forward()
        # compute loss
        loss = self.loss_func(pred, self.yb)
        return pred, loss

    def _do_batch_predict(self):
        self.pred = self.predict_step(self.batch)

    def predict_step(self, batch):
        # get the inputs
        self.xb, self.yb = batch
        # forward
        pred = self.model_forward()
        return pred

    def _do_batch_test(self):
        self.pred, self.yb = self.test_step(self.batch)

    def test_step(self, batch):
        # get the inputs
        self.xb, self.yb = batch
        # forward
        pred = self.model_forward()
        return pred, self.yb

    def _predict(self, dl=None):
        # self('before_validate')
        self('before_predict')
        if dl is None: return
        self.dl = dl
        self.n_inp = dl.dataset.n_inp
        # model at evaluation mode
        self.model.eval()
        with torch.no_grad():
            self.all_batches('predict')
        self('after_predict')

    def predict(self, test_data, weight_path=None, Dataset=None, Dataloader=None, batch_size=None):
        """Run inference on `test_data` (tensor, ndarray, dataset or
        dataloader) and return predictions as a numpy array.

        weight_path: optional checkpoint to load before predicting.
        Dataset/Dataloader: optional classes used to wrap raw test_data.
        """
        if weight_path is not None: self.load(weight_path)
        cb = GetPredictionsCB()
        self.add_callback(cb)
        test_dl = self._prepare_data(test_data, Dataset, Dataloader, batch_size)
        self._predict(test_dl)
        self.preds = cb.preds
        return to_numpy(self.preds)

    def test(self, dl, weight_path=None, scores=None):
        """Evaluate on dataloader `dl`; returns (preds, targets) and, when
        `scores` (iterable of metric callables) is given, their values too.
        """
        if dl is None:
            return
        else:
            self.dl = dl
        if weight_path is not None: self.load(weight_path)
        cb = GetTestCB()
        self.add_callback(cb)
        self('before_test')
        self.model.eval()
        with torch.no_grad():
            self.all_batches('test')
        self('after_test')
        self.preds, self.targets = to_numpy([cb.preds, cb.targets])
        # calculate scores
        if scores:
            s_vals = [score(cb.targets, cb.preds).to('cpu').numpy() for score in list(scores)]
            return self.preds, self.targets, s_vals
        else:
            return self.preds, self.targets

    def _prepare_data(self, test_data, Dataset=None, Dataloader=None, batch_size=None):
        # Wrap raw test_data into a dataloader when wrapper classes are
        # supplied; otherwise defer to self.dls or pass it through as-is.
        if test_data is None: return test_data
        if Dataset and Dataloader:
            test_dset = Dataset(test_data)
            if not batch_size: batch_size=16
            test_dl = Dataloader(test_dset, batch_size)
        else:
            if self.dls:
                # add test_data to the dataloader defined in the dls.train
                test_dl = self.dls.add_dl(test_data, batch_size=batch_size)
            else:
                test_dl = test_data  # assume test_data is already a form of dataloader
        return test_dl

    def get_layer_output(self, inp, layers=None, unwrap=False):
        """Return intermediate layer outputs for `inp` (ndarray, tensor or
        dataloader).

        NOTE(review): delegates to a module-level `get_layer_output` from
        the star-imports — this is not recursion; confirm the helper exists.
        """
        self.model.eval()
        device = next(self.model.parameters()).device
        if isinstance(inp, np.ndarray): inp = torch.Tensor(inp).to(device)
        if isinstance(inp, torch.Tensor): inp = inp.to(device)
        return get_layer_output(inp, model=self.model, layers=layers, unwrap=unwrap)

    def fine_tune(self, n_epochs, base_lr=None, freeze_epochs=1, pct_start=0.3):
        """Finetune the pretrained model: first freeze everything but the
        head and train it for `freeze_epochs`, then unfreeze and train the
        entire network for `n_epochs` at half the base learning rate.
        """
        assert (n_epochs>0)|(freeze_epochs>0), "Either n_epochs or freeze_epochs has to be > 0"
        if not base_lr: base_lr = self.lr
        # Finetune the head if freeze_epochs > 0:
        if freeze_epochs > 0:
            print('Finetune the head')
            self.freeze()
            self.fit_one_cycle(freeze_epochs, lr_max=base_lr, pct_start=pct_start)
        # Finetune the entire network if n_epochs > 0
        if n_epochs > 0:
            print('Finetune the entire network')
            self.unfreeze()
            self.fit_one_cycle(n_epochs, lr_max=base_lr/2, pct_start=pct_start)

    def linear_probe(self, n_epochs, base_lr=None, pct_start=0.3):
        """Linear-probe the pretrained model: the model is frozen except
        the head during finetuning.
        """
        assert (n_epochs>0), "n_epochs has to be > 0"
        if not base_lr: base_lr = self.lr
        print('Finetune the head')
        self.freeze()
        self.fit_one_cycle(n_epochs, lr_max=base_lr, pct_start=pct_start)

    def lr_finder(self, start_lr=1e-7, end_lr=10, num_iter=100, step_mode='exp', show_plot=True, suggestion='valley'):
        """ find the learning rate """
        n_epochs = num_iter//len(self.dls.train) + 1
        # indicator of lr_finder method is applied
        self.run_finder = True
        # add LRFinderCB to callback list and will remove later
        cb = LRFinderCB(start_lr, end_lr, num_iter, step_mode, suggestion=suggestion)
        # fit
        self.fit(n_epochs=n_epochs, cbs=cb, do_valid=False)
        # should remove LRFinderCB callback after fitting
        self.remove_callback(cb)
        self.run_finder = False
        if show_plot: cb.plot_lr_find()
        if suggestion: return cb.suggested_lr

    def freeze(self):
        """Freeze everything but the model head (requires the model to
        have a `head` attribute).

        NOTE(review): `get_model(self.model)` collides with the module-level
        `get_model(c_in, args)` above — it presumably refers to a
        model-unwrapping helper from the star-imports; confirm which one
        actually resolves here.
        """
        if hasattr(get_model(self.model), 'head'):
            # print('model head is available')
            for param in get_model(self.model).parameters(): param.requires_grad = False
            for param in get_model(self.model).head.parameters(): param.requires_grad = True
            # print('model is frozen except the head')

    def unfreeze(self):
        # See NOTE(review) on freeze() about get_model resolution.
        for param in get_model(self.model).parameters(): param.requires_grad = True

    def __call__(self, name):
        # Fire the event `name` on every callback that defines it.
        for cb in self.cbs:
            attr = getattr(cb, name)
            if attr is not None: attr()

    def save(self, fname, path, **kwargs):
        """
        Save model and optimizer state (if `with_opt`) to `self.path/file`
        """
        fname = join_path_file(fname, path, ext='.pth')
        save_model(fname, self.model, getattr(self,'opt',None), **kwargs)
        return fname

    def load(self, fname, with_opt=False, device='cuda', strict=True, **kwargs):
        """ load the model """
        if not torch.cuda.is_available(): device = "cpu"
        load_model(fname, self.model, self.opt, with_opt, device=device, strict=strict)

    def get_params(self, deep=True, **kwargs):
        # sklearn-style parameter access via BaseEstimator
        params = BaseEstimator.get_params(self, deep=deep, **kwargs)
        return params

    def _get_param_names(self):
        return (k for k in self.__dict__ if not k.endswith('_'))

    def set_params(self, **kwargs):
        params = {}
        for key, val in kwargs.items(): params[key] = val
        BaseEstimator.set_params(self, **params)

    def to_distributed(self,
                       sync_bn=True,  # Whether to replace all batch norm with `nn.SyncBatchNorm`
                       **kwargs
                       ):
        # Attach a DistributedTrainer callback configured from the
        # torchrun-style environment variables.
        local_rank = int(os.environ.get('LOCAL_RANK'))
        world_size = int(os.environ.get('WORLD_SIZE'))
        rank = int(os.environ.get('RANK'))
        print('Process {} (out of {})'.format(rank, torch.distributed.get_world_size()))
        self.add_callback(DistributedTrainer(local_rank=local_rank, world_size=world_size, sync_bn=sync_bn, **kwargs))
        return self


def find_lr():
    """Run the LR finder over the masked-pretraining setup and return the
    suggested learning rate."""
    # get dataloader
    dls = get_dls(args)
    model = get_model(dls.vars, args)
    # get loss
    loss_func = torch.nn.MSELoss(reduction='mean')
    # get callbacks
    cbs = [RevInCB(dls.vars, denorm=False)] if args.revin else []
    cbs += [PatchMaskCB(patch_len=args.patch_len, stride=args.stride, mask_ratio=args.mask_ratio)]
    # define learner
    learn = Learner(dls, model, loss_func, lr=args.lr, cbs=cbs, )
    # fit the data to the model
    suggested_lr = learn.lr_finder()
    print('suggested_lr', suggested_lr)
    return suggested_lr
null
13,421
import numpy as np
import pandas as pd
import os
import torch
from torch import nn
from src.models.patchTST import PatchTST
from src.learner import Learner, transfer_weights
from src.callback.tracking import *
from src.callback.patch_mask import *
from src.callback.transforms import *
from src.metrics import *
from src.basics import set_device
from datautils import *
import argparse

# NOTE(review): `parser` is never defined in this snippet — an
# argparse.ArgumentParser setup appears to have been elided upstream.
args = parser.parse_args()
# File name / directory for the pretrained checkpoint, built from the CLI args.
args.save_pretrained_model = 'patchtst_pretrained_cw'+str(args.context_points)+'_patch'+str(args.patch_len) + '_stride'+str(args.stride) + '_epochs-pretrain' + str(args.n_epochs_pretrain) + '_mask' + str(args.mask_ratio) + '_model' + str(args.pretrained_model_id)
args.save_path = 'saved_models/' + args.dset_pretrain + '/masked_patchtst/' + args.model_type + '/'


def get_model(c_in, args):
    """Build a PatchTST model in masked-pretraining configuration.

    c_in: number of variables (input channels).
    """
    # get number of patches covered by the context window
    num_patch = (max(args.context_points, args.patch_len)-args.patch_len) // args.stride + 1
    print('number of patches:', num_patch)
    # get model
    model = PatchTST(c_in=c_in,
                     target_dim=args.target_points,
                     patch_len=args.patch_len,
                     stride=args.stride,
                     num_patch=num_patch,
                     n_layers=args.n_layers,
                     n_heads=args.n_heads,
                     d_model=args.d_model,
                     shared_embedding=True,
                     d_ff=args.d_ff,
                     dropout=args.dropout,
                     head_dropout=args.head_dropout,
                     act='relu',
                     head_type='pretrain',
                     res_attention=False
                     )
    # print out the model size
    print('number of model params', sum(p.numel() for p in model.parameters() if p.requires_grad))
    return model


class Learner(GetAttr):
    """Minimal fastai-style training loop: callbacks, fit/one-cycle,
    predict/test, freeze/unfreeze, lr finder, save/load.

    NOTE(review): `GetAttr`, `Adam`, `List` and the callback helpers come
    from the star-imports above — presumably fastcore/fastai-style; verify.
    """

    def __init__(self, dls, model, loss_func=None, lr=1e-3, cbs=None, metrics=None, opt_func=Adam, **kwargs):
        self.model, self.dls, self.loss_func, self.lr = model, dls, loss_func, lr
        self.opt_func = opt_func
        #self.opt = self.opt_func(self.model.parameters(), self.lr)
        self.set_opt()
        self.metrics = metrics
        self.n_inp = 2
        # self.n_inp = self.dls.train.dataset.n_inp if self.dls else 0
        # Initialize callbacks
        if cbs and not isinstance(cbs, List): cbs = [cbs]
        self.initialize_callbacks(cbs)
        # Indicator of running lr_finder
        self.run_finder = False

    def set_opt(self):
        # (Re)create the optimizer over the current model parameters.
        if self.model:
            self.opt = self.opt_func(self.model.parameters(), self.lr)
        else:
            self.opt = None

    def default_callback(self):
        "get a set of default callbacks"
        default_cbs = [SetupLearnerCB(), TrackTimerCB(), TrackTrainingCB(train_metrics=False, valid_metrics=True)]
        return default_cbs

    def initialize_callbacks(self, cbs):
        # Merge user callbacks into the defaults, attach each to this learner.
        default_cbs = self.default_callback()
        self.cbs = update_callbacks(cbs, default_cbs) if cbs else default_cbs
        # add print CB
        self.cbs += [PrintResultsCB()]
        for cb in self.cbs: cb.learner = self
        self('init_cb')

    def add_callback(self, cb):
        if not cb: return
        cb.learner = self
        self.cbs = update_callback(cb, self.cbs)

    def add_callbacks(self, cbs):
        if not isinstance(cbs, list): cbs = [cbs]
        for cb in cbs: self.add_callback(cb)

    def remove_callback(self, cb):
        cb.learn = None
        self.cbs, removed_cb = remove_callback(cb, self.cbs)
        return removed_cb

    def remove_callbacks(self, cb_list):
        for cb in cb_list: self.remove_callback(cb)

    def fit(self, n_epochs, lr=None, cbs=None, do_valid=True):
        " fit the model "
        self.n_epochs = n_epochs
        if not self.dls.valid: do_valid = False
        if cbs: self.add_callbacks(cbs)
        if lr: self.opt = self.opt_func(self.model.parameters(), lr)
        self('before_fit')
        try:
            for self.epoch in range(n_epochs):
                self('before_epoch')
                self.one_epoch(train=True)
                # if self.dls.valid:
                if do_valid: self.one_epoch(train=False)
                self('after_epoch')
        # Ctrl-C stops training cleanly; 'after_fit' callbacks still run.
        except KeyboardInterrupt: pass
        self('after_fit')

    def fit_one_cycle(self, n_epochs, lr_max=None, pct_start=0.3):
        # fit() with a one-cycle LR schedule attached as a callback
        self.n_epochs = n_epochs
        self.lr_max = lr_max if lr_max else self.lr
        cb = OneCycleLR(lr_max=self.lr_max, pct_start=pct_start)
        self.fit(self.n_epochs, cbs=cb)

    def one_epoch(self, train):
        self.epoch_train() if train else self.epoch_validate()

    def epoch_train(self):
        self('before_epoch_train')
        self.model.train()
        self.dl = self.dls.train
        self.all_batches('train')
        self('after_epoch_train')

    def epoch_validate(self, dl=None):
        self('before_epoch_valid')
        # model at evaluation mode
        self.model.eval()
        self.dl = dl if dl else self.dls.valid
        if self.dl:
            with torch.no_grad():
                self.all_batches('valid')
        self('after_epoch_valid')

    def all_batches(self, type_):
        # Dispatch every batch of self.dl to the handler for `type_`.
        # for self.num,self.batch in enumerate(progress_bar(dl, leave=False)):
        for num, batch in enumerate(self.dl):
            self.iter, self.batch = num, batch
            if type_ == 'train': self.batch_train()
            elif type_ == 'valid': self.batch_validate()
            elif type_ == 'predict': self.batch_predict()
            elif type_ == 'test': self.batch_test()

    def batch_train(self):
        self('before_batch_train')
        self._do_batch_train()
        self('after_batch_train')

    def batch_validate(self):
        self('before_batch_valid')
        self._do_batch_validate()
        self('after_batch_valid')

    def batch_predict(self):
        self('before_batch_predict')
        self._do_batch_predict()
        self('after_batch_predict')

    def batch_test(self):
        self('before_batch_test')
        self._do_batch_test()
        self('after_batch_test')

    def _do_batch_train(self):
        # forward + get loss + backward + optimize
        self.pred, self.loss = self.train_step(self.batch)
        # zero the parameter gradients
        self.opt.zero_grad()
        # gradient
        self.loss.backward()
        # update weights
        self.opt.step()

    def train_step(self, batch):
        # get the inputs
        self.xb, self.yb = batch
        # forward
        pred = self.model_forward()
        # compute loss
        loss = self.loss_func(pred, self.yb)
        return pred, loss

    def model_forward(self):
        self('before_forward')
        self.pred = self.model(self.xb)
        self('after_forward')
        return self.pred

    def _do_batch_validate(self):
        # forward + calculate loss
        self.pred, self.loss = self.valid_step(self.batch)

    def valid_step(self, batch):
        # get the inputs
        self.xb, self.yb = batch
        # forward
        pred = self.model_forward()
        # compute loss
        loss = self.loss_func(pred, self.yb)
        return pred, loss

    def _do_batch_predict(self):
        self.pred = self.predict_step(self.batch)

    def predict_step(self, batch):
        # get the inputs
        self.xb, self.yb = batch
        # forward
        pred = self.model_forward()
        return pred

    def _do_batch_test(self):
        self.pred, self.yb = self.test_step(self.batch)

    def test_step(self, batch):
        # get the inputs
        self.xb, self.yb = batch
        # forward
        pred = self.model_forward()
        return pred, self.yb

    def _predict(self, dl=None):
        # self('before_validate')
        self('before_predict')
        if dl is None: return
        self.dl = dl
        self.n_inp = dl.dataset.n_inp
        # model at evaluation mode
        self.model.eval()
        with torch.no_grad():
            self.all_batches('predict')
        self('after_predict')

    def predict(self, test_data, weight_path=None, Dataset=None, Dataloader=None, batch_size=None):
        """Run inference on `test_data` (tensor, ndarray, dataset or
        dataloader) and return predictions as a numpy array.

        weight_path: optional checkpoint to load before predicting.
        Dataset/Dataloader: optional classes used to wrap raw test_data.
        """
        if weight_path is not None: self.load(weight_path)
        cb = GetPredictionsCB()
        self.add_callback(cb)
        test_dl = self._prepare_data(test_data, Dataset, Dataloader, batch_size)
        self._predict(test_dl)
        self.preds = cb.preds
        return to_numpy(self.preds)

    def test(self, dl, weight_path=None, scores=None):
        """Evaluate on dataloader `dl`; returns (preds, targets) and, when
        `scores` (iterable of metric callables) is given, their values too.
        """
        if dl is None:
            return
        else:
            self.dl = dl
        if weight_path is not None: self.load(weight_path)
        cb = GetTestCB()
        self.add_callback(cb)
        self('before_test')
        self.model.eval()
        with torch.no_grad():
            self.all_batches('test')
        self('after_test')
        self.preds, self.targets = to_numpy([cb.preds, cb.targets])
        # calculate scores
        if scores:
            s_vals = [score(cb.targets, cb.preds).to('cpu').numpy() for score in list(scores)]
            return self.preds, self.targets, s_vals
        else:
            return self.preds, self.targets

    def _prepare_data(self, test_data, Dataset=None, Dataloader=None, batch_size=None):
        # Wrap raw test_data into a dataloader when wrapper classes are
        # supplied; otherwise defer to self.dls or pass it through as-is.
        if test_data is None: return test_data
        if Dataset and Dataloader:
            test_dset = Dataset(test_data)
            if not batch_size: batch_size=16
            test_dl = Dataloader(test_dset, batch_size)
        else:
            if self.dls:
                # add test_data to the dataloader defined in the dls.train
                test_dl = self.dls.add_dl(test_data, batch_size=batch_size)
            else:
                test_dl = test_data  # assume test_data is already a form of dataloader
        return test_dl

    def get_layer_output(self, inp, layers=None, unwrap=False):
        """Return intermediate layer outputs for `inp` (ndarray, tensor or
        dataloader).

        NOTE(review): delegates to a module-level `get_layer_output` from
        the star-imports — this is not recursion; confirm the helper exists.
        """
        self.model.eval()
        device = next(self.model.parameters()).device
        if isinstance(inp, np.ndarray): inp = torch.Tensor(inp).to(device)
        if isinstance(inp, torch.Tensor): inp = inp.to(device)
        return get_layer_output(inp, model=self.model, layers=layers, unwrap=unwrap)

    def fine_tune(self, n_epochs, base_lr=None, freeze_epochs=1, pct_start=0.3):
        """Finetune the pretrained model: first freeze everything but the
        head and train it for `freeze_epochs`, then unfreeze and train the
        entire network for `n_epochs` at half the base learning rate.
        """
        assert (n_epochs>0)|(freeze_epochs>0), "Either n_epochs or freeze_epochs has to be > 0"
        if not base_lr: base_lr = self.lr
        # Finetune the head if freeze_epochs > 0:
        if freeze_epochs > 0:
            print('Finetune the head')
            self.freeze()
            self.fit_one_cycle(freeze_epochs, lr_max=base_lr, pct_start=pct_start)
        # Finetune the entire network if n_epochs > 0
        if n_epochs > 0:
            print('Finetune the entire network')
            self.unfreeze()
            self.fit_one_cycle(n_epochs, lr_max=base_lr/2, pct_start=pct_start)

    def linear_probe(self, n_epochs, base_lr=None, pct_start=0.3):
        """Linear-probe the pretrained model: the model is frozen except
        the head during finetuning.
        """
        assert (n_epochs>0), "n_epochs has to be > 0"
        if not base_lr: base_lr = self.lr
        print('Finetune the head')
        self.freeze()
        self.fit_one_cycle(n_epochs, lr_max=base_lr, pct_start=pct_start)

    def lr_finder(self, start_lr=1e-7, end_lr=10, num_iter=100, step_mode='exp', show_plot=True, suggestion='valley'):
        """ find the learning rate """
        n_epochs = num_iter//len(self.dls.train) + 1
        # indicator of lr_finder method is applied
        self.run_finder = True
        # add LRFinderCB to callback list and will remove later
        cb = LRFinderCB(start_lr, end_lr, num_iter, step_mode, suggestion=suggestion)
        # fit
        self.fit(n_epochs=n_epochs, cbs=cb, do_valid=False)
        # should remove LRFinderCB callback after fitting
        self.remove_callback(cb)
        self.run_finder = False
        if show_plot: cb.plot_lr_find()
        if suggestion: return cb.suggested_lr

    def freeze(self):
        """Freeze everything but the model head (requires the model to
        have a `head` attribute).

        NOTE(review): `get_model(self.model)` collides with the module-level
        `get_model(c_in, args)` above — it presumably refers to a
        model-unwrapping helper from the star-imports; confirm which one
        actually resolves here.
        """
        if hasattr(get_model(self.model), 'head'):
            # print('model head is available')
            for param in get_model(self.model).parameters(): param.requires_grad = False
            for param in get_model(self.model).head.parameters(): param.requires_grad = True
            # print('model is frozen except the head')

    def unfreeze(self):
        # See NOTE(review) on freeze() about get_model resolution.
        for param in get_model(self.model).parameters(): param.requires_grad = True

    def __call__(self, name):
        # Fire the event `name` on every callback that defines it.
        for cb in self.cbs:
            attr = getattr(cb, name)
            if attr is not None: attr()

    def save(self, fname, path, **kwargs):
        """
        Save model and optimizer state (if `with_opt`) to `self.path/file`
        """
        fname = join_path_file(fname, path, ext='.pth')
        save_model(fname, self.model, getattr(self,'opt',None), **kwargs)
        return fname

    def load(self, fname, with_opt=False, device='cuda', strict=True, **kwargs):
        """ load the model """
        if not torch.cuda.is_available(): device = "cpu"
        load_model(fname, self.model, self.opt, with_opt, device=device, strict=strict)

    def get_params(self, deep=True, **kwargs):
        # sklearn-style parameter access via BaseEstimator
        params = BaseEstimator.get_params(self, deep=deep, **kwargs)
        return params

    def _get_param_names(self):
        return (k for k in self.__dict__ if not k.endswith('_'))

    def set_params(self, **kwargs):
        params = {}
        for key, val in kwargs.items(): params[key] = val
        BaseEstimator.set_params(self, **params)

    def to_distributed(self,
                       sync_bn=True,  # Whether to replace all batch norm with `nn.SyncBatchNorm`
                       **kwargs
                       ):
        # Attach a DistributedTrainer callback configured from the
        # torchrun-style environment variables.
        local_rank = int(os.environ.get('LOCAL_RANK'))
        world_size = int(os.environ.get('WORLD_SIZE'))
        rank = int(os.environ.get('RANK'))
        print('Process {} (out of {})'.format(rank, torch.distributed.get_world_size()))
        self.add_callback(DistributedTrainer(local_rank=local_rank, world_size=world_size, sync_bn=sync_bn, **kwargs))
        return self


def pretrain_func(lr=args.lr):
    """Run masked pretraining end-to-end: build dataloaders/model, fit with
    one-cycle, and write the train/valid loss history to a CSV next to the
    saved checkpoint."""
    # get dataloader
    dls = get_dls(args)
    # get model
    model = get_model(dls.vars, args)
    # get loss
    loss_func = torch.nn.MSELoss(reduction='mean')
    # get callbacks
    cbs = [RevInCB(dls.vars, denorm=False)] if args.revin else []
    cbs += [
        PatchMaskCB(patch_len=args.patch_len, stride=args.stride, mask_ratio=args.mask_ratio),
        SaveModelCB(monitor='valid_loss', fname=args.save_pretrained_model, path=args.save_path)
    ]
    # define learner
    learn = Learner(dls, model, loss_func,
                    lr=lr,
                    cbs=cbs,
                    #metrics=[mse]
                    )
    # fit the data to the model
    learn.fit_one_cycle(n_epochs=args.n_epochs_pretrain, lr_max=lr)
    train_loss = learn.recorder['train_loss']
    valid_loss = learn.recorder['valid_loss']
    df = pd.DataFrame(data={'train_loss': train_loss, 'valid_loss': valid_loss})
    df.to_csv(args.save_path + args.save_pretrained_model + '_losses.csv', float_format='%.6f', index=False)
null
13,422
import torch from torch import nn from .core import Callback The provided code snippet includes necessary dependencies for implementing the `create_patch` function. Write a Python function `def create_patch(xb, patch_len, stride)` to solve the following problem: xb: [bs x seq_len x n_vars] Here is the function: def create_patch(xb, patch_len, stride): """ xb: [bs x seq_len x n_vars] """ seq_len = xb.shape[1] num_patch = (max(seq_len, patch_len)-patch_len) // stride + 1 tgt_len = patch_len + stride*(num_patch-1) s_begin = seq_len - tgt_len xb = xb[:, s_begin:, :] # xb: [bs x tgt_len x nvars] xb = xb.unfold(dimension=1, size=patch_len, step=stride) # xb: [bs x num_patch x n_vars x patch_len] return xb, num_patch
xb: [bs x seq_len x n_vars]
13,423
import torch from torch import nn from .core import Callback def random_masking(xb, mask_ratio): # xb: [bs x num_patch x n_vars x patch_len] bs, L, nvars, D = xb.shape x = xb.clone() len_keep = int(L * (1 - mask_ratio)) noise = torch.rand(bs, L, nvars,device=xb.device) # noise in [0, 1], bs x L x nvars # sort noise for each sample ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=1) # ids_restore: [bs x L x nvars] # keep the first subset ids_keep = ids_shuffle[:, :len_keep, :] # ids_keep: [bs x len_keep x nvars] x_kept = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, 1, D)) # x_kept: [bs x len_keep x nvars x patch_len] # removed x x_removed = torch.zeros(bs, L-len_keep, nvars, D, device=xb.device) # x_removed: [bs x (L-len_keep) x nvars x patch_len] x_ = torch.cat([x_kept, x_removed], dim=1) # x_: [bs x L x nvars x patch_len] # combine the kept part and the removed one x_masked = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1,1,1,D)) # x_masked: [bs x num_patch x nvars x patch_len] # generate the binary mask: 0 is keep, 1 is remove mask = torch.ones([bs, L, nvars], device=x.device) # mask: [bs x num_patch x nvars] mask[:, :len_keep, :] = 0 # unshuffle to get the binary mask mask = torch.gather(mask, dim=1, index=ids_restore) # [bs x num_patch x nvars] return x_masked, x_kept, mask, ids_restore
null
13,424
import torch from torch import nn from .core import Callback def random_masking_3D(xb, mask_ratio): # xb: [bs x num_patch x dim] bs, L, D = xb.shape x = xb.clone() len_keep = int(L * (1 - mask_ratio)) noise = torch.rand(bs, L, device=xb.device) # noise in [0, 1], bs x L # sort noise for each sample ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=1) # ids_restore: [bs x L] # keep the first subset ids_keep = ids_shuffle[:, :len_keep] # ids_keep: [bs x len_keep] x_kept = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D)) # x_kept: [bs x len_keep x dim] # removed x x_removed = torch.zeros(bs, L-len_keep, D, device=xb.device) # x_removed: [bs x (L-len_keep) x dim] x_ = torch.cat([x_kept, x_removed], dim=1) # x_: [bs x L x dim] # combine the kept part and the removed one x_masked = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1,1,D)) # x_masked: [bs x num_patch x dim] # generate the binary mask: 0 is keep, 1 is remove mask = torch.ones([bs, L], device=x.device) # mask: [bs x num_patch] mask[:, :len_keep] = 0 # unshuffle to get the binary mask mask = torch.gather(mask, dim=1, index=ids_restore) # [bs x num_patch] return x_masked, x_kept, mask, ids_restore
null
13,425
from cmath import inf from ..basics import * from .core import Callback from torch.optim import lr_scheduler from torch.optim.lr_scheduler import _LRScheduler The provided code snippet includes necessary dependencies for implementing the `valley` function. Write a Python function `def valley(lrs:list, losses:list)` to solve the following problem: Suggests a learning rate from the longest valley and returns its index Here is the function: def valley(lrs:list, losses:list): "Suggests a learning rate from the longest valley and returns its index" n = len(losses) max_start, max_end = 0,0 # find the longest valley lds = [1]*n for i in range(1,n): for j in range(0,i): if (losses[i] < losses[j]) and (lds[i] < lds[j] + 1): lds[i] = lds[j] + 1 if lds[max_end] < lds[i]: max_end = i max_start = max_end - lds[max_end] sections = (max_end - max_start) / 3 idx = max_start + int(sections) + int(sections/2) return float(lrs[idx])
Suggests a learning rate from the longest valley and returns its index
13,426
from torch import nn import collections from collections import OrderedDict import torch import os from datetime import timedelta def init_ddp(): local_rank = int(os.environ.get('LOCAL_RANK')) world_size = int(os.environ.get('WORLD_SIZE')) rank = int(os.environ.get('RANK')) torch.cuda.set_device(local_rank) torch.distributed.init_process_group( 'nccl', init_method='env://', world_size=world_size, rank=rank, timeout=timedelta(seconds=600) )
null
13,427
import os import numpy as np import pandas as pd import os import torch from torch.utils.data import Dataset, DataLoader from sklearn.preprocessing import StandardScaler from src.data.timefeatures import time_features import warnings def _torch(*dfs): return tuple(torch.from_numpy(x).float() for x in dfs)
null
13,428
from typing import List import numpy as np import pandas as pd from pandas.tseries import offsets from pandas.tseries.frequencies import to_offset def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: """ Returns a list of time features that will be appropriate for the given frequency string. Parameters ---------- freq_str Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc. """ features_by_offsets = { offsets.YearEnd: [], offsets.QuarterEnd: [MonthOfYear], offsets.MonthEnd: [MonthOfYear], offsets.Week: [DayOfMonth, WeekOfYear], offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear], offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear], offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear], offsets.Minute: [ MinuteOfHour, HourOfDay, DayOfWeek, DayOfMonth, DayOfYear, ], offsets.Second: [ SecondOfMinute, MinuteOfHour, HourOfDay, DayOfWeek, DayOfMonth, DayOfYear, ], } offset = to_offset(freq_str) for offset_type, feature_classes in features_by_offsets.items(): if isinstance(offset, offset_type): return [cls() for cls in feature_classes] supported_freq_msg = f""" Unsupported frequency {freq_str} The following frequencies are supported: Y - yearly alias: A M - monthly W - weekly D - daily B - business days H - hourly T - minutely alias: min S - secondly """ raise RuntimeError(supported_freq_msg) def time_features(dates, freq='h'): return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)])
null
13,429
from typing import List import torch from torch.optim import Adam from torch import nn from torch.nn.parallel import DistributedDataParallel from .basics import * from .callback.core import * from .callback.tracking import * from .callback.scheduler import * from .callback.distributed import * from .utils import * from pathlib import Path from tqdm import tqdm import numpy as np from sklearn.base import BaseEstimator from unittest.mock import patch def get_model(model): "Return the model maybe wrapped inside `model`." return model.module if isinstance(model, (DistributedDataParallel, nn.DataParallel)) else model import torch import torch import torch from torch.optim import lr_scheduler from torch.optim.lr_scheduler import _LRScheduler import torch from torch.utils.data import DistributedSampler, DataLoader, SequentialSampler from torch.nn.parallel import DistributedDataParallel from torch import nn import torch The provided code snippet includes necessary dependencies for implementing the `save_model` function. Write a Python function `def save_model(path, model, opt, with_opt=True, pickle_protocol=2)` to solve the following problem: Save `model` to `file` along with `opt` (if available, and if `with_opt`) Here is the function: def save_model(path, model, opt, with_opt=True, pickle_protocol=2): "Save `model` to `file` along with `opt` (if available, and if `with_opt`)" if opt is None: with_opt=False state = get_model(model).state_dict() if with_opt: state = {'model': state, 'opt':opt.state_dict()} torch.save(state, path, pickle_protocol=pickle_protocol)
Save `model` to `file` along with `opt` (if available, and if `with_opt`)
13,430
from typing import List import torch from torch.optim import Adam from torch import nn from torch.nn.parallel import DistributedDataParallel from .basics import * from .callback.core import * from .callback.tracking import * from .callback.scheduler import * from .callback.distributed import * from .utils import * from pathlib import Path from tqdm import tqdm import numpy as np from sklearn.base import BaseEstimator from unittest.mock import patch def get_model(model): "Return the model maybe wrapped inside `model`." return model.module if isinstance(model, (DistributedDataParallel, nn.DataParallel)) else model import torch import torch import torch from torch.optim import lr_scheduler from torch.optim.lr_scheduler import _LRScheduler import torch from torch.utils.data import DistributedSampler, DataLoader, SequentialSampler from torch.nn.parallel import DistributedDataParallel from torch import nn import torch The provided code snippet includes necessary dependencies for implementing the `load_model` function. Write a Python function `def load_model(path, model, opt=None, with_opt=False, device='cpu', strict=True)` to solve the following problem: load the saved model Here is the function: def load_model(path, model, opt=None, with_opt=False, device='cpu', strict=True): " load the saved model " state = torch.load(path, map_location=device) if not opt: with_opt=False model_state = state['model'] if with_opt else state get_model(model).load_state_dict(model_state, strict=strict) if with_opt: opt.load_state_dict(state['opt']) model = model.to(device)
load the saved model
13,431
from typing import List import torch from torch.optim import Adam from torch import nn from torch.nn.parallel import DistributedDataParallel from .basics import * from .callback.core import * from .callback.tracking import * from .callback.scheduler import * from .callback.distributed import * from .utils import * from pathlib import Path from tqdm import tqdm import numpy as np from sklearn.base import BaseEstimator from unittest.mock import patch The provided code snippet includes necessary dependencies for implementing the `join_path_file` function. Write a Python function `def join_path_file(file, path, ext='')` to solve the following problem: Return `path/file` if file is a string or a `Path`, file otherwise Here is the function: def join_path_file(file, path, ext=''): "Return `path/file` if file is a string or a `Path`, file otherwise" if not isinstance(file, (str, Path)): return file if not isinstance(path, Path): path = Path(path) path.mkdir(parents=True, exist_ok=True) return path/f'{file}{ext}'
Return `path/file` if file is a string or a `Path`, file otherwise
13,432
from typing import List import torch from torch.optim import Adam from torch import nn from torch.nn.parallel import DistributedDataParallel from .basics import * from .callback.core import * from .callback.tracking import * from .callback.scheduler import * from .callback.distributed import * from .utils import * from pathlib import Path from tqdm import tqdm import numpy as np from sklearn.base import BaseEstimator from unittest.mock import patch def update_callback(cb, list_cbs): def update_callbacks(list_cbs, default_cbs): for cb in list_cbs: default_cbs = update_callback(cb, default_cbs) return default_cbs
null
13,433
from typing import List import torch from torch.optim import Adam from torch import nn from torch.nn.parallel import DistributedDataParallel from .basics import * from .callback.core import * from .callback.tracking import * from .callback.scheduler import * from .callback.distributed import * from .utils import * from pathlib import Path from tqdm import tqdm import numpy as np from sklearn.base import BaseEstimator from unittest.mock import patch def remove_callback(cb, list_cbs): for cb_ in list_cbs: if type(cb_) == type(cb): list_cbs.remove(cb_) break return list_cbs, cb_
null
13,434
from typing import List import torch from torch.optim import Adam from torch import nn from torch.nn.parallel import DistributedDataParallel from .basics import * from .callback.core import * from .callback.tracking import * from .callback.scheduler import * from .callback.distributed import * from .utils import * from pathlib import Path from tqdm import tqdm import numpy as np from sklearn.base import BaseEstimator from unittest.mock import patch import numpy as np def unwrap_model(model): unwrapped_model = nested_children(model) unwrapped_model = flatten_dict(unwrapped_model) unwrapped_model = nn.Sequential(OrderedDict(unwrapped_model)) return unwrapped_model The provided code snippet includes necessary dependencies for implementing the `get_layer_output` function. Write a Python function `def get_layer_output(inp, model, layers=None, unwrap=False)` to solve the following problem: layers is a list of module names Here is the function: def get_layer_output(inp, model, layers=None, unwrap=False): """ layers is a list of module names """ orig_model = model if unwrap: model = unwrap_model(model) if not layers: layers = list(dict(model.named_children()).keys()) if not isinstance(layers, list): layers = [layers] activation = {} def getActivation(name): # the hook signature def hook(model, input, output): activation[name] = output.detach().cpu().numpy() return hook # register forward hooks on the layers of choice h_list = [getattr(model, layer).register_forward_hook(getActivation(layer)) for layer in layers] model.eval() out = orig_model(inp) for h in h_list: h.remove() return activation
layers is a list of module names
13,435
import torch import collections from collections import OrderedDict def set_device(usage=5): "set the device that has usage < default usage " device_ids = get_available_cuda(usage=usage) torch.cuda.set_device(device_ids[0]) # get the first available device def get_available_cuda(usage=10): if not torch.cuda.is_available(): return # collect available cuda devices, only collect devices that has less that 'usage' percent device_ids = [] for device in range(torch.cuda.device_count()): if torch.cuda.utilization(device) < usage: device_ids.append(device) return device_ids The provided code snippet includes necessary dependencies for implementing the `get_device` function. Write a Python function `def get_device(use_cuda=True, device_id=None, usage=5)` to solve the following problem: Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU Here is the function: def get_device(use_cuda=True, device_id=None, usage=5): "Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU" if not torch.cuda.is_available(): use_cuda = False else: if device_id is None: device_ids = get_available_cuda(usage=usage) device_id = device_ids[0] # get the first available device torch.cuda.set_device(device_id) return torch.device(torch.cuda.current_device()) if use_cuda else torch.device('cpu')
Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU
13,436
import torch import collections from collections import OrderedDict def default_device(use_cuda=True): "Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU" if not torch.cuda.is_available(): use_cuda = False return torch.device(torch.cuda.current_device()) if use_cuda else torch.device('cpu') The provided code snippet includes necessary dependencies for implementing the `to_device` function. Write a Python function `def to_device(b, device=None, non_blocking=False)` to solve the following problem: Recursively put `b` on `device` components of b are torch tensors Here is the function: def to_device(b, device=None, non_blocking=False): """ Recursively put `b` on `device` components of b are torch tensors """ if device is None: device = default_device(use_cuda=True) if isinstance(b, dict): return {key: to_device(val, device) for key, val in b.items()} if isinstance(b, (list, tuple)): return type(b)(to_device(o, device) for o in b) return b.to(device, non_blocking=non_blocking)
Recursively put `b` on `device` components of b are torch tensors
13,437
import torch import collections from collections import OrderedDict The provided code snippet includes necessary dependencies for implementing the `to_numpy` function. Write a Python function `def to_numpy(b)` to solve the following problem: Components of b are torch tensors Here is the function: def to_numpy(b): """ Components of b are torch tensors """ if isinstance(b, dict): return {key: to_numpy(val) for key, val in b.items()} if isinstance(b, (list, tuple)): return type(b)(to_numpy(o) for o in b) return b.detach().cpu().numpy()
Components of b are torch tensors
13,438
import torch from torch import nn import math def PositionalEncoding(q_len, d_model, normalize=True): pe = torch.zeros(q_len, d_model) position = torch.arange(0, q_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) if normalize: pe = pe - pe.mean() pe = pe / (pe.std() * 10) return pe def positional_encoding(pe, learn_pe, q_len, d_model): # Positional encoding if pe == None: W_pos = torch.empty((q_len, d_model)) # pe = None and learn_pe = False can be used to measure impact of pe nn.init.uniform_(W_pos, -0.02, 0.02) learn_pe = False elif pe == 'zero': W_pos = torch.empty((q_len, 1)) nn.init.uniform_(W_pos, -0.02, 0.02) elif pe == 'zeros': W_pos = torch.empty((q_len, d_model)) nn.init.uniform_(W_pos, -0.02, 0.02) elif pe == 'normal' or pe == 'gauss': W_pos = torch.zeros((q_len, 1)) torch.nn.init.normal_(W_pos, mean=0.0, std=0.1) elif pe == 'uniform': W_pos = torch.zeros((q_len, 1)) nn.init.uniform_(W_pos, a=0.0, b=0.1) elif pe == 'sincos': W_pos = PositionalEncoding(q_len, d_model, normalize=True) else: raise ValueError(f"{pe} is not a valid pe (positional encoder. Available types: 'gauss'=='normal', \ 'zeros', 'zero', uniform', 'sincos', None.)") return nn.Parameter(W_pos, requires_grad=learn_pe)
null
13,439
import torch from torch import nn The provided code snippet includes necessary dependencies for implementing the `sigmoid_range` function. Write a Python function `def sigmoid_range(x, low, high)` to solve the following problem: Sigmoid function with range `(low, high)` Here is the function: def sigmoid_range(x, low, high): "Sigmoid function with range `(low, high)`" return torch.sigmoid(x) * (high - low) + low
Sigmoid function with range `(low, high)`
13,440
import torch from torch import nn def get_activation_fn(activation): if callable(activation): return activation() elif activation.lower() == "relu": return nn.ReLU() elif activation.lower() == "gelu": return nn.GELU() raise ValueError(f'{activation} is not available. You can use "relu", "gelu", or a callable')
null
13,441
import torch from torch import Tensor import torch.nn.functional as F def mse(y_true, y_pred): return F.mse_loss(y_true, y_pred, reduction='mean')
null
13,442
import torch from torch import Tensor import torch.nn.functional as F def rmse(y_true, y_pred): return torch.sqrt(F.mse_loss(y_true, y_pred, reduction='mean'))
null
13,443
import torch from torch import Tensor import torch.nn.functional as F def mae(y_true, y_pred): return F.l1_loss(y_true, y_pred, reduction='mean')
null
13,444
import torch from torch import Tensor import torch.nn.functional as F def r2_score(y_true, y_pred): from sklearn.metrics import r2_score return r2_score(y_true, y_pred)
null
13,445
import torch from torch import Tensor import torch.nn.functional as F def mape(y_true, y_pred): from sklearn.metrics import mean_absolute_percentage_error return mean_absolute_percentage_error(y_true, y_pred)
null
13,446
import numpy as np import pandas as pd import os import torch from torch import nn from src.models.patchTST import PatchTST from src.learner import Learner, transfer_weights from src.callback.core import * from src.callback.tracking import * from src.callback.patch_mask import * from src.callback.transforms import * from src.metrics import * from src.basics import set_device from datautils import * import argparse args = parser.parse_args() print('args:', args) args.save_path = 'saved_models/' + args.dset_finetune + '/masked_patchtst/' + args.model_type + '/' if args.is_finetune: args.save_finetuned_model = args.dset_finetune+'_patchtst_finetuned'+suffix_name elif args.is_linear_probe: args.save_finetuned_model = args.dset_finetune+'_patchtst_linear-probe'+suffix_name else: args.save_finetuned_model = args.dset_finetune+'_patchtst_finetuned'+suffix_name def get_model(c_in, args, head_type, weight_path=None): """ c_in: number of variables """ # get number of patches num_patch = (max(args.context_points, args.patch_len)-args.patch_len) // args.stride + 1 print('number of patches:', num_patch) # get model model = PatchTST(c_in=c_in, target_dim=args.target_points, patch_len=args.patch_len, stride=args.stride, num_patch=num_patch, n_layers=args.n_layers, n_heads=args.n_heads, d_model=args.d_model, shared_embedding=True, d_ff=args.d_ff, dropout=args.dropout, head_dropout=args.head_dropout, act='relu', head_type=head_type, res_attention=False ) if weight_path: model = transfer_weights(weight_path, model) # print out the model size print('number of model params', sum(p.numel() for p in model.parameters() if p.requires_grad)) return model class Learner(GetAttr): def __init__(self, dls, model, loss_func=None, lr=1e-3, cbs=None, metrics=None, opt_func=Adam, **kwargs): self.model, self.dls, self.loss_func, self.lr = model, dls, loss_func, lr self.opt_func = opt_func #self.opt = self.opt_func(self.model.parameters(), self.lr) self.set_opt() self.metrics = metrics self.n_inp = 2 
# self.n_inp = self.dls.train.dataset.n_inp if self.dls else 0 # Initialize callbacks if cbs and not isinstance(cbs, List): cbs = [cbs] self.initialize_callbacks(cbs) # Indicator of running lr_finder self.run_finder = False def set_opt(self): if self.model: self.opt = self.opt_func(self.model.parameters(), self.lr) else: self.opt = None def default_callback(self): "get a set of default callbacks" default_cbs = [ SetupLearnerCB(), TrackTimerCB(), TrackTrainingCB(train_metrics=False, valid_metrics=True)] return default_cbs def initialize_callbacks(self, cbs): default_cbs = self.default_callback() self.cbs = update_callbacks(cbs, default_cbs) if cbs else default_cbs # add print CB self.cbs += [PrintResultsCB()] for cb in self.cbs: cb.learner = self self('init_cb') def add_callback(self, cb): if not cb: return cb.learner = self self.cbs = update_callback(cb, self.cbs) def add_callbacks(self, cbs): if not isinstance(cbs, list): cbs = [cbs] for cb in cbs: self.add_callback(cb) def remove_callback(self, cb): cb.learn = None self.cbs, removed_cb = remove_callback(cb, self.cbs) return removed_cb def remove_callbacks(self, cb_list): for cb in cb_list: self.remove_callback(cb) def fit(self, n_epochs, lr=None, cbs=None, do_valid=True): " fit the model " self.n_epochs = n_epochs if not self.dls.valid: do_valid = False if cbs: self.add_callbacks(cbs) if lr: self.opt = self.opt_func(self.model.parameters(), lr) self('before_fit') try: for self.epoch in range(n_epochs): self('before_epoch') self.one_epoch(train=True) # if self.dls.valid: if do_valid: self.one_epoch(train=False) self('after_epoch') except KeyboardInterrupt: pass self('after_fit') def fit_one_cycle(self, n_epochs, lr_max=None, pct_start=0.3): self.n_epochs = n_epochs self.lr_max = lr_max if lr_max else self.lr cb = OneCycleLR(lr_max=self.lr_max, pct_start=pct_start) self.fit(self.n_epochs, cbs=cb) def one_epoch(self, train): self.epoch_train() if train else self.epoch_validate() def epoch_train(self): 
self('before_epoch_train') self.model.train() self.dl = self.dls.train self.all_batches('train') self('after_epoch_train') def epoch_validate(self, dl=None): self('before_epoch_valid') # model at evaluation mode self.model.eval() self.dl = dl if dl else self.dls.valid if self.dl: with torch.no_grad(): self.all_batches('valid') self('after_epoch_valid') def all_batches(self, type_): # for self.num,self.batch in enumerate(progress_bar(dl, leave=False)): for num, batch in enumerate(self.dl): self.iter, self.batch = num, batch if type_ == 'train': self.batch_train() elif type_ == 'valid': self.batch_validate() elif type_ == 'predict': self.batch_predict() elif type_ == 'test': self.batch_test() def batch_train(self): self('before_batch_train') self._do_batch_train() self('after_batch_train') def batch_validate(self): self('before_batch_valid') self._do_batch_validate() self('after_batch_valid') def batch_predict(self): self('before_batch_predict') self._do_batch_predict() self('after_batch_predict') def batch_test(self): self('before_batch_test') self._do_batch_test() self('after_batch_test') def _do_batch_train(self): # forward + get loss + backward + optimize self.pred, self.loss = self.train_step(self.batch) # zero the parameter gradients self.opt.zero_grad() # gradient self.loss.backward() # update weights self.opt.step() def train_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() # compute loss loss = self.loss_func(pred, self.yb) return pred, loss def model_forward(self): self('before_forward') self.pred = self.model(self.xb) self('after_forward') return self.pred def _do_batch_validate(self): # forward + calculate loss self.pred, self.loss = self.valid_step(self.batch) def valid_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() # compute loss loss = self.loss_func(pred, self.yb) return pred, loss def _do_batch_predict(self): self.pred = self.predict_step(self.batch) 
def predict_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() return pred def _do_batch_test(self): self.pred, self.yb = self.test_step(self.batch) def test_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() return pred, self.yb def _predict(self, dl=None): # self('before_validate') self('before_predict') if dl is None: return self.dl = dl self.n_inp = dl.dataset.n_inp self.model.eval() # model at evaluation mode with torch.no_grad(): self.all_batches('predict') self('after_predict') def predict(self, test_data, weight_path=None, Dataset=None, Dataloader=None, batch_size=None): """_summary_ Args: test_data can be a tensor, numpy array, dataset or dataloader Returns: _type_: _description_ """ if weight_path is not None: self.load(weight_path) cb = GetPredictionsCB() self.add_callback(cb) test_dl = self._prepare_data(test_data, Dataset, Dataloader, batch_size) self._predict(test_dl) self.preds = cb.preds return to_numpy(self.preds) def test(self, dl, weight_path=None, scores=None): """_summary_ Args: test_data can be a tensor, numpy array, dataset or dataloader Returns: _type_: _description_ """ if dl is None: return else: self.dl = dl if weight_path is not None: self.load(weight_path) cb = GetTestCB() self.add_callback(cb) self('before_test') self.model.eval() with torch.no_grad(): self.all_batches('test') self('after_test') self.preds, self.targets = to_numpy([cb.preds, cb.targets]) # calculate scores if scores: s_vals = [score(cb.targets, cb.preds).to('cpu').numpy() for score in list(scores)] return self.preds, self.targets, s_vals else: return self.preds, self.targets def _prepare_data(self, test_data, Dataset=None, Dataloader=None, batch_size=None): if test_data is None: return test_data if Dataset and Dataloader: test_dset = Dataset(test_data) if not batch_size: batch_size=16 test_dl = Dataloader(test_dset, batch_size) else: if self.dls: # add test_data to the 
dataloader defined in the dls.train test_dl = self.dls.add_dl(test_data, batch_size=batch_size) else: test_dl = test_data # assume test_data is already a form of dataloader return test_dl def get_layer_output(self, inp, layers=None, unwrap=False): """ Args: inp: can be numpy array, torch tensor or dataloader """ self.model.eval() device = next(self.model.parameters()).device if isinstance(inp, np.ndarray): inp = torch.Tensor(inp).to(device) if isinstance(inp, torch.Tensor): inp = inp.to(device) return get_layer_output(inp, model=self.model, layers=layers, unwrap=unwrap) def fine_tune(self, n_epochs, base_lr=None, freeze_epochs=1, pct_start=0.3): """ fintune the pretrained model. First the entire model is freezed, only head is trained up to a freeze_epochs number. Then the model is unfreezed and the entire model is trained """ assert (n_epochs>0)|(freeze_epochs>0), "Either n_epochs or freeze_epochs has to be > 0" if not base_lr: base_lr = self.lr # Finetune the head of freeze_epochs > 0: if freeze_epochs > 0: print('Finetune the head') self.freeze() self.fit_one_cycle(freeze_epochs, lr_max=base_lr, pct_start=pct_start) # Finetune the entire network if n_epochs > 0 if n_epochs > 0: print('Finetune the entire network') self.unfreeze() self.fit_one_cycle(n_epochs, lr_max=base_lr/2, pct_start=pct_start) def linear_probe(self, n_epochs, base_lr=None, pct_start=0.3): """ linear probing the pretrained model. 
The model is freeze except the head during finetuning """ assert (n_epochs>0), "n_epochs has to be > 0" if not base_lr: base_lr = self.lr print('Finetune the head') self.freeze() self.fit_one_cycle(n_epochs, lr_max=base_lr, pct_start=pct_start) def lr_finder(self, start_lr=1e-7, end_lr=10, num_iter=100, step_mode='exp', show_plot=True, suggestion='valley'): """ find the learning rate """ n_epochs = num_iter//len(self.dls.train) + 1 # indicator of lr_finder method is applied self.run_finder = True # add LRFinderCB to callback list and will remove later cb = LRFinderCB(start_lr, end_lr, num_iter, step_mode, suggestion=suggestion) # fit self.fit(n_epochs=n_epochs, cbs=cb, do_valid=False) # should remove LRFinderCB callback after fitting self.remove_callback(cb) self.run_finder = False if show_plot: cb.plot_lr_find() if suggestion: return cb.suggested_lr def freeze(self): """ freeze the model head require the model to have head attribute """ if hasattr(get_model(self.model), 'head'): # print('model head is available') for param in get_model(self.model).parameters(): param.requires_grad = False for param in get_model(self.model).head.parameters(): param.requires_grad = True # print('model is frozen except the head') def unfreeze(self): for param in get_model(self.model).parameters(): param.requires_grad = True def __call__(self, name): for cb in self.cbs: attr = getattr(cb, name) if attr is not None: attr() def save(self, fname, path, **kwargs): """ Save model and optimizer state (if `with_opt`) to `self.path/file` """ fname = join_path_file(fname, path, ext='.pth') save_model(fname, self.model, getattr(self,'opt',None), **kwargs) return fname def load(self, fname, with_opt=False, device='cuda', strict=True, **kwargs): """ load the model """ if not torch.cuda.is_available(): device = "cpu" load_model(fname, self.model, self.opt, with_opt, device=device, strict=strict) def get_params(self, deep=True, **kwargs): params = BaseEstimator.get_params(self, deep=deep, **kwargs) 
return params def _get_param_names(self): return (k for k in self.__dict__ if not k.endswith('_')) def set_params(self, **kwargs): params = {} for key, val in kwargs.items(): params[key] = val BaseEstimator.set_params(self, **params) def to_distributed(self, sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm` **kwargs ): local_rank = int(os.environ.get('LOCAL_RANK')) world_size = int(os.environ.get('WORLD_SIZE')) rank = int(os.environ.get('RANK')) print('Process {} (out of {})'.format( rank, torch.distributed.get_world_size())) self.add_callback(DistributedTrainer(local_rank=local_rank, world_size=world_size, sync_bn=sync_bn, **kwargs)) return self def transfer_weights(weights_path, model, exclude_head=True, device='cpu'): # state_dict = model.state_dict() new_state_dict = torch.load(weights_path, map_location=device) #print('new_state_dict',new_state_dict) matched_layers = 0 unmatched_layers = [] for name, param in model.state_dict().items(): if exclude_head and 'head' in name: continue if name in new_state_dict: matched_layers += 1 input_param = new_state_dict[name] if input_param.shape == param.shape: param.copy_(input_param) else: unmatched_layers.append(name) else: unmatched_layers.append(name) pass # these are weights that weren't in the original model, such as a new head if matched_layers == 0: raise Exception("No shared weight names were found between the models") else: if len(unmatched_layers) > 0: print(f'check unmatched_layers: {unmatched_layers}') else: print(f"weights from {weights_path} successfully transferred!\n") model = model.to(device) return model def find_lr(head_type): # get dataloader dls = get_dls(args) model = get_model(dls.vars, args, head_type) # transfer weight # weight_path = args.save_path + args.pretrained_model + '.pth' model = transfer_weights(args.pretrained_model, model) # get loss loss_func = torch.nn.MSELoss(reduction='mean') # get callbacks cbs = [RevInCB(dls.vars)] if args.revin else [] cbs += 
[PatchCB(patch_len=args.patch_len, stride=args.stride)] # define learner learn = Learner(dls, model, loss_func, lr=args.lr, cbs=cbs, ) # fit the data to the model suggested_lr = learn.lr_finder() print('suggested_lr', suggested_lr) return suggested_lr
null
13,447
import numpy as np import pandas as pd import os import torch from torch import nn from src.models.patchTST import PatchTST from src.learner import Learner, transfer_weights from src.callback.core import * from src.callback.tracking import * from src.callback.patch_mask import * from src.callback.transforms import * from src.metrics import * from src.basics import set_device from datautils import * import argparse args = parser.parse_args() print('args:', args) args.save_path = 'saved_models/' + args.dset_finetune + '/masked_patchtst/' + args.model_type + '/' if args.is_finetune: args.save_finetuned_model = args.dset_finetune+'_patchtst_finetuned'+suffix_name elif args.is_linear_probe: args.save_finetuned_model = args.dset_finetune+'_patchtst_linear-probe'+suffix_name else: args.save_finetuned_model = args.dset_finetune+'_patchtst_finetuned'+suffix_name def get_model(c_in, args, head_type, weight_path=None): """ c_in: number of variables """ # get number of patches num_patch = (max(args.context_points, args.patch_len)-args.patch_len) // args.stride + 1 print('number of patches:', num_patch) # get model model = PatchTST(c_in=c_in, target_dim=args.target_points, patch_len=args.patch_len, stride=args.stride, num_patch=num_patch, n_layers=args.n_layers, n_heads=args.n_heads, d_model=args.d_model, shared_embedding=True, d_ff=args.d_ff, dropout=args.dropout, head_dropout=args.head_dropout, act='relu', head_type=head_type, res_attention=False ) if weight_path: model = transfer_weights(weight_path, model) # print out the model size print('number of model params', sum(p.numel() for p in model.parameters() if p.requires_grad)) return model def save_recorders(learn): train_loss = learn.recorder['train_loss'] valid_loss = learn.recorder['valid_loss'] df = pd.DataFrame(data={'train_loss': train_loss, 'valid_loss': valid_loss}) df.to_csv(args.save_path + args.save_finetuned_model + '_losses.csv', float_format='%.6f', index=False) class Learner(GetAttr): def __init__(self, dls, 
model, loss_func=None, lr=1e-3, cbs=None, metrics=None, opt_func=Adam, **kwargs): self.model, self.dls, self.loss_func, self.lr = model, dls, loss_func, lr self.opt_func = opt_func #self.opt = self.opt_func(self.model.parameters(), self.lr) self.set_opt() self.metrics = metrics self.n_inp = 2 # self.n_inp = self.dls.train.dataset.n_inp if self.dls else 0 # Initialize callbacks if cbs and not isinstance(cbs, List): cbs = [cbs] self.initialize_callbacks(cbs) # Indicator of running lr_finder self.run_finder = False def set_opt(self): if self.model: self.opt = self.opt_func(self.model.parameters(), self.lr) else: self.opt = None def default_callback(self): "get a set of default callbacks" default_cbs = [ SetupLearnerCB(), TrackTimerCB(), TrackTrainingCB(train_metrics=False, valid_metrics=True)] return default_cbs def initialize_callbacks(self, cbs): default_cbs = self.default_callback() self.cbs = update_callbacks(cbs, default_cbs) if cbs else default_cbs # add print CB self.cbs += [PrintResultsCB()] for cb in self.cbs: cb.learner = self self('init_cb') def add_callback(self, cb): if not cb: return cb.learner = self self.cbs = update_callback(cb, self.cbs) def add_callbacks(self, cbs): if not isinstance(cbs, list): cbs = [cbs] for cb in cbs: self.add_callback(cb) def remove_callback(self, cb): cb.learn = None self.cbs, removed_cb = remove_callback(cb, self.cbs) return removed_cb def remove_callbacks(self, cb_list): for cb in cb_list: self.remove_callback(cb) def fit(self, n_epochs, lr=None, cbs=None, do_valid=True): " fit the model " self.n_epochs = n_epochs if not self.dls.valid: do_valid = False if cbs: self.add_callbacks(cbs) if lr: self.opt = self.opt_func(self.model.parameters(), lr) self('before_fit') try: for self.epoch in range(n_epochs): self('before_epoch') self.one_epoch(train=True) # if self.dls.valid: if do_valid: self.one_epoch(train=False) self('after_epoch') except KeyboardInterrupt: pass self('after_fit') def fit_one_cycle(self, n_epochs, lr_max=None, 
pct_start=0.3): self.n_epochs = n_epochs self.lr_max = lr_max if lr_max else self.lr cb = OneCycleLR(lr_max=self.lr_max, pct_start=pct_start) self.fit(self.n_epochs, cbs=cb) def one_epoch(self, train): self.epoch_train() if train else self.epoch_validate() def epoch_train(self): self('before_epoch_train') self.model.train() self.dl = self.dls.train self.all_batches('train') self('after_epoch_train') def epoch_validate(self, dl=None): self('before_epoch_valid') # model at evaluation mode self.model.eval() self.dl = dl if dl else self.dls.valid if self.dl: with torch.no_grad(): self.all_batches('valid') self('after_epoch_valid') def all_batches(self, type_): # for self.num,self.batch in enumerate(progress_bar(dl, leave=False)): for num, batch in enumerate(self.dl): self.iter, self.batch = num, batch if type_ == 'train': self.batch_train() elif type_ == 'valid': self.batch_validate() elif type_ == 'predict': self.batch_predict() elif type_ == 'test': self.batch_test() def batch_train(self): self('before_batch_train') self._do_batch_train() self('after_batch_train') def batch_validate(self): self('before_batch_valid') self._do_batch_validate() self('after_batch_valid') def batch_predict(self): self('before_batch_predict') self._do_batch_predict() self('after_batch_predict') def batch_test(self): self('before_batch_test') self._do_batch_test() self('after_batch_test') def _do_batch_train(self): # forward + get loss + backward + optimize self.pred, self.loss = self.train_step(self.batch) # zero the parameter gradients self.opt.zero_grad() # gradient self.loss.backward() # update weights self.opt.step() def train_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() # compute loss loss = self.loss_func(pred, self.yb) return pred, loss def model_forward(self): self('before_forward') self.pred = self.model(self.xb) self('after_forward') return self.pred def _do_batch_validate(self): # forward + calculate loss self.pred, self.loss 
= self.valid_step(self.batch) def valid_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() # compute loss loss = self.loss_func(pred, self.yb) return pred, loss def _do_batch_predict(self): self.pred = self.predict_step(self.batch) def predict_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() return pred def _do_batch_test(self): self.pred, self.yb = self.test_step(self.batch) def test_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() return pred, self.yb def _predict(self, dl=None): # self('before_validate') self('before_predict') if dl is None: return self.dl = dl self.n_inp = dl.dataset.n_inp self.model.eval() # model at evaluation mode with torch.no_grad(): self.all_batches('predict') self('after_predict') def predict(self, test_data, weight_path=None, Dataset=None, Dataloader=None, batch_size=None): """_summary_ Args: test_data can be a tensor, numpy array, dataset or dataloader Returns: _type_: _description_ """ if weight_path is not None: self.load(weight_path) cb = GetPredictionsCB() self.add_callback(cb) test_dl = self._prepare_data(test_data, Dataset, Dataloader, batch_size) self._predict(test_dl) self.preds = cb.preds return to_numpy(self.preds) def test(self, dl, weight_path=None, scores=None): """_summary_ Args: test_data can be a tensor, numpy array, dataset or dataloader Returns: _type_: _description_ """ if dl is None: return else: self.dl = dl if weight_path is not None: self.load(weight_path) cb = GetTestCB() self.add_callback(cb) self('before_test') self.model.eval() with torch.no_grad(): self.all_batches('test') self('after_test') self.preds, self.targets = to_numpy([cb.preds, cb.targets]) # calculate scores if scores: s_vals = [score(cb.targets, cb.preds).to('cpu').numpy() for score in list(scores)] return self.preds, self.targets, s_vals else: return self.preds, self.targets def _prepare_data(self, 
test_data, Dataset=None, Dataloader=None, batch_size=None): if test_data is None: return test_data if Dataset and Dataloader: test_dset = Dataset(test_data) if not batch_size: batch_size=16 test_dl = Dataloader(test_dset, batch_size) else: if self.dls: # add test_data to the dataloader defined in the dls.train test_dl = self.dls.add_dl(test_data, batch_size=batch_size) else: test_dl = test_data # assume test_data is already a form of dataloader return test_dl def get_layer_output(self, inp, layers=None, unwrap=False): """ Args: inp: can be numpy array, torch tensor or dataloader """ self.model.eval() device = next(self.model.parameters()).device if isinstance(inp, np.ndarray): inp = torch.Tensor(inp).to(device) if isinstance(inp, torch.Tensor): inp = inp.to(device) return get_layer_output(inp, model=self.model, layers=layers, unwrap=unwrap) def fine_tune(self, n_epochs, base_lr=None, freeze_epochs=1, pct_start=0.3): """ fintune the pretrained model. First the entire model is freezed, only head is trained up to a freeze_epochs number. Then the model is unfreezed and the entire model is trained """ assert (n_epochs>0)|(freeze_epochs>0), "Either n_epochs or freeze_epochs has to be > 0" if not base_lr: base_lr = self.lr # Finetune the head of freeze_epochs > 0: if freeze_epochs > 0: print('Finetune the head') self.freeze() self.fit_one_cycle(freeze_epochs, lr_max=base_lr, pct_start=pct_start) # Finetune the entire network if n_epochs > 0 if n_epochs > 0: print('Finetune the entire network') self.unfreeze() self.fit_one_cycle(n_epochs, lr_max=base_lr/2, pct_start=pct_start) def linear_probe(self, n_epochs, base_lr=None, pct_start=0.3): """ linear probing the pretrained model. 
The model is freeze except the head during finetuning """ assert (n_epochs>0), "n_epochs has to be > 0" if not base_lr: base_lr = self.lr print('Finetune the head') self.freeze() self.fit_one_cycle(n_epochs, lr_max=base_lr, pct_start=pct_start) def lr_finder(self, start_lr=1e-7, end_lr=10, num_iter=100, step_mode='exp', show_plot=True, suggestion='valley'): """ find the learning rate """ n_epochs = num_iter//len(self.dls.train) + 1 # indicator of lr_finder method is applied self.run_finder = True # add LRFinderCB to callback list and will remove later cb = LRFinderCB(start_lr, end_lr, num_iter, step_mode, suggestion=suggestion) # fit self.fit(n_epochs=n_epochs, cbs=cb, do_valid=False) # should remove LRFinderCB callback after fitting self.remove_callback(cb) self.run_finder = False if show_plot: cb.plot_lr_find() if suggestion: return cb.suggested_lr def freeze(self): """ freeze the model head require the model to have head attribute """ if hasattr(get_model(self.model), 'head'): # print('model head is available') for param in get_model(self.model).parameters(): param.requires_grad = False for param in get_model(self.model).head.parameters(): param.requires_grad = True # print('model is frozen except the head') def unfreeze(self): for param in get_model(self.model).parameters(): param.requires_grad = True def __call__(self, name): for cb in self.cbs: attr = getattr(cb, name) if attr is not None: attr() def save(self, fname, path, **kwargs): """ Save model and optimizer state (if `with_opt`) to `self.path/file` """ fname = join_path_file(fname, path, ext='.pth') save_model(fname, self.model, getattr(self,'opt',None), **kwargs) return fname def load(self, fname, with_opt=False, device='cuda', strict=True, **kwargs): """ load the model """ if not torch.cuda.is_available(): device = "cpu" load_model(fname, self.model, self.opt, with_opt, device=device, strict=strict) def get_params(self, deep=True, **kwargs): params = BaseEstimator.get_params(self, deep=deep, **kwargs) 
return params def _get_param_names(self): return (k for k in self.__dict__ if not k.endswith('_')) def set_params(self, **kwargs): params = {} for key, val in kwargs.items(): params[key] = val BaseEstimator.set_params(self, **params) def to_distributed(self, sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm` **kwargs ): local_rank = int(os.environ.get('LOCAL_RANK')) world_size = int(os.environ.get('WORLD_SIZE')) rank = int(os.environ.get('RANK')) print('Process {} (out of {})'.format( rank, torch.distributed.get_world_size())) self.add_callback(DistributedTrainer(local_rank=local_rank, world_size=world_size, sync_bn=sync_bn, **kwargs)) return self def transfer_weights(weights_path, model, exclude_head=True, device='cpu'): # state_dict = model.state_dict() new_state_dict = torch.load(weights_path, map_location=device) #print('new_state_dict',new_state_dict) matched_layers = 0 unmatched_layers = [] for name, param in model.state_dict().items(): if exclude_head and 'head' in name: continue if name in new_state_dict: matched_layers += 1 input_param = new_state_dict[name] if input_param.shape == param.shape: param.copy_(input_param) else: unmatched_layers.append(name) else: unmatched_layers.append(name) pass # these are weights that weren't in the original model, such as a new head if matched_layers == 0: raise Exception("No shared weight names were found between the models") else: if len(unmatched_layers) > 0: print(f'check unmatched_layers: {unmatched_layers}') else: print(f"weights from {weights_path} successfully transferred!\n") model = model.to(device) return model def finetune_func(lr=args.lr): print('end-to-end finetuning') # get dataloader dls = get_dls(args) # get model model = get_model(dls.vars, args, head_type='prediction') # transfer weight # weight_path = args.pretrained_model + '.pth' model = transfer_weights(args.pretrained_model, model) # get loss loss_func = torch.nn.MSELoss(reduction='mean') # get callbacks cbs = 
[RevInCB(dls.vars, denorm=True)] if args.revin else [] cbs += [ PatchCB(patch_len=args.patch_len, stride=args.stride), SaveModelCB(monitor='valid_loss', fname=args.save_finetuned_model, path=args.save_path) ] # define learner learn = Learner(dls, model, loss_func, lr=lr, cbs=cbs, metrics=[mse] ) # fit the data to the model #learn.fit_one_cycle(n_epochs=args.n_epochs_finetune, lr_max=lr) learn.fine_tune(n_epochs=args.n_epochs_finetune, base_lr=lr, freeze_epochs=10) save_recorders(learn)
null
13,448
import numpy as np import pandas as pd import os import torch from torch import nn from src.models.patchTST import PatchTST from src.learner import Learner, transfer_weights from src.callback.core import * from src.callback.tracking import * from src.callback.patch_mask import * from src.callback.transforms import * from src.metrics import * from src.basics import set_device from datautils import * import argparse args = parser.parse_args() print('args:', args) args.save_path = 'saved_models/' + args.dset_finetune + '/masked_patchtst/' + args.model_type + '/' if args.is_finetune: args.save_finetuned_model = args.dset_finetune+'_patchtst_finetuned'+suffix_name elif args.is_linear_probe: args.save_finetuned_model = args.dset_finetune+'_patchtst_linear-probe'+suffix_name else: args.save_finetuned_model = args.dset_finetune+'_patchtst_finetuned'+suffix_name def get_model(c_in, args, head_type, weight_path=None): def save_recorders(learn): class Learner(GetAttr): def __init__(self, dls, model, loss_func=None, lr=1e-3, cbs=None, metrics=None, opt_func=Adam, **kwargs): def set_opt(self): def default_callback(self): def initialize_callbacks(self, cbs): def add_callback(self, cb): def add_callbacks(self, cbs): def remove_callback(self, cb): def remove_callbacks(self, cb_list): def fit(self, n_epochs, lr=None, cbs=None, do_valid=True): def fit_one_cycle(self, n_epochs, lr_max=None, pct_start=0.3): def one_epoch(self, train): def epoch_train(self): def epoch_validate(self, dl=None): def all_batches(self, type_): def batch_train(self): def batch_validate(self): def batch_predict(self): def batch_test(self): def _do_batch_train(self): def train_step(self, batch): def model_forward(self): def _do_batch_validate(self): def valid_step(self, batch): def _do_batch_predict(self): def predict_step(self, batch): def _do_batch_test(self): def test_step(self, batch): def _predict(self, dl=None): def predict(self, test_data, weight_path=None, Dataset=None, Dataloader=None, 
batch_size=None): def test(self, dl, weight_path=None, scores=None): def _prepare_data(self, test_data, Dataset=None, Dataloader=None, batch_size=None): def get_layer_output(self, inp, layers=None, unwrap=False): def fine_tune(self, n_epochs, base_lr=None, freeze_epochs=1, pct_start=0.3): def linear_probe(self, n_epochs, base_lr=None, pct_start=0.3): def lr_finder(self, start_lr=1e-7, end_lr=10, num_iter=100, step_mode='exp', show_plot=True, suggestion='valley'): def freeze(self): def unfreeze(self): def __call__(self, name): def save(self, fname, path, **kwargs): def load(self, fname, with_opt=False, device='cuda', strict=True, **kwargs): def get_params(self, deep=True, **kwargs): def _get_param_names(self): def set_params(self, **kwargs): def to_distributed(self, sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm` **kwargs ): def transfer_weights(weights_path, model, exclude_head=True, device='cpu'): def linear_probe_func(lr=args.lr): print('linear probing') # get dataloader dls = get_dls(args) # get model model = get_model(dls.vars, args, head_type='prediction') # transfer weight # weight_path = args.save_path + args.pretrained_model + '.pth' model = transfer_weights(args.pretrained_model, model) # get loss loss_func = torch.nn.MSELoss(reduction='mean') # get callbacks cbs = [RevInCB(dls.vars, denorm=True)] if args.revin else [] cbs += [ PatchCB(patch_len=args.patch_len, stride=args.stride), SaveModelCB(monitor='valid_loss', fname=args.save_finetuned_model, path=args.save_path) ] # define learner learn = Learner(dls, model, loss_func, lr=lr, cbs=cbs, metrics=[mse] ) # fit the data to the model learn.linear_probe(n_epochs=args.n_epochs_finetune, base_lr=lr) save_recorders(learn)
null
13,449
import numpy as np import pandas as pd import os import torch from torch import nn from src.models.patchTST import PatchTST from src.learner import Learner, transfer_weights from src.callback.core import * from src.callback.tracking import * from src.callback.patch_mask import * from src.callback.transforms import * from src.metrics import * from src.basics import set_device from datautils import * import argparse args = parser.parse_args() print('args:', args) args.save_path = 'saved_models/' + args.dset_finetune + '/masked_patchtst/' + args.model_type + '/' if args.is_finetune: args.save_finetuned_model = args.dset_finetune+'_patchtst_finetuned'+suffix_name elif args.is_linear_probe: args.save_finetuned_model = args.dset_finetune+'_patchtst_linear-probe'+suffix_name else: args.save_finetuned_model = args.dset_finetune+'_patchtst_finetuned'+suffix_name def get_model(c_in, args, head_type, weight_path=None): """ c_in: number of variables """ # get number of patches num_patch = (max(args.context_points, args.patch_len)-args.patch_len) // args.stride + 1 print('number of patches:', num_patch) # get model model = PatchTST(c_in=c_in, target_dim=args.target_points, patch_len=args.patch_len, stride=args.stride, num_patch=num_patch, n_layers=args.n_layers, n_heads=args.n_heads, d_model=args.d_model, shared_embedding=True, d_ff=args.d_ff, dropout=args.dropout, head_dropout=args.head_dropout, act='relu', head_type=head_type, res_attention=False ) if weight_path: model = transfer_weights(weight_path, model) # print out the model size print('number of model params', sum(p.numel() for p in model.parameters() if p.requires_grad)) return model class Learner(GetAttr): def __init__(self, dls, model, loss_func=None, lr=1e-3, cbs=None, metrics=None, opt_func=Adam, **kwargs): self.model, self.dls, self.loss_func, self.lr = model, dls, loss_func, lr self.opt_func = opt_func #self.opt = self.opt_func(self.model.parameters(), self.lr) self.set_opt() self.metrics = metrics self.n_inp = 2 
# self.n_inp = self.dls.train.dataset.n_inp if self.dls else 0 # Initialize callbacks if cbs and not isinstance(cbs, List): cbs = [cbs] self.initialize_callbacks(cbs) # Indicator of running lr_finder self.run_finder = False def set_opt(self): if self.model: self.opt = self.opt_func(self.model.parameters(), self.lr) else: self.opt = None def default_callback(self): "get a set of default callbacks" default_cbs = [ SetupLearnerCB(), TrackTimerCB(), TrackTrainingCB(train_metrics=False, valid_metrics=True)] return default_cbs def initialize_callbacks(self, cbs): default_cbs = self.default_callback() self.cbs = update_callbacks(cbs, default_cbs) if cbs else default_cbs # add print CB self.cbs += [PrintResultsCB()] for cb in self.cbs: cb.learner = self self('init_cb') def add_callback(self, cb): if not cb: return cb.learner = self self.cbs = update_callback(cb, self.cbs) def add_callbacks(self, cbs): if not isinstance(cbs, list): cbs = [cbs] for cb in cbs: self.add_callback(cb) def remove_callback(self, cb): cb.learn = None self.cbs, removed_cb = remove_callback(cb, self.cbs) return removed_cb def remove_callbacks(self, cb_list): for cb in cb_list: self.remove_callback(cb) def fit(self, n_epochs, lr=None, cbs=None, do_valid=True): " fit the model " self.n_epochs = n_epochs if not self.dls.valid: do_valid = False if cbs: self.add_callbacks(cbs) if lr: self.opt = self.opt_func(self.model.parameters(), lr) self('before_fit') try: for self.epoch in range(n_epochs): self('before_epoch') self.one_epoch(train=True) # if self.dls.valid: if do_valid: self.one_epoch(train=False) self('after_epoch') except KeyboardInterrupt: pass self('after_fit') def fit_one_cycle(self, n_epochs, lr_max=None, pct_start=0.3): self.n_epochs = n_epochs self.lr_max = lr_max if lr_max else self.lr cb = OneCycleLR(lr_max=self.lr_max, pct_start=pct_start) self.fit(self.n_epochs, cbs=cb) def one_epoch(self, train): self.epoch_train() if train else self.epoch_validate() def epoch_train(self): 
self('before_epoch_train') self.model.train() self.dl = self.dls.train self.all_batches('train') self('after_epoch_train') def epoch_validate(self, dl=None): self('before_epoch_valid') # model at evaluation mode self.model.eval() self.dl = dl if dl else self.dls.valid if self.dl: with torch.no_grad(): self.all_batches('valid') self('after_epoch_valid') def all_batches(self, type_): # for self.num,self.batch in enumerate(progress_bar(dl, leave=False)): for num, batch in enumerate(self.dl): self.iter, self.batch = num, batch if type_ == 'train': self.batch_train() elif type_ == 'valid': self.batch_validate() elif type_ == 'predict': self.batch_predict() elif type_ == 'test': self.batch_test() def batch_train(self): self('before_batch_train') self._do_batch_train() self('after_batch_train') def batch_validate(self): self('before_batch_valid') self._do_batch_validate() self('after_batch_valid') def batch_predict(self): self('before_batch_predict') self._do_batch_predict() self('after_batch_predict') def batch_test(self): self('before_batch_test') self._do_batch_test() self('after_batch_test') def _do_batch_train(self): # forward + get loss + backward + optimize self.pred, self.loss = self.train_step(self.batch) # zero the parameter gradients self.opt.zero_grad() # gradient self.loss.backward() # update weights self.opt.step() def train_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() # compute loss loss = self.loss_func(pred, self.yb) return pred, loss def model_forward(self): self('before_forward') self.pred = self.model(self.xb) self('after_forward') return self.pred def _do_batch_validate(self): # forward + calculate loss self.pred, self.loss = self.valid_step(self.batch) def valid_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() # compute loss loss = self.loss_func(pred, self.yb) return pred, loss def _do_batch_predict(self): self.pred = self.predict_step(self.batch) 
def predict_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() return pred def _do_batch_test(self): self.pred, self.yb = self.test_step(self.batch) def test_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() return pred, self.yb def _predict(self, dl=None): # self('before_validate') self('before_predict') if dl is None: return self.dl = dl self.n_inp = dl.dataset.n_inp self.model.eval() # model at evaluation mode with torch.no_grad(): self.all_batches('predict') self('after_predict') def predict(self, test_data, weight_path=None, Dataset=None, Dataloader=None, batch_size=None): """_summary_ Args: test_data can be a tensor, numpy array, dataset or dataloader Returns: _type_: _description_ """ if weight_path is not None: self.load(weight_path) cb = GetPredictionsCB() self.add_callback(cb) test_dl = self._prepare_data(test_data, Dataset, Dataloader, batch_size) self._predict(test_dl) self.preds = cb.preds return to_numpy(self.preds) def test(self, dl, weight_path=None, scores=None): """_summary_ Args: test_data can be a tensor, numpy array, dataset or dataloader Returns: _type_: _description_ """ if dl is None: return else: self.dl = dl if weight_path is not None: self.load(weight_path) cb = GetTestCB() self.add_callback(cb) self('before_test') self.model.eval() with torch.no_grad(): self.all_batches('test') self('after_test') self.preds, self.targets = to_numpy([cb.preds, cb.targets]) # calculate scores if scores: s_vals = [score(cb.targets, cb.preds).to('cpu').numpy() for score in list(scores)] return self.preds, self.targets, s_vals else: return self.preds, self.targets def _prepare_data(self, test_data, Dataset=None, Dataloader=None, batch_size=None): if test_data is None: return test_data if Dataset and Dataloader: test_dset = Dataset(test_data) if not batch_size: batch_size=16 test_dl = Dataloader(test_dset, batch_size) else: if self.dls: # add test_data to the 
dataloader defined in the dls.train test_dl = self.dls.add_dl(test_data, batch_size=batch_size) else: test_dl = test_data # assume test_data is already a form of dataloader return test_dl def get_layer_output(self, inp, layers=None, unwrap=False): """ Args: inp: can be numpy array, torch tensor or dataloader """ self.model.eval() device = next(self.model.parameters()).device if isinstance(inp, np.ndarray): inp = torch.Tensor(inp).to(device) if isinstance(inp, torch.Tensor): inp = inp.to(device) return get_layer_output(inp, model=self.model, layers=layers, unwrap=unwrap) def fine_tune(self, n_epochs, base_lr=None, freeze_epochs=1, pct_start=0.3): """ fintune the pretrained model. First the entire model is freezed, only head is trained up to a freeze_epochs number. Then the model is unfreezed and the entire model is trained """ assert (n_epochs>0)|(freeze_epochs>0), "Either n_epochs or freeze_epochs has to be > 0" if not base_lr: base_lr = self.lr # Finetune the head of freeze_epochs > 0: if freeze_epochs > 0: print('Finetune the head') self.freeze() self.fit_one_cycle(freeze_epochs, lr_max=base_lr, pct_start=pct_start) # Finetune the entire network if n_epochs > 0 if n_epochs > 0: print('Finetune the entire network') self.unfreeze() self.fit_one_cycle(n_epochs, lr_max=base_lr/2, pct_start=pct_start) def linear_probe(self, n_epochs, base_lr=None, pct_start=0.3): """ linear probing the pretrained model. 
The model is freeze except the head during finetuning """ assert (n_epochs>0), "n_epochs has to be > 0" if not base_lr: base_lr = self.lr print('Finetune the head') self.freeze() self.fit_one_cycle(n_epochs, lr_max=base_lr, pct_start=pct_start) def lr_finder(self, start_lr=1e-7, end_lr=10, num_iter=100, step_mode='exp', show_plot=True, suggestion='valley'): """ find the learning rate """ n_epochs = num_iter//len(self.dls.train) + 1 # indicator of lr_finder method is applied self.run_finder = True # add LRFinderCB to callback list and will remove later cb = LRFinderCB(start_lr, end_lr, num_iter, step_mode, suggestion=suggestion) # fit self.fit(n_epochs=n_epochs, cbs=cb, do_valid=False) # should remove LRFinderCB callback after fitting self.remove_callback(cb) self.run_finder = False if show_plot: cb.plot_lr_find() if suggestion: return cb.suggested_lr def freeze(self): """ freeze the model head require the model to have head attribute """ if hasattr(get_model(self.model), 'head'): # print('model head is available') for param in get_model(self.model).parameters(): param.requires_grad = False for param in get_model(self.model).head.parameters(): param.requires_grad = True # print('model is frozen except the head') def unfreeze(self): for param in get_model(self.model).parameters(): param.requires_grad = True def __call__(self, name): for cb in self.cbs: attr = getattr(cb, name) if attr is not None: attr() def save(self, fname, path, **kwargs): """ Save model and optimizer state (if `with_opt`) to `self.path/file` """ fname = join_path_file(fname, path, ext='.pth') save_model(fname, self.model, getattr(self,'opt',None), **kwargs) return fname def load(self, fname, with_opt=False, device='cuda', strict=True, **kwargs): """ load the model """ if not torch.cuda.is_available(): device = "cpu" load_model(fname, self.model, self.opt, with_opt, device=device, strict=strict) def get_params(self, deep=True, **kwargs): params = BaseEstimator.get_params(self, deep=deep, **kwargs) 
return params def _get_param_names(self): return (k for k in self.__dict__ if not k.endswith('_')) def set_params(self, **kwargs): params = {} for key, val in kwargs.items(): params[key] = val BaseEstimator.set_params(self, **params) def to_distributed(self, sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm` **kwargs ): local_rank = int(os.environ.get('LOCAL_RANK')) world_size = int(os.environ.get('WORLD_SIZE')) rank = int(os.environ.get('RANK')) print('Process {} (out of {})'.format( rank, torch.distributed.get_world_size())) self.add_callback(DistributedTrainer(local_rank=local_rank, world_size=world_size, sync_bn=sync_bn, **kwargs)) return self def test_func(weight_path): # get dataloader dls = get_dls(args) model = get_model(dls.vars, args, head_type='prediction').to('cuda') # get callbacks cbs = [RevInCB(dls.vars, denorm=True)] if args.revin else [] cbs += [PatchCB(patch_len=args.patch_len, stride=args.stride)] learn = Learner(dls, model,cbs=cbs) out = learn.test(dls.test, weight_path=weight_path+'.pth', scores=[mse,mae]) # out: a list of [pred, targ, score] print('score:', out[2]) # save results pd.DataFrame(np.array(out[2]).reshape(1,-1), columns=['mse','mae']).to_csv(args.save_path + args.save_finetuned_model + '_acc.csv', float_format='%.6f', index=False) return out
null
13,450
import numpy as np import pandas as pd import os import torch from torch import nn from src.models.patchTST import PatchTST from src.learner import Learner from src.callback.core import * from src.callback.tracking import * from src.callback.scheduler import * from src.callback.patch_mask import * from src.callback.transforms import * from src.metrics import * from datautils import get_dls import argparse args = parser.parse_args() args.save_model_name = 'patchtst_supervised'+'_cw'+str(args.context_points)+'_tw'+str(args.target_points) + '_patch'+str(args.patch_len) + '_stride'+str(args.stride)+'_epochs'+str(args.n_epochs) + '_model' + str(args.model_id) args.save_path = 'saved_models/' + args.dset + '/patchtst_supervised/' + args.model_type + '/' def get_model(c_in, args): """ c_in: number of input variables """ # get number of patches num_patch = (max(args.context_points, args.patch_len)-args.patch_len) // args.stride + 1 print('number of patches:', num_patch) # get model model = PatchTST(c_in=c_in, target_dim=args.target_points, patch_len=args.patch_len, stride=args.stride, num_patch=num_patch, n_layers=args.n_layers, n_heads=args.n_heads, d_model=args.d_model, shared_embedding=True, d_ff=args.d_ff, dropout=args.dropout, head_dropout=args.head_dropout, act='relu', head_type='prediction', res_attention=False ) return model class Learner(GetAttr): def __init__(self, dls, model, loss_func=None, lr=1e-3, cbs=None, metrics=None, opt_func=Adam, **kwargs): self.model, self.dls, self.loss_func, self.lr = model, dls, loss_func, lr self.opt_func = opt_func #self.opt = self.opt_func(self.model.parameters(), self.lr) self.set_opt() self.metrics = metrics self.n_inp = 2 # self.n_inp = self.dls.train.dataset.n_inp if self.dls else 0 # Initialize callbacks if cbs and not isinstance(cbs, List): cbs = [cbs] self.initialize_callbacks(cbs) # Indicator of running lr_finder self.run_finder = False def set_opt(self): if self.model: self.opt = self.opt_func(self.model.parameters(), 
self.lr) else: self.opt = None def default_callback(self): "get a set of default callbacks" default_cbs = [ SetupLearnerCB(), TrackTimerCB(), TrackTrainingCB(train_metrics=False, valid_metrics=True)] return default_cbs def initialize_callbacks(self, cbs): default_cbs = self.default_callback() self.cbs = update_callbacks(cbs, default_cbs) if cbs else default_cbs # add print CB self.cbs += [PrintResultsCB()] for cb in self.cbs: cb.learner = self self('init_cb') def add_callback(self, cb): if not cb: return cb.learner = self self.cbs = update_callback(cb, self.cbs) def add_callbacks(self, cbs): if not isinstance(cbs, list): cbs = [cbs] for cb in cbs: self.add_callback(cb) def remove_callback(self, cb): cb.learn = None self.cbs, removed_cb = remove_callback(cb, self.cbs) return removed_cb def remove_callbacks(self, cb_list): for cb in cb_list: self.remove_callback(cb) def fit(self, n_epochs, lr=None, cbs=None, do_valid=True): " fit the model " self.n_epochs = n_epochs if not self.dls.valid: do_valid = False if cbs: self.add_callbacks(cbs) if lr: self.opt = self.opt_func(self.model.parameters(), lr) self('before_fit') try: for self.epoch in range(n_epochs): self('before_epoch') self.one_epoch(train=True) # if self.dls.valid: if do_valid: self.one_epoch(train=False) self('after_epoch') except KeyboardInterrupt: pass self('after_fit') def fit_one_cycle(self, n_epochs, lr_max=None, pct_start=0.3): self.n_epochs = n_epochs self.lr_max = lr_max if lr_max else self.lr cb = OneCycleLR(lr_max=self.lr_max, pct_start=pct_start) self.fit(self.n_epochs, cbs=cb) def one_epoch(self, train): self.epoch_train() if train else self.epoch_validate() def epoch_train(self): self('before_epoch_train') self.model.train() self.dl = self.dls.train self.all_batches('train') self('after_epoch_train') def epoch_validate(self, dl=None): self('before_epoch_valid') # model at evaluation mode self.model.eval() self.dl = dl if dl else self.dls.valid if self.dl: with torch.no_grad(): 
self.all_batches('valid') self('after_epoch_valid') def all_batches(self, type_): # for self.num,self.batch in enumerate(progress_bar(dl, leave=False)): for num, batch in enumerate(self.dl): self.iter, self.batch = num, batch if type_ == 'train': self.batch_train() elif type_ == 'valid': self.batch_validate() elif type_ == 'predict': self.batch_predict() elif type_ == 'test': self.batch_test() def batch_train(self): self('before_batch_train') self._do_batch_train() self('after_batch_train') def batch_validate(self): self('before_batch_valid') self._do_batch_validate() self('after_batch_valid') def batch_predict(self): self('before_batch_predict') self._do_batch_predict() self('after_batch_predict') def batch_test(self): self('before_batch_test') self._do_batch_test() self('after_batch_test') def _do_batch_train(self): # forward + get loss + backward + optimize self.pred, self.loss = self.train_step(self.batch) # zero the parameter gradients self.opt.zero_grad() # gradient self.loss.backward() # update weights self.opt.step() def train_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() # compute loss loss = self.loss_func(pred, self.yb) return pred, loss def model_forward(self): self('before_forward') self.pred = self.model(self.xb) self('after_forward') return self.pred def _do_batch_validate(self): # forward + calculate loss self.pred, self.loss = self.valid_step(self.batch) def valid_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() # compute loss loss = self.loss_func(pred, self.yb) return pred, loss def _do_batch_predict(self): self.pred = self.predict_step(self.batch) def predict_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = self.model_forward() return pred def _do_batch_test(self): self.pred, self.yb = self.test_step(self.batch) def test_step(self, batch): # get the inputs self.xb, self.yb = batch # forward pred = 
self.model_forward() return pred, self.yb def _predict(self, dl=None): # self('before_validate') self('before_predict') if dl is None: return self.dl = dl self.n_inp = dl.dataset.n_inp self.model.eval() # model at evaluation mode with torch.no_grad(): self.all_batches('predict') self('after_predict') def predict(self, test_data, weight_path=None, Dataset=None, Dataloader=None, batch_size=None): """_summary_ Args: test_data can be a tensor, numpy array, dataset or dataloader Returns: _type_: _description_ """ if weight_path is not None: self.load(weight_path) cb = GetPredictionsCB() self.add_callback(cb) test_dl = self._prepare_data(test_data, Dataset, Dataloader, batch_size) self._predict(test_dl) self.preds = cb.preds return to_numpy(self.preds) def test(self, dl, weight_path=None, scores=None): """_summary_ Args: test_data can be a tensor, numpy array, dataset or dataloader Returns: _type_: _description_ """ if dl is None: return else: self.dl = dl if weight_path is not None: self.load(weight_path) cb = GetTestCB() self.add_callback(cb) self('before_test') self.model.eval() with torch.no_grad(): self.all_batches('test') self('after_test') self.preds, self.targets = to_numpy([cb.preds, cb.targets]) # calculate scores if scores: s_vals = [score(cb.targets, cb.preds).to('cpu').numpy() for score in list(scores)] return self.preds, self.targets, s_vals else: return self.preds, self.targets def _prepare_data(self, test_data, Dataset=None, Dataloader=None, batch_size=None): if test_data is None: return test_data if Dataset and Dataloader: test_dset = Dataset(test_data) if not batch_size: batch_size=16 test_dl = Dataloader(test_dset, batch_size) else: if self.dls: # add test_data to the dataloader defined in the dls.train test_dl = self.dls.add_dl(test_data, batch_size=batch_size) else: test_dl = test_data # assume test_data is already a form of dataloader return test_dl def get_layer_output(self, inp, layers=None, unwrap=False): """ Args: inp: can be numpy array, torch 
tensor or dataloader """ self.model.eval() device = next(self.model.parameters()).device if isinstance(inp, np.ndarray): inp = torch.Tensor(inp).to(device) if isinstance(inp, torch.Tensor): inp = inp.to(device) return get_layer_output(inp, model=self.model, layers=layers, unwrap=unwrap) def fine_tune(self, n_epochs, base_lr=None, freeze_epochs=1, pct_start=0.3): """ fintune the pretrained model. First the entire model is freezed, only head is trained up to a freeze_epochs number. Then the model is unfreezed and the entire model is trained """ assert (n_epochs>0)|(freeze_epochs>0), "Either n_epochs or freeze_epochs has to be > 0" if not base_lr: base_lr = self.lr # Finetune the head of freeze_epochs > 0: if freeze_epochs > 0: print('Finetune the head') self.freeze() self.fit_one_cycle(freeze_epochs, lr_max=base_lr, pct_start=pct_start) # Finetune the entire network if n_epochs > 0 if n_epochs > 0: print('Finetune the entire network') self.unfreeze() self.fit_one_cycle(n_epochs, lr_max=base_lr/2, pct_start=pct_start) def linear_probe(self, n_epochs, base_lr=None, pct_start=0.3): """ linear probing the pretrained model. 
The model is freeze except the head during finetuning """ assert (n_epochs>0), "n_epochs has to be > 0" if not base_lr: base_lr = self.lr print('Finetune the head') self.freeze() self.fit_one_cycle(n_epochs, lr_max=base_lr, pct_start=pct_start) def lr_finder(self, start_lr=1e-7, end_lr=10, num_iter=100, step_mode='exp', show_plot=True, suggestion='valley'): """ find the learning rate """ n_epochs = num_iter//len(self.dls.train) + 1 # indicator of lr_finder method is applied self.run_finder = True # add LRFinderCB to callback list and will remove later cb = LRFinderCB(start_lr, end_lr, num_iter, step_mode, suggestion=suggestion) # fit self.fit(n_epochs=n_epochs, cbs=cb, do_valid=False) # should remove LRFinderCB callback after fitting self.remove_callback(cb) self.run_finder = False if show_plot: cb.plot_lr_find() if suggestion: return cb.suggested_lr def freeze(self): """ freeze the model head require the model to have head attribute """ if hasattr(get_model(self.model), 'head'): # print('model head is available') for param in get_model(self.model).parameters(): param.requires_grad = False for param in get_model(self.model).head.parameters(): param.requires_grad = True # print('model is frozen except the head') def unfreeze(self): for param in get_model(self.model).parameters(): param.requires_grad = True def __call__(self, name): for cb in self.cbs: attr = getattr(cb, name) if attr is not None: attr() def save(self, fname, path, **kwargs): """ Save model and optimizer state (if `with_opt`) to `self.path/file` """ fname = join_path_file(fname, path, ext='.pth') save_model(fname, self.model, getattr(self,'opt',None), **kwargs) return fname def load(self, fname, with_opt=False, device='cuda', strict=True, **kwargs): """ load the model """ if not torch.cuda.is_available(): device = "cpu" load_model(fname, self.model, self.opt, with_opt, device=device, strict=strict) def get_params(self, deep=True, **kwargs): params = BaseEstimator.get_params(self, deep=deep, **kwargs) 
return params def _get_param_names(self): return (k for k in self.__dict__ if not k.endswith('_')) def set_params(self, **kwargs): params = {} for key, val in kwargs.items(): params[key] = val BaseEstimator.set_params(self, **params) def to_distributed(self, sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm` **kwargs ): local_rank = int(os.environ.get('LOCAL_RANK')) world_size = int(os.environ.get('WORLD_SIZE')) rank = int(os.environ.get('RANK')) print('Process {} (out of {})'.format( rank, torch.distributed.get_world_size())) self.add_callback(DistributedTrainer(local_rank=local_rank, world_size=world_size, sync_bn=sync_bn, **kwargs)) return self def get_dls(params): assert params.dset in DSETS, f"Unrecognized dset (`{params.dset}`). Options include: {DSETS}" if not hasattr(params,'use_time_features'): params.use_time_features = False if params.dset == 'ettm1': root_path = '/data/datasets/public/ETDataset/ETT-small/' size = [params.context_points, 0, params.target_points] dls = DataLoaders( datasetCls=Dataset_ETT_minute, dataset_kwargs={ 'root_path': root_path, 'data_path': 'ETTm1.csv', 'features': params.features, 'scale': True, 'size': size, 'use_time_features': params.use_time_features }, batch_size=params.batch_size, workers=params.num_workers, ) elif params.dset == 'ettm2': root_path = '/data/datasets/public/ETDataset/ETT-small/' size = [params.context_points, 0, params.target_points] dls = DataLoaders( datasetCls=Dataset_ETT_minute, dataset_kwargs={ 'root_path': root_path, 'data_path': 'ETTm2.csv', 'features': params.features, 'scale': True, 'size': size, 'use_time_features': params.use_time_features }, batch_size=params.batch_size, workers=params.num_workers, ) elif params.dset == 'etth1': root_path = '/data/datasets/public/ETDataset/ETT-small/' size = [params.context_points, 0, params.target_points] dls = DataLoaders( datasetCls=Dataset_ETT_hour, dataset_kwargs={ 'root_path': root_path, 'data_path': 'ETTh1.csv', 'features': 
params.features, 'scale': True, 'size': size, 'use_time_features': params.use_time_features }, batch_size=params.batch_size, workers=params.num_workers, ) elif params.dset == 'etth2': root_path = '/data/datasets/public/ETDataset/ETT-small/' size = [params.context_points, 0, params.target_points] dls = DataLoaders( datasetCls=Dataset_ETT_hour, dataset_kwargs={ 'root_path': root_path, 'data_path': 'ETTh2.csv', 'features': params.features, 'scale': True, 'size': size, 'use_time_features': params.use_time_features }, batch_size=params.batch_size, workers=params.num_workers, ) elif params.dset == 'electricity': root_path = '/data/datasets/public/electricity/' size = [params.context_points, 0, params.target_points] dls = DataLoaders( datasetCls=Dataset_Custom, dataset_kwargs={ 'root_path': root_path, 'data_path': 'electricity.csv', 'features': params.features, 'scale': True, 'size': size, 'use_time_features': params.use_time_features }, batch_size=params.batch_size, workers=params.num_workers, ) elif params.dset == 'traffic': root_path = '/data/datasets/public/traffic/' size = [params.context_points, 0, params.target_points] dls = DataLoaders( datasetCls=Dataset_Custom, dataset_kwargs={ 'root_path': root_path, 'data_path': 'traffic.csv', 'features': params.features, 'scale': True, 'size': size, 'use_time_features': params.use_time_features }, batch_size=params.batch_size, workers=params.num_workers, ) elif params.dset == 'weather': root_path = '/data/datasets/public/weather/' size = [params.context_points, 0, params.target_points] dls = DataLoaders( datasetCls=Dataset_Custom, dataset_kwargs={ 'root_path': root_path, 'data_path': 'weather.csv', 'features': params.features, 'scale': True, 'size': size, 'use_time_features': params.use_time_features }, batch_size=params.batch_size, workers=params.num_workers, ) elif params.dset == 'illness': root_path = '/data/datasets/public/illness/' size = [params.context_points, 0, params.target_points] dls = DataLoaders( 
datasetCls=Dataset_Custom, dataset_kwargs={ 'root_path': root_path, 'data_path': 'national_illness.csv', 'features': params.features, 'scale': True, 'size': size, 'use_time_features': params.use_time_features }, batch_size=params.batch_size, workers=params.num_workers, ) elif params.dset == 'exchange': root_path = '/data/datasets/public/exchange_rate/' size = [params.context_points, 0, params.target_points] dls = DataLoaders( datasetCls=Dataset_Custom, dataset_kwargs={ 'root_path': root_path, 'data_path': 'exchange_rate.csv', 'features': params.features, 'scale': True, 'size': size, 'use_time_features': params.use_time_features }, batch_size=params.batch_size, workers=params.num_workers, ) # dataset is assume to have dimension len x nvars dls.vars, dls.len = dls.train.dataset[0][0].shape[1], params.context_points dls.c = dls.train.dataset[0][1].shape[0] return dls def find_lr(): # get dataloader dls = get_dls(args) model = get_model(dls.vars, args) # get loss loss_func = torch.nn.MSELoss(reduction='mean') # get callbacks cbs = [RevInCB(dls.vars)] if args.revin else [] cbs += [PatchCB(patch_len=args.patch_len, stride=args.stride)] # define learner learn = Learner(dls, model, loss_func, cbs=cbs) # fit the data to the model return learn.lr_finder()
null
13,451
import numpy as np import pandas as pd import os import torch from torch import nn from src.models.patchTST import PatchTST from src.learner import Learner from src.callback.core import * from src.callback.tracking import * from src.callback.scheduler import * from src.callback.patch_mask import * from src.callback.transforms import * from src.metrics import * from datautils import get_dls import argparse args = parser.parse_args() print('args:', args) args.save_model_name = 'patchtst_supervised'+'_cw'+str(args.context_points)+'_tw'+str(args.target_points) + '_patch'+str(args.patch_len) + '_stride'+str(args.stride)+'_epochs'+str(args.n_epochs) + '_model' + str(args.model_id) args.save_path = 'saved_models/' + args.dset + '/patchtst_supervised/' + args.model_type + '/' def get_model(c_in, args): class Learner(GetAttr): def __init__(self, dls, model, loss_func=None, lr=1e-3, cbs=None, metrics=None, opt_func=Adam, **kwargs): def set_opt(self): def default_callback(self): def initialize_callbacks(self, cbs): def add_callback(self, cb): def add_callbacks(self, cbs): def remove_callback(self, cb): def remove_callbacks(self, cb_list): def fit(self, n_epochs, lr=None, cbs=None, do_valid=True): def fit_one_cycle(self, n_epochs, lr_max=None, pct_start=0.3): def one_epoch(self, train): def epoch_train(self): def epoch_validate(self, dl=None): def all_batches(self, type_): def batch_train(self): def batch_validate(self): def batch_predict(self): def batch_test(self): def _do_batch_train(self): def train_step(self, batch): def model_forward(self): def _do_batch_validate(self): def valid_step(self, batch): def _do_batch_predict(self): def predict_step(self, batch): def _do_batch_test(self): def test_step(self, batch): def _predict(self, dl=None): def predict(self, test_data, weight_path=None, Dataset=None, Dataloader=None, batch_size=None): def test(self, dl, weight_path=None, scores=None): def _prepare_data(self, test_data, Dataset=None, Dataloader=None, batch_size=None): def 
get_layer_output(self, inp, layers=None, unwrap=False): def fine_tune(self, n_epochs, base_lr=None, freeze_epochs=1, pct_start=0.3): def linear_probe(self, n_epochs, base_lr=None, pct_start=0.3): def lr_finder(self, start_lr=1e-7, end_lr=10, num_iter=100, step_mode='exp', show_plot=True, suggestion='valley'): def freeze(self): def unfreeze(self): def __call__(self, name): def save(self, fname, path, **kwargs): def load(self, fname, with_opt=False, device='cuda', strict=True, **kwargs): def get_params(self, deep=True, **kwargs): def _get_param_names(self): def set_params(self, **kwargs): def to_distributed(self, sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm` **kwargs ): def get_dls(params): def train_func(lr=args.lr): # get dataloader dls = get_dls(args) print('in out', dls.vars, dls.c, dls.len) # get model model = get_model(dls.vars, args) # get loss loss_func = torch.nn.MSELoss(reduction='mean') # get callbacks cbs = [RevInCB(dls.vars)] if args.revin else [] cbs += [ PatchCB(patch_len=args.patch_len, stride=args.stride), SaveModelCB(monitor='valid_loss', fname=args.save_model_name, path=args.save_path ) ] # define learner learn = Learner(dls, model, loss_func, lr=lr, cbs=cbs, metrics=[mse] ) # fit the data to the model learn.fit_one_cycle(n_epochs=args.n_epochs, lr_max=lr, pct_start=0.2)
null
13,452
import numpy as np import pandas as pd import os import torch from torch import nn from src.models.patchTST import PatchTST from src.learner import Learner from src.callback.core import * from src.callback.tracking import * from src.callback.scheduler import * from src.callback.patch_mask import * from src.callback.transforms import * from src.metrics import * from datautils import get_dls import argparse args = parser.parse_args() args.save_model_name = 'patchtst_supervised'+'_cw'+str(args.context_points)+'_tw'+str(args.target_points) + '_patch'+str(args.patch_len) + '_stride'+str(args.stride)+'_epochs'+str(args.n_epochs) + '_model' + str(args.model_id) args.save_path = 'saved_models/' + args.dset + '/patchtst_supervised/' + args.model_type + '/' def get_model(c_in, args): class Learner(GetAttr): def __init__(self, dls, model, loss_func=None, lr=1e-3, cbs=None, metrics=None, opt_func=Adam, **kwargs): def set_opt(self): def default_callback(self): def initialize_callbacks(self, cbs): def add_callback(self, cb): def add_callbacks(self, cbs): def remove_callback(self, cb): def remove_callbacks(self, cb_list): def fit(self, n_epochs, lr=None, cbs=None, do_valid=True): def fit_one_cycle(self, n_epochs, lr_max=None, pct_start=0.3): def one_epoch(self, train): def epoch_train(self): def epoch_validate(self, dl=None): def all_batches(self, type_): def batch_train(self): def batch_validate(self): def batch_predict(self): def batch_test(self): def _do_batch_train(self): def train_step(self, batch): def model_forward(self): def _do_batch_validate(self): def valid_step(self, batch): def _do_batch_predict(self): def predict_step(self, batch): def _do_batch_test(self): def test_step(self, batch): def _predict(self, dl=None): def predict(self, test_data, weight_path=None, Dataset=None, Dataloader=None, batch_size=None): def test(self, dl, weight_path=None, scores=None): def _prepare_data(self, test_data, Dataset=None, Dataloader=None, batch_size=None): def get_layer_output(self, 
inp, layers=None, unwrap=False): def fine_tune(self, n_epochs, base_lr=None, freeze_epochs=1, pct_start=0.3): def linear_probe(self, n_epochs, base_lr=None, pct_start=0.3): def lr_finder(self, start_lr=1e-7, end_lr=10, num_iter=100, step_mode='exp', show_plot=True, suggestion='valley'): def freeze(self): def unfreeze(self): def __call__(self, name): def save(self, fname, path, **kwargs): def load(self, fname, with_opt=False, device='cuda', strict=True, **kwargs): def get_params(self, deep=True, **kwargs): def _get_param_names(self): def set_params(self, **kwargs): def to_distributed(self, sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm` **kwargs ): def get_dls(params): def test_func(): weight_path = args.save_path + args.save_model_name + '.pth' # get dataloader dls = get_dls(args) model = get_model(dls.vars, args) #model = torch.load(weight_path) # get callbacks cbs = [RevInCB(dls.vars)] if args.revin else [] cbs += [PatchCB(patch_len=args.patch_len, stride=args.stride)] learn = Learner(dls, model,cbs=cbs) out = learn.test(dls.test, weight_path=weight_path, scores=[mse,mae]) # out: a list of [pred, targ, score_values] return out
null
13,453
import torch
from torch import nn
import math


def get_activation_fn(activation):
    """Resolve an activation spec into an ``nn.Module`` instance.

    Args:
        activation: either a zero-argument callable (e.g. an ``nn.Module``
            class) that is instantiated and returned, or a case-insensitive
            string naming a supported activation ("relu" or "gelu").

    Returns:
        A freshly constructed activation module.

    Raises:
        ValueError: if ``activation`` is a string other than "relu"/"gelu".
    """
    # A callable (typically a module class) wins over string matching.
    if callable(activation):
        return activation()

    name = activation.lower()
    if name == "relu":
        return nn.ReLU()
    if name == "gelu":
        return nn.GELU()

    raise ValueError(f'{activation} is not available. You can use "relu", "gelu", or a callable')
null
13,454
import torch
from torch import nn
import math


def PositionalEncoding(q_len, d_model, normalize=True):
    """Classic sine/cosine positional encoding (Vaswani et al.).

    Args:
        q_len: sequence length (number of positions).
        d_model: embedding width. NOTE(review): assumes d_model is even —
            for odd widths the cosine slice has a shape mismatch; confirm
            callers only pass even widths.
        normalize: if True, center the table and scale it to std 0.1.

    Returns:
        Tensor of shape (q_len, d_model).
    """
    pe = torch.zeros(q_len, d_model)
    position = torch.arange(0, q_len).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
    pe[:, 0::2] = torch.sin(position * div_term)
    pe[:, 1::2] = torch.cos(position * div_term)
    if normalize:
        pe = pe - pe.mean()
        pe = pe / (pe.std() * 10)
    return pe


def Coord2dPosEncoding(q_len, d_model, exponential=False, normalize=True, eps=1e-3, verbose=False):
    """2-D coordinate positional encoding in [-1, 1].

    Iteratively tunes the exponent ``x`` (up to 100 steps) so the table's
    mean is within ``eps`` of zero.

    Returns:
        Tensor of shape (q_len, d_model).
    """
    x = .5 if exponential else 1
    # BUG FIX: the original called an undefined helper `pv(...)` on every
    # iteration (unconditional NameError) and kept a dead manual counter
    # (`i = 0` / `i += 1`) alongside the `for` index. Replaced with a
    # guarded print and the loop index alone.
    for i in range(100):
        cpe = 2 * (torch.linspace(0, 1, q_len).reshape(-1, 1) ** x) * (torch.linspace(0, 1, d_model).reshape(1, -1) ** x) - 1
        if verbose:
            print(f'{i:4.0f}  {x:5.3f}  {cpe.mean():+6.3f}')
        if abs(cpe.mean()) <= eps:
            break
        elif cpe.mean() > eps:
            x += .001
        else:
            x -= .001
    if normalize:
        cpe = cpe - cpe.mean()
        cpe = cpe / (cpe.std() * 10)
    return cpe


def Coord1dPosEncoding(q_len, exponential=False, normalize=True):
    """1-D coordinate positional encoding in [-1, 1], shape (q_len, 1)."""
    cpe = (2 * (torch.linspace(0, 1, q_len).reshape(-1, 1)**(.5 if exponential else 1)) - 1)
    if normalize:
        cpe = cpe - cpe.mean()
        cpe = cpe / (cpe.std() * 10)
    return cpe


def positional_encoding(pe, learn_pe, q_len, d_model):
    """Build a positional-encoding table as an ``nn.Parameter``.

    Args:
        pe: encoding type — None, 'zero'/'zeros', 'normal'/'gauss',
            'uniform', 'lin1d', 'exp1d', 'lin2d', 'exp2d', or 'sincos'.
        learn_pe: whether the returned parameter is trainable
            (forced False when ``pe`` is None).
        q_len: sequence length.
        d_model: embedding width.

    Returns:
        nn.Parameter of shape (q_len, d_model) or (q_len, 1) depending on type.

    Raises:
        ValueError: for an unrecognized ``pe`` string.
    """
    # Positional encoding
    if pe is None:  # fixed: identity comparison, not `== None`
        W_pos = torch.empty((q_len, d_model))  # pe = None and learn_pe = False can be used to measure impact of pe
        nn.init.uniform_(W_pos, -0.02, 0.02)
        learn_pe = False
    elif pe == 'zero':
        W_pos = torch.empty((q_len, 1))
        nn.init.uniform_(W_pos, -0.02, 0.02)
    elif pe == 'zeros':
        W_pos = torch.empty((q_len, d_model))
        nn.init.uniform_(W_pos, -0.02, 0.02)
    elif pe == 'normal' or pe == 'gauss':
        W_pos = torch.zeros((q_len, 1))
        torch.nn.init.normal_(W_pos, mean=0.0, std=0.1)
    elif pe == 'uniform':
        W_pos = torch.zeros((q_len, 1))
        nn.init.uniform_(W_pos, a=0.0, b=0.1)
    elif pe == 'lin1d':
        W_pos = Coord1dPosEncoding(q_len, exponential=False, normalize=True)
    elif pe == 'exp1d':
        W_pos = Coord1dPosEncoding(q_len, exponential=True, normalize=True)
    elif pe == 'lin2d':
        W_pos = Coord2dPosEncoding(q_len, d_model, exponential=False, normalize=True)
    elif pe == 'exp2d':
        W_pos = Coord2dPosEncoding(q_len, d_model, exponential=True, normalize=True)
    elif pe == 'sincos':
        W_pos = PositionalEncoding(q_len, d_model, normalize=True)
    else:
        raise ValueError(f"{pe} is not a valid pe (positional encoder. Available types: 'gauss'=='normal', \
        'zeros', 'zero', uniform', 'lin1d', 'exp1d', 'lin2d', 'exp2d', 'sincos', None.)")
    return nn.Parameter(W_pos, requires_grad=learn_pe)
null
13,455
def dataset_parameters(args, dataset):
    """Prepare specific parameters for different datasets.

    Fills ``enc_in``/``dec_in``, ``covariate_size``, ``seq_num`` and
    ``embed_type`` on ``args`` according to the dataset name and returns
    the same ``args`` object. Raises KeyError for unknown dataset names.
    """
    # Number of input (and decoder) variables per dataset.
    n_variables = {
        'ETTh1': 7, 'ETTh2': 7, 'ETTm1': 7, 'ETTm2': 7,
        'electricity': 321, 'exchange': 8, 'traffic': 862,
        'weather': 21, 'ili': 7, 'flow': 1, 'synthetic': 1,
    }
    # Size of the time-covariate vector.
    n_covariates = {
        'ETTh1': 4, 'ETTh2': 4, 'ETTm1': 4, 'ETTm2': 4,
        'electricity': 4, 'exchange': 4, 'traffic': 4,
        'weather': 4, 'ili': 4, 'elect': 3, 'flow': 3, 'synthetic': 3,
    }
    # Number of distinct series per dataset.
    n_sequences = {
        'ETTh1': 1, 'ETTh2': 1, 'ETTm1': 1, 'ETTm2': 1,
        'electricity': 1, 'exchange': 1, 'traffic': 1,
        'weather': 1, 'ili': 1, 'elect': 321, 'flow': 1077, 'synthetic': 60,
    }
    # Which embedding class each dataset uses.
    embedding_types = {
        'ETTh1': 'DataEmbedding', 'ETTh2': 'DataEmbedding',
        'ETTm1': 'DataEmbedding', 'ETTm2': 'DataEmbedding',
        'elect': 'CustomEmbedding', 'electricity': 'CustomEmbedding',
        'exchange': 'CustomEmbedding', 'traffic': 'CustomEmbedding',
        'weather': 'CustomEmbedding', 'ili': 'CustomEmbedding',
        'flow': 'CustomEmbedding', 'synthetic': 'CustomEmbedding',
    }

    # Encoder and decoder share the same variable count.
    args.enc_in = args.dec_in = n_variables[dataset]
    args.covariate_size = n_covariates[dataset]
    args.seq_num = n_sequences[dataset]
    args.embed_type = embedding_types[dataset]
    return args
Prepare specific parameters for different datasets
13,456
def prepare_dataloader(args):
    """ Load data and prepare dataloader. """
    # Map dataset name -> dataset class (hourly/minute ETT, generic custom).
    dataset_classes = {
        'ETTh1': Dataset_ETT_hour,
        'ETTh2': Dataset_ETT_hour,
        'ETTm1': Dataset_ETT_minute,
        'ETTm2': Dataset_ETT_minute,
        'electricity': Dataset_Custom,
        'exchange': Dataset_Custom,
        'traffic': Dataset_Custom,
        'weather': Dataset_Custom,
        'ili': Dataset_Custom,
        # 'flow': Dataset_Custom2,
        # 'synthetic': Dataset_Synthetic,
    }
    Data = dataset_classes[args.data]

    def _build_split(flag):
        # Both splits share every constructor option except the split flag.
        return Data(
            root_path=args.root_path,
            data_path=args.data_path,
            flag=flag,
            size=[args.input_size, args.predict_step],
            inverse=args.inverse,
            dataset=args.data,
        )

    # Training split: shuffled, ragged last batch dropped.
    train_set = _build_split('train')
    print('train', len(train_set))
    train_loader = DataLoader(
        train_set,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0,
        drop_last=True)

    # Test split: deterministic order, keep every sample.
    test_set = _build_split('test')
    print('test', len(test_set))
    test_loader = DataLoader(
        test_set,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=0,
        drop_last=False)

    return train_loader, train_set, test_loader, test_set


def eval_epoch(model, test_dataset, test_loader, opt, epoch):
    """ Epoch operation in evaluation phase. """
    model.eval()

    pred_chunks = []
    true_chunks = []
    warm = 0  # batch counter kept from the original; its value is never read
    with torch.no_grad():
        for batch in tqdm(test_loader, mininterval=2, desc=' - (Validation) ', leave=False):
            # Move every tensor of the batch to the evaluation device.
            batch_x, batch_y, batch_x_mark, batch_y_mark, mean, std = (
                t.float().to(opt.device) for t in batch)
            dec_inp = torch.zeros_like(batch_y).float()

            if opt.decoder == 'FC':
                # Add a predict token into the history sequence
                predict_token = torch.zeros(batch_x.size(0), 1, batch_x.size(-1), device=batch_x.device)
                batch_x = torch.cat([batch_x, predict_token], dim=1)
                batch_x_mark = torch.cat([batch_x_mark, batch_y_mark[:, 0:1, :]], dim=1)

            outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark, False)
            warm += 1

            # if inverse, both the output and the ground truth are denormalized.
            if opt.inverse:
                outputs, batch_y = test_dataset.inverse_transform(outputs, batch_y, mean, std)

            pred_chunks.append(outputs.detach().cpu().numpy())
            true_chunks.append(batch_y.detach().cpu().numpy())

    # Stack per-batch arrays into a single (n_samples, ...) array.
    preds = np.concatenate(np.array(pred_chunks), axis=0)
    print(preds.shape)
    trues = np.concatenate(np.array(true_chunks), axis=0)

    print('test shape:{}'.format(preds.shape))
    mae, mse, rmse, mape, mspe = metric(preds, trues)
    print('Epoch {}, mse:{}, mae:{}, rmse:{}, mape:{}, mspe:{}'.format(epoch, mse, mae, rmse, mape, mspe))
    return mse, mae, rmse, mape, mspe
def evaluate(model, opt, model_save_dir):
    """Evaluate a pretrained model checkpoint on the test split.

    Loads the ``state_dict`` stored at ``model_save_dir`` into ``model``,
    runs one evaluation pass and returns ``[mse, mae, rmse, mape, mspe]``
    as floats.
    """
    best_mse = 100000000

    # prepare dataloader (only the test split is used here)
    _, _, test_dataloader, test_dataset = prepare_dataloader(opt)

    # load pretrained model weights
    state_dict = torch.load(model_save_dir)["state_dict"]
    model.load_state_dict(state_dict)

    best_metrics = []
    mse, mae, rmse, mape, mspe = eval_epoch(model, test_dataset, test_dataloader, opt, 0)
    current_metrics = [float(mse), float(mae), float(rmse), float(mape), float(mspe)]
    # With a single evaluation pass this comparison always succeeds,
    # but the original guard is preserved verbatim.
    if best_mse > mse:
        best_mse = mse
        best_metrics = current_metrics
    return best_metrics
Evaluate preptrained models
13,457
import argparse import numpy as np import time import torch import torch.optim as optim import pyraformer.Pyraformer_LR as Pyraformer from tqdm import tqdm from data_loader import * from utils.tools import TopkMSELoss, metric def parse_args(): parser = argparse.ArgumentParser() # running mode parser.add_argument('-eval', action='store_true', default=False) # Path parameters parser.add_argument('-data', type=str, default='ETTh1') parser.add_argument('-root_path', type=str, default='../dataset/', help='root path of the data file') parser.add_argument('-data_path', type=str, default='ETTh1.csv', help='data file') # Dataloader parameters. parser.add_argument('-input_size', type=int, default=168) parser.add_argument('-predict_step', type=int, default=168) parser.add_argument('-inverse', action='store_true', help='denormalize output data', default=False) # Architecture selection. parser.add_argument('-model', type=str, default='Pyraformer') parser.add_argument('-decoder', type=str, default='FC') # selection: [FC, attention] # Training parameters. parser.add_argument('-epoch', type=int, default=5) parser.add_argument('-batch_size', type=int, default=32) parser.add_argument('-pretrain', action='store_true', default=False) parser.add_argument('-hard_sample_mining', action='store_true', default=False) parser.add_argument('-dropout', type=float, default=0.05) parser.add_argument('-lr', type=float, default=1e-4) parser.add_argument('-lr_step', type=float, default=0.1) # Common Model parameters. parser.add_argument('-d_model', type=int, default=512) parser.add_argument('-d_inner_hid', type=int, default=512) parser.add_argument('-d_k', type=int, default=128) parser.add_argument('-d_v', type=int, default=128) parser.add_argument('-d_bottleneck', type=int, default=128) parser.add_argument('-n_head', type=int, default=4) parser.add_argument('-n_layer', type=int, default=4) # Pyraformer parameters. 
parser.add_argument('-window_size', type=str, default='[4, 4, 4]') # The number of children of a parent node. parser.add_argument('-inner_size', type=int, default=3) # The number of ajacent nodes. # CSCM structure. selection: [Bottleneck_Construct, Conv_Construct, MaxPooling_Construct, AvgPooling_Construct] parser.add_argument('-CSCM', type=str, default='Bottleneck_Construct') parser.add_argument('-truncate', action='store_true', default=False) # Whether to remove coarse-scale nodes from the attention structure parser.add_argument('-use_tvm', action='store_true', default=False) # Whether to use TVM. # Experiment repeat times. parser.add_argument('-iter_num', type=int, default=1) # Repeat number. opt = parser.parse_args() return opt
null
13,458
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from datetime import datetime, timedelta import pandas as pd import math import numpy as np import random from tqdm import trange from io import BytesIO from urllib.request import urlopen from zipfile import ZipFile from math import sqrt from pandas import read_csv, DataFrame from scipy import stats import matplotlib import matplotlib.pyplot as plt The provided code snippet includes necessary dependencies for implementing the `prep_data` function. Write a Python function `def prep_data(data, covariates, data_start, train = True)` to solve the following problem: Divide the training sequence into windows Here is the function: def prep_data(data, covariates, data_start, train = True): """Divide the training sequence into windows""" time_len = data.shape[0] input_size = window_size-stride_size windows_per_series = np.full((num_series), (time_len-input_size) // stride_size) if train: windows_per_series -= (data_start+stride_size-1) // stride_size total_windows = np.sum(windows_per_series) x_input = np.zeros((total_windows, window_size, 1 + num_covariates + 1), dtype='float32') label = np.zeros((total_windows, window_size), dtype='float32') v_input = np.zeros((total_windows, 2), dtype='float32') count = 0 if not train: covariates = covariates[-time_len:] for series in trange(num_series): cov_age = stats.zscore(np.arange(total_time-data_start[series])) # shape:(series_len,) if train: covariates[data_start[series]:time_len, 0] = cov_age[:time_len-data_start[series]] else: covariates[:, 0] = cov_age[-time_len:] for i in range(windows_per_series[series]): if train: window_start = stride_size*i+data_start[series] else: window_start = stride_size*i window_end = window_start+window_size ''' print("x: ", x_input[count, 1:, 0].shape) print("window start: ", window_start) print("window end: ", window_end) print("data: ", data.shape) print("d: ", 
data[window_start:window_end-1, series].shape) ''' x_input[count, 1:, 0] = data[window_start:window_end-1, series] x_input[count, :, 1:1+num_covariates] = covariates[window_start:window_end, :] x_input[count, :, -1] = series label[count, :] = data[window_start:window_end, series] nonzero_sum = (x_input[count, 1:input_size, 0]!=0).sum() if nonzero_sum == 0: v_input[count, 0] = 0 else: v_input[count, 0] = np.true_divide(x_input[count, 1:input_size, 0].sum(),nonzero_sum)+1 x_input[count, :, 0] = x_input[count, :, 0]/v_input[count, 0] if train: label[count, :] = label[count, :]/v_input[count, 0] count += 1 prefix = os.path.join(save_path, 'train_' if train else 'test_') np.save(prefix+'data_'+save_name, x_input) np.save(prefix+'v_'+save_name, v_input) np.save(prefix+'label_'+save_name, label)
Divide the training sequence into windows
13,459
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from datetime import datetime, timedelta import pandas as pd import math import numpy as np import random from tqdm import trange from io import BytesIO from urllib.request import urlopen from zipfile import ZipFile from math import sqrt from pandas import read_csv, DataFrame from scipy import stats import matplotlib import matplotlib.pyplot as plt The provided code snippet includes necessary dependencies for implementing the `gen_covariates` function. Write a Python function `def gen_covariates(times, num_covariates)` to solve the following problem: Get covariates Here is the function: def gen_covariates(times, num_covariates): """Get covariates""" covariates = np.zeros((times.shape[0], num_covariates)) for i, input_time in enumerate(times): covariates[i, 1] = input_time.weekday() covariates[i, 2] = input_time.hour covariates[i, 3] = input_time.month for i in range(1,num_covariates): covariates[:,i] = stats.zscore(covariates[:,i]) return covariates[:, :num_covariates]
Get covariates
13,460
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from datetime import datetime, timedelta import pandas as pd import math import numpy as np import random from tqdm import trange from io import BytesIO from urllib.request import urlopen from zipfile import ZipFile from math import sqrt from pandas import read_csv, DataFrame from scipy import stats import matplotlib import matplotlib.pyplot as plt def visualize(data, week_start): x = np.arange(window_size) f = plt.figure() plt.plot(x, data[week_start:week_start+window_size], color='b') f.savefig("visual.png") plt.close()
null
13,461
from numpy.lib.npyio import save import pandas as pd import numpy as np import matplotlib.pyplot as plt import os from tqdm import trange import zipfile def load_data(filedir): data_frame = pd.read_csv(filedir, header=0, parse_dates=True) #names=['app_name', 'zone', 'time', 'value'] data_frame = data_frame.drop(data_frame.columns[0], axis=1) grouped_data = list(data_frame.groupby(["app_name", "zone"])) # covariates = gen_covariates(data_frame.index, 3) all_data = [] for i in range(len(grouped_data)): single_df = grouped_data[i][1].drop(labels=['app_name', 'zone'], axis=1).sort_values(by="time", ascending=True) times = pd.to_datetime(single_df.time) single_df['weekday'] = times.dt.dayofweek / 6 single_df['hour'] = times.dt.hour / 23 single_df['month'] = times.dt.month / 12 temp_data = single_df.values[:, 1:] if (temp_data[:, 0] == 0).sum() / len(temp_data) > 0.2: continue all_data.append(temp_data) return all_data
null
13,462
from numpy.lib.npyio import save import pandas as pd import numpy as np import matplotlib.pyplot as plt import os from tqdm import trange import zipfile def visualize(data, index, save_dir): os.makedirs(save_dir, exist_ok=True) for i in range(index): x = np.arange(len(data[i])) f = plt.figure() plt.plot(x, data[i][:, 0]) f.savefig(os.path.join(save_dir, "visual_{}.png".format(i))) plt.close()
null
13,463
from numpy.lib.npyio import save import pandas as pd import numpy as np import matplotlib.pyplot as plt import os from tqdm import trange import zipfile def normalize(inputs, seq_length): base_seq = inputs[:, :(seq_length-1), 0] nonzeros = (base_seq > 0).sum(1) v = base_seq.sum(1) / nonzeros v[v == 0] = 1 inputs[:, :, 0] = inputs[:, :, 0] / v[:, None] return inputs, v def save(data, v, save_dir): np.save(save_dir+'_data_flow.npy', data) np.save(save_dir+'_v_flow.npy', v) The provided code snippet includes necessary dependencies for implementing the `split_seq` function. Write a Python function `def split_seq(sequences, seq_length, slide_step, predict_length, save_dir)` to solve the following problem: Divide the training sequence into windows Here is the function: def split_seq(sequences, seq_length, slide_step, predict_length, save_dir): """Divide the training sequence into windows""" train_data = [] test_data = [] for seq_id in trange(len(sequences)): split_start = 0 single_seq = sequences[seq_id][:, 0] single_covariate = sequences[seq_id][:, 1:] windows = (len(single_seq)-seq_length+slide_step) // slide_step count = 0 train_count = int(0.97 * windows) while len(single_seq[split_start:]) > (seq_length + predict_length): seq_data = single_seq[split_start:(split_start+seq_length+predict_length-1)] single_data = np.zeros((seq_length+predict_length-1, 5)) single_data[:, 0] = seq_data.copy() single_data[:, 1:4] = single_covariate[split_start:(split_start+seq_length+predict_length-1)] single_data[:, -1] = seq_id count += 1 if count < train_count: train_data.append(single_data) else: test_data.append(single_data) split_start += slide_step os.makedirs(save_dir, exist_ok=True) train_data = np.array(train_data, dtype=np.float32) train_data, v = normalize(train_data, seq_length) save(train_data, v, save_dir + 'train') test_data = np.array(test_data, dtype=np.float32) test_data, v = normalize(test_data, seq_length) save(test_data, v, save_dir + 'test')
Divide the training sequence into windows
13,464
from numpy.lib.npyio import save import pandas as pd import numpy as np import matplotlib.pyplot as plt import os from tqdm import trange import zipfile def dezip(filedir): zip_file = zipfile.ZipFile(filedir) zip_list = zip_file.namelist() parent_dir = filedir.split('/')[0] for f in zip_list: zip_file.extract(f, parent_dir) zip_file.close()
null
13,465
import numpy as np import matplotlib.pyplot as plt from fbm import FBM The provided code snippet includes necessary dependencies for implementing the `fractional_brownian_noise` function. Write a Python function `def fractional_brownian_noise(length, hurst, step)` to solve the following problem: Genereate fractional brownian noise Here is the function: def fractional_brownian_noise(length, hurst, step): """Genereate fractional brownian noise""" f = FBM(length, hurst, step) noise = f.fbm() return noise
Generate fractional Brownian noise
13,466
import numpy as np import matplotlib.pyplot as plt from fbm import FBM def generate_sin(x, T, A): """Generate a mixed sinusoidal sequence""" y = np.zeros(len(x)) for i in range(len(T)): y += A[i] * np.sin(2 * np.pi / T[i] * x) return y def gen_covariates(x, index): """Generate covariates""" covariates = np.zeros((x.shape[0], 4)) covariates[:, 0] = (x // 24) % 7 covariates[:, 1] = x % 24 covariates[:, 2] = (x // (24 * 30)) % 12 covariates[:, 0] = covariates[:, 0] / 6 covariates[:, 1] = covariates[:, 1] / 23 covariates[:, 2] = covariates[:, 2] / 11 covariates[:, -1] = np.zeros(x.shape[0]) + index return covariates def polynomial_decay_cov(length): """Define the function of covariance decay with distance""" mean = np.zeros(length) x_axis = np.arange(length) distance = x_axis[:, None] - x_axis[None, :] distance = np.abs(distance) cov = 1 / (distance + 1) return mean, cov def multivariate_normal(mean, cov, seq_num): """Generate multivariate normal distribution""" noise = np.random.multivariate_normal(mean, cov, (seq_num,), 'raise') return noise The provided code snippet includes necessary dependencies for implementing the `synthesis_data` function. Write a Python function `def synthesis_data()` to solve the following problem: synthesis a mixed sinusoidal dataset Here is the function: def synthesis_data(): """synthesis a mixed sinusoidal dataset""" T = [24, 168, 720] seq_num = 60 seq_len = T[-1] * 20 data = [] covariates = [] for i in range(seq_num): start = int(np.random.uniform(0, T[-1])) x = start + np.arange(seq_len) A = np.random.uniform(5, 10, 3) y = generate_sin(x, T, A) data.append(y) covariates.append(gen_covariates(x, i)) # plt.plot(x[:T[-1]], y[:T[-1]]) # plt.show() data = np.array(data) mean, cov = polynomial_decay_cov(seq_len) noise = multivariate_normal(mean, cov, seq_num) data = data + noise covariates = np.array(covariates) data = np.concatenate([data[:, :, None], covariates], axis=2) np.save('data/synthetic.npy', data)
Synthesize a mixed sinusoidal dataset
13,467
import numpy as np import matplotlib.pyplot as plt from fbm import FBM def covariance(data): """compute the covariance of the data""" data_mean = data.mean(0) data = data - data_mean length = data.shape[1] data_covariance = np.zeros((length, length)) for i in range(length): for j in range(length): data_covariance[i, j] = (data[:, i] * data[:, j]).mean() return data_covariance The provided code snippet includes necessary dependencies for implementing the `test_fbm` function. Write a Python function `def test_fbm()` to solve the following problem: Plot the covariance of the generated fractional brownian noise Here is the function: def test_fbm(): """Plot the covariance of the generated fractional brownian noise""" f = FBM(300, 0.3, 1) fbm_data = [] for i in range(100): sample = f.fbm() fbm_data.append(sample[1:]) fbm_data = np.array(fbm_data) cov = covariance(fbm_data) plt.imshow(cov) plt.savefig('fbm_cov.jpg')
Plot the covariance of the generated fractional Brownian noise
13,468
import os import pandas as pd from torch.utils.data import Dataset, DataLoader from utils.tools import StandardScaler from utils.timefeatures import time_features import numpy as np import torch import warnings The provided code snippet includes necessary dependencies for implementing the `get_all_v` function. Write a Python function `def get_all_v(train_data, train_end, seq_len, pred_len, window_stride, type)` to solve the following problem: Get the normalization parameters of each sequence Here is the function: def get_all_v(train_data, train_end, seq_len, pred_len, window_stride, type): """Get the normalization parameters of each sequence""" seq_num = train_data.size(0) window_per_seq = (train_end - seq_len - pred_len) // window_stride window_number = seq_num * window_per_seq v = torch.zeros(window_number, dtype=torch.float64) for index in range(window_number): seq_idx = index // window_per_seq window_idx = index % window_per_seq s_begin = window_idx * window_stride s_end = s_begin + seq_len seq_x = train_data[seq_idx, s_begin:s_end].clone() if type == 'mean': mean = seq_x.mean() v[index] = mean + 1 else: std = seq_x.std() v[index] = std return v
Get the normalization parameters of each sequence
13,469
import os import pandas as pd from torch.utils.data import Dataset, DataLoader from utils.tools import StandardScaler from utils.timefeatures import time_features import numpy as np import torch import warnings def gen_covariates(times, num_covariates): """Get covariates""" covariates = np.zeros((times.shape[0], num_covariates)) for i, input_time in enumerate(times): covariates[i, 0] = input_time.weekday() / 7 covariates[i, 1] = input_time.hour / 24 covariates[i, 2] = input_time.month / 12 return covariates The provided code snippet includes necessary dependencies for implementing the `preprocess_elect` function. Write a Python function `def preprocess_elect(csv_path)` to solve the following problem: preprocess the elect dataset for long range forecasting Here is the function: def preprocess_elect(csv_path): """preprocess the elect dataset for long range forecasting""" num_covariates = 4 train_start = '2011-01-01 00:00:00' train_end = '2014-04-01 23:00:00' test_start = '2014-04-01 00:00:00' test_end = '2014-09-07 23:00:00' data_frame = pd.read_csv(csv_path, sep=";", index_col=0, parse_dates=True, decimal=',') data_frame = data_frame.resample('1H',label = 'left',closed = 'right').sum()[train_start:test_end] data_frame.fillna(0, inplace=True) covariates = gen_covariates(data_frame[train_start:test_end].index, num_covariates) all_data = data_frame[train_start:test_end].values data_start = (all_data!=0).argmax(axis=0) #find first nonzero value in each time series train_end = len(data_frame[train_start:train_end].values) all_data = all_data[:, data_start < 10000] data_start = data_start[data_start < 10000] split_start = data_start.max() all_data = all_data[split_start:] covariates = covariates[split_start:] train_end = train_end - split_start return all_data.astype(np.float32), covariates.astype(np.float32), train_end
preprocess the elect dataset for long range forecasting
13,470
import os import pandas as pd from torch.utils.data import Dataset, DataLoader from utils.tools import StandardScaler from utils.timefeatures import time_features import numpy as np import torch import warnings The provided code snippet includes necessary dependencies for implementing the `preprocess_flow` function. Write a Python function `def preprocess_flow(csv_path)` to solve the following problem: preprocess the app flow dataset for long range forecasting Here is the function: def preprocess_flow(csv_path): """preprocess the app flow dataset for long range forecasting""" data_frame = pd.read_csv(csv_path, names=['app_name', 'zone', 'time', 'value'], parse_dates=True) grouped_data = list(data_frame.groupby(["app_name", "zone"])) # covariates = gen_covariates(data_frame.index, 3) all_data = [] min_length = 10000 for i in range(len(grouped_data)): single_df = grouped_data[i][1].drop(labels=['app_name', 'zone'], axis=1).sort_values(by="time", ascending=True) times = pd.to_datetime(single_df.time) single_df['weekday'] = times.dt.dayofweek / 7 single_df['hour'] = times.dt.hour / 24 single_df['month'] = times.dt.month / 12 temp_data = single_df.values[:, 1:] if (temp_data[:, 0] == 0).sum() / len(temp_data) > 0.2 or len(temp_data) < 3000: continue if len(temp_data) < min_length: min_length = len(temp_data) all_data.append(temp_data) all_data = np.array([data[len(data)-min_length:, :] for data in all_data]).transpose(1, 0, 2).astype(np.float32) train_end = min(int(0.8 * min_length), min_length - 1000) covariates = all_data.copy() covariates[:, :, :-1] = covariates[:, :, 1:] return all_data[:, :, 0], covariates, train_end
preprocess the app flow dataset for long range forecasting
13,471
import os import pandas as pd from torch.utils.data import Dataset, DataLoader from utils.tools import StandardScaler from utils.timefeatures import time_features import numpy as np import torch import warnings def split(split_start, label, cov, pred_length): all_data = [] for batch_idx in range(len(label)): batch_label = label[batch_idx] for i in range(pred_length): single_data = batch_label[i:(split_start+i)].clone().unsqueeze(1) single_data[-1] = -1 single_cov = cov[batch_idx, i:(split_start+i), :].clone() temp_data = [single_data, single_cov] single_data = torch.cat(temp_data, dim=1) all_data.append(single_data) data = torch.stack(all_data, dim=0) label = label[:, -pred_length:].reshape(pred_length*len(label)) return data, label
null
13,472
import numpy as np from numpy.core.defchararray import split import pandas as pd from datetime import datetime from scipy import stats import os def load_data(datadir): df = pd.read_csv(datadir) data = (df.values).transpose(1, 0) return data
null
13,473
import numpy as np from numpy.core.defchararray import split import pandas as pd from datetime import datetime from scipy import stats import os The provided code snippet includes necessary dependencies for implementing the `get_covariates` function. Write a Python function `def get_covariates(data_len, start_day)` to solve the following problem: Get covariates Here is the function: def get_covariates(data_len, start_day): """Get covariates""" start_timestamp = datetime.timestamp(datetime.strptime(start_day, '%Y-%m-%d %H:%M:%S')) timestamps = np.arange(data_len) * 3600 + start_timestamp timestamps = [datetime.fromtimestamp(i) for i in timestamps] weekdays = stats.zscore(np.array([i.weekday() for i in timestamps])) hours = stats.zscore(np.array([i.hour for i in timestamps])) months = stats.zscore(np.array([i.month for i in timestamps])) covariates = np.stack([weekdays, hours, months], axis=1) return covariates
Get covariates
13,474
import numpy as np from numpy.core.defchararray import split import pandas as pd from datetime import datetime from scipy import stats import os def normalize(inputs, seq_length): base_seq = inputs[:, :seq_length, 0] nonzeros = (base_seq > 0).sum(1) inputs = inputs[nonzeros > 0] base_seq = inputs[:, :seq_length, 0] nonzeros = nonzeros[nonzeros > 0] v = base_seq.sum(1) / nonzeros v[v == 0] = 1 inputs[:, :, 0] = inputs[:, :, 0] / v[:, None] return inputs, v def save(data, v, save_dir): np.save(save_dir+'_data_wind.npy', data) np.save(save_dir+'_v_wind.npy', v) The provided code snippet includes necessary dependencies for implementing the `split_seq` function. Write a Python function `def split_seq(sequences, covariates, seq_length, slide_step, predict_length, save_dir)` to solve the following problem: Divide the training sequence into windows Here is the function: def split_seq(sequences, covariates, seq_length, slide_step, predict_length, save_dir): """Divide the training sequence into windows""" data_length = len(sequences[0]) windows = (data_length-seq_length+slide_step) // slide_step train_windows = int(0.97 * windows) test_windows = windows - train_windows train_data = np.zeros((train_windows*len(sequences), seq_length+predict_length-1, 5), dtype=np.float32) test_data = np.zeros((test_windows*len(sequences), seq_length+predict_length-1, 5), dtype=np.float32) count = 0 split_start = 0 seq_ids = np.arange(len(sequences))[:, None] end = split_start + seq_length + predict_length - 1 while end <= data_length: if count < train_windows: train_data[count*len(sequences):(count+1)*len(sequences), :, 0] = sequences[:, split_start:end] train_data[count*len(sequences):(count+1)*len(sequences), :, 1:4] = covariates[split_start:end, :] train_data[count*len(sequences):(count+1)*len(sequences), :, -1] = seq_ids else: test_data[(count-train_windows)*len(sequences):(count-train_windows+1)*len(sequences), :, 0] = sequences[:, split_start:end] 
test_data[(count-train_windows)*len(sequences):(count-train_windows+1)*len(sequences), :, 1:4] = covariates[split_start:end, :] test_data[(count-train_windows)*len(sequences):(count-train_windows+1)*len(sequences), :, -1] = seq_ids count += 1 split_start += slide_step end = split_start + seq_length + predict_length - 1 os.makedirs(save_dir, exist_ok=True) train_data, v = normalize(train_data, seq_length) save(train_data, v, save_dir + 'train') test_data, v = normalize(test_data, seq_length) save(test_data, v, save_dir + 'test')
Divide the training sequence into windows
13,475
from typing import List import math import torch from torch import nn import torch.nn.functional as F import torch.optim as optim from hierarchical_mm_tvm import graph_mm as graph_mm_tvm import argparse import time import numpy as np from math import sqrt torch.cuda.set_device(0) import pynvml The provided code snippet includes necessary dependencies for implementing the `get_q_k` function. Write a Python function `def get_q_k(input_size, window_size, stride, device)` to solve the following problem: Get the query-key index for PAM-TVM Here is the function: def get_q_k(input_size, window_size, stride, device): """Get the query-key index for PAM-TVM""" second_length = input_size // stride second_last = input_size - (second_length - 1) * stride third_start = input_size + second_length third_length = second_length // stride third_last = second_length - (third_length - 1) * stride max_attn = max(second_last, third_last) fourth_start = third_start + third_length fourth_length = third_length // stride full_length = fourth_start + fourth_length fourth_last = third_length - (fourth_length - 1) * stride max_attn = max(third_last, fourth_last) max_attn += window_size + 1 mask = torch.zeros(full_length, max_attn, dtype=torch.int32, device=device) - 1 # 按照层内、下层、上层的顺序为序列中每个q找对应的k # 第一层 for i in range(input_size): mask[i, 0:window_size] = i + torch.arange(window_size) - window_size // 2 # 当window在序列右端时,把它给注释掉 mask[i, mask[i] > input_size - 1] = -1 mask[i, -1] = i // stride + input_size mask[i][mask[i] > third_start - 1] = third_start - 1 # 第二层 for i in range(second_length): mask[input_size+i, 0:window_size] = input_size + i + torch.arange(window_size) - window_size // 2 # 当window在序列左端时,置为-1 mask[input_size+i, mask[input_size+i] < input_size] = -1 # 当window在序列右端时,置为-1 mask[input_size+i, mask[input_size+i] > third_start - 1] = -1 if i < second_length - 1: mask[input_size+i, window_size:(window_size+stride)] = torch.arange(stride) + i * stride else: mask[input_size+i, 
window_size:(window_size+second_last)] = torch.arange(second_last) + i * stride mask[input_size+i, -1] = i // stride + third_start mask[input_size+i, mask[input_size+i] > fourth_start - 1] = fourth_start - 1 # 第三层 for i in range(third_length): mask[third_start+i, 0:window_size] = third_start + i + torch.arange(window_size) - window_size // 2 # 当window在序列左端时,置为-1 mask[third_start+i, mask[third_start+i] < third_start] = -1 # 当window在序列右端时,置为-1 mask[third_start+i, mask[third_start+i] > fourth_start - 1] = -1 if i < third_length - 1: mask[third_start+i, window_size:(window_size+stride)] = input_size + torch.arange(stride) + i * stride else: mask[third_start+i, window_size:(window_size+third_last)] = input_size + torch.arange(third_last) + i * stride mask[third_start+i, -1] = i // stride + fourth_start mask[third_start+i, mask[third_start+i] > full_length - 1] = full_length - 1 # 第四层 for i in range(fourth_length): mask[fourth_start+i, 0:window_size] = fourth_start + i + torch.arange(window_size) - window_size // 2 # 当window在序列左端时,置为-1 mask[fourth_start+i, mask[fourth_start+i] < fourth_start] = -1 # 当window在序列右端时,置为-1 mask[fourth_start+i, mask[fourth_start+i] > full_length - 1] = -1 if i < fourth_length - 1: mask[fourth_start+i, window_size:(window_size+stride)] = third_start + torch.arange(stride) + i * stride else: mask[fourth_start+i, window_size:(window_size+fourth_last)] = third_start + torch.arange(fourth_last) + i * stride return mask
Get the query-key index for PAM-TVM
13,476
from typing import List import math import torch from torch import nn import torch.nn.functional as F import torch.optim as optim from hierarchical_mm_tvm import graph_mm as graph_mm_tvm import argparse import time import numpy as np from math import sqrt torch.cuda.set_device(0) import pynvml The provided code snippet includes necessary dependencies for implementing the `get_k_q` function. Write a Python function `def get_k_q(q_k_mask)` to solve the following problem: Get the key-query index from query-key index for PAM-TVM Here is the function: def get_k_q(q_k_mask): """Get the key-query index from query-key index for PAM-TVM""" k_q_mask = q_k_mask.clone() for i in range(len(q_k_mask)): for j in range(len(q_k_mask[0])): if q_k_mask[i, j] >= 0: k_q_mask[i, j] = torch.where(q_k_mask[q_k_mask[i, j]] ==i )[0] return k_q_mask
Get the key-query index from query-key index for PAM-TVM
13,477
from typing import List import math import torch from torch import nn import torch.nn.functional as F import torch.optim as optim from hierarchical_mm_tvm import graph_mm as graph_mm_tvm import argparse import time import numpy as np from math import sqrt torch.cuda.set_device(0) import pynvml The provided code snippet includes necessary dependencies for implementing the `get_mask` function. Write a Python function `def get_mask(input_size, window_size, inner_size, device)` to solve the following problem: Get the attention mask of PAM-Naive Here is the function: def get_mask(input_size, window_size, inner_size, device): """Get the attention mask of PAM-Naive""" # Get the size of all layers all_size = [] all_size.append(input_size) second_size = math.floor(input_size / window_size) all_size.append(second_size) third_size = math.floor(second_size / window_size) all_size.append(third_size) fourth_size = math.floor(third_size / window_size) all_size.append(fourth_size) seq_length = sum(all_size) mask = torch.zeros(seq_length, seq_length, device=device) # Get the intra-scale mask of each scale inner_window = inner_size // 2 # The first scale for i in range(input_size): left_side = max(i - inner_window, 0) right_side = min(i + inner_window + 1, input_size) mask[i, left_side:right_side] = 1 # The second scale start = input_size for i in range(start, start + second_size): left_side = max(i - inner_window, start) right_side = min(i + inner_window + 1, start + second_size) mask[i, left_side:right_side] = 1 # The third scale start = input_size + second_size for i in range(start, start + third_size): left_side = max(i - inner_window, start) right_side = min(i + inner_window + 1, start + third_size) mask[i, left_side:right_side] = 1 # The fourth scale start = input_size + second_size + third_size for i in range(start, start + fourth_size): left_side = max(i - inner_window, start) right_side = min(i + inner_window + 1, start + fourth_size) mask[i, left_side:right_side] = 1 # Get 
the inter-scale mask start = input_size for i in range(start, start + second_size): left_side = (i - input_size) * window_size if i == (start + second_size - 1): right_side = start else: right_side = (i - input_size + 1) * window_size mask[i, left_side:right_side] = 1 mask[left_side:right_side, i] = 1 # The third scale start = input_size + second_size for i in range(start, start + third_size): left_side = input_size + (i - start) * window_size if i == (start + third_size - 1): right_side = start else: right_side = input_size + (i - start + 1) * window_size mask[i, left_side:right_side] = 1 mask[left_side:right_side, i] = 1 # The fourth scale start = input_size + second_size + third_size for i in range(start, start + fourth_size): left_side = input_size + second_size + (i - start) * window_size if i == (start + fourth_size - 1): right_side = start else: right_side = input_size + second_size + (i - start + 1) * window_size mask[i, left_side:right_side] = 1 mask[left_side:right_side, i] = 1 mask = (1 - mask).bool() return mask, all_size
Get the attention mask of PAM-Naive
13,478
from typing import List import math import torch from torch import nn import torch.nn.functional as F import torch.optim as optim from hierarchical_mm_tvm import graph_mm as graph_mm_tvm import argparse import time import numpy as np from math import sqrt import pynvml def parsing(): parser = argparse.ArgumentParser(description='Needed for graph self attention.') parser.add_argument('-d_model', type=int, default=256) parser.add_argument('-d_k', type=int, default=64) parser.add_argument('-normalize_before', type=bool, default=False) parser.add_argument('-n_head', type=int, default=4) parser.add_argument('-dropout', type=float, default=0.1) # arguments for Multiformer parser.add_argument('-window_size', type=int, default=3) parser.add_argument('-stride_size', type=int, default=25) # arguments for ProbSparse parser.add_argument('-factor', type=int, default=5) # arguments for full-attention parser.add_argument('-mask', type=int, default=0) parser.add_argument('-seq_len', type=int, default=1000) args = parser.parse_args() return args
null
13,479
from typing import List import math import torch from torch import nn import torch.nn.functional as F import torch.optim as optim from hierarchical_mm_tvm import graph_mm as graph_mm_tvm import argparse import time import numpy as np from math import sqrt torch.cuda.set_device(0) print('Using device: {}'.format(torch.cuda.get_device_name())) import pynvml pynvml.nvmlInit() class NormalSelfAttention(nn.Module): def __init__(self, opt): super(NormalSelfAttention, self).__init__() self.normalize_before = opt.normalize_before self.n_head = opt.n_head self.d_k = opt.d_k self.w_qs = nn.Linear(opt.d_model, opt.n_head * opt.d_k, bias=False) self.w_ks = nn.Linear(opt.d_model, opt.n_head * opt.d_k, bias=False) self.w_vs = nn.Linear(opt.d_model, opt.n_head * opt.d_k, bias=False) nn.init.xavier_uniform_(self.w_qs.weight) nn.init.xavier_uniform_(self.w_ks.weight) nn.init.xavier_uniform_(self.w_vs.weight) self.fc = nn.Linear(opt.d_k * opt.n_head, opt.d_model) nn.init.xavier_uniform_(self.fc.weight) self.layer_norm = nn.LayerNorm(opt.d_model, eps=1e-6) self.dropout_attn = nn.Dropout(opt.dropout) self.dropout_fc = nn.Dropout(opt.dropout) self.seq_len = opt.seq_len self.window_size = opt.window_size self.stride_size = opt.stride_size if opt.mask: self.mask, _ = get_mask(self.seq_len, self.stride_size, self.window_size, opt.device) else: self.mask = None def forward(self, hidden_states): residual = hidden_states hidden_states = hidden_states bsz, seq_len, _ = hidden_states.size() q = hidden_states if self.normalize_before: q = self.layer_norm(q) q = self.w_qs(q) k = self.w_ks(hidden_states) v = self.w_vs(hidden_states) q /= math.sqrt(self.d_k) q = q.view(bsz, seq_len, self.n_head, self.d_k).transpose(1, 2) k = k.view(bsz, seq_len, self.n_head, self.d_k).transpose(1, 2) v = v.view(bsz, seq_len, self.n_head, self.d_k).transpose(1, 2) q = q.float().contiguous() k = k.float().contiguous() v = v.float().contiguous() attn = torch.matmul(q, k.transpose(2, 3)) if self.mask is not None: attn 
= attn.masked_fill(self.mask.unsqueeze(0).unsqueeze(1), -1e9) attn = self.dropout_attn(F.softmax(attn, dim=-1)) attn = torch.matmul(attn, v).transpose(1, 2).contiguous() attn = attn.view(bsz, seq_len, self.n_head * self.d_k) context = self.dropout_fc(self.fc(attn)) context += residual if not self.normalize_before: context = self.layer_norm(context) return context The provided code snippet includes necessary dependencies for implementing the `test_NSA` function. Write a Python function `def test_NSA(args, input_len)` to solve the following problem: Test the time and CUDA memory consumption of normal self attention. Here is the function: def test_NSA(args, input_len): """Test the time and CUDA memory consumption of normal self attention.""" handle = pynvml.nvmlDeviceGetHandleByIndex(1) meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle) init_mem = meminfo.used / 1024**3 NSA_Layer = NormalSelfAttention(args).to(args.device) optimizer = optim.Adam(NSA_Layer.parameters(), 1e-4) optimizer.zero_grad() hidden_state = torch.ones(4, input_len, args.d_model, dtype=torch.float32).to(args.device) fake_gt = torch.zeros(4, input_len, args.d_model).to(args.device) # Preload the layer result = NSA_Layer(hidden_state) loss = ((fake_gt - result) ** 2).mean() loss.backward() optimizer.step() used_memory = 0 start_time = time.time() for i in range(1000): result = NSA_Layer(hidden_state) handle = pynvml.nvmlDeviceGetHandleByIndex(1) meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle) used_memory += meminfo.used / 1024**3 loss = ((fake_gt - result) ** 2).mean() loss.backward() optimizer.step() print('NSA used average time: {} s'.format(round((time.time() - start_time) / 1000, 4))) used_memory = used_memory / 1000 print('NSA used average memory: {} GB'.format(round(used_memory-init_mem, 4)))
Test the time and CUDA memory consumption of normal self attention.
13,480
from typing import List import math import torch from torch import nn import torch.nn.functional as F import torch.optim as optim from hierarchical_mm_tvm import graph_mm as graph_mm_tvm import argparse import time import numpy as np from math import sqrt torch.cuda.set_device(0) print('Using device: {}'.format(torch.cuda.get_device_name())) import pynvml pynvml.nvmlInit() class GraphSelfAttention(nn.Module): def __init__(self, opt): super(GraphSelfAttention, self).__init__() self.normalize_before = opt.normalize_before self.n_head = opt.n_head self.d_k = opt.d_k self.w_qs = nn.Linear(opt.d_model, opt.n_head * opt.d_k, bias=False) self.w_ks = nn.Linear(opt.d_model, opt.n_head * opt.d_k, bias=False) self.w_vs = nn.Linear(opt.d_model, opt.n_head * opt.d_k, bias=False) nn.init.xavier_uniform_(self.w_qs.weight) nn.init.xavier_uniform_(self.w_ks.weight) nn.init.xavier_uniform_(self.w_vs.weight) self.fc = nn.Linear(opt.d_k * opt.n_head, opt.d_model) nn.init.xavier_uniform_(self.fc.weight) self.layer_norm = nn.LayerNorm(opt.d_model, eps=1e-6) self.dropout_attn = nn.Dropout(opt.dropout) self.dropout_fc = nn.Dropout(opt.dropout) self.seq_len = opt.seq_len self.window_size = opt.window_size self.stride_size = opt.stride_size self.q_k_mask = get_q_k(self.seq_len, self.window_size, self.stride_size, opt.device) self.k_q_mask = get_k_q(self.q_k_mask) def forward(self, hidden_states): residual = hidden_states hidden_states = hidden_states bsz, seq_len, _ = hidden_states.size() q = hidden_states if self.normalize_before: q = self.layer_norm(q) q = self.w_qs(q) k = self.w_ks(hidden_states) v = self.w_vs(hidden_states) q /= math.sqrt(self.d_k) q = q.view(bsz, seq_len, self.n_head, self.d_k) k = k.view(bsz, seq_len, self.n_head, self.d_k) q = q.float().contiguous() k = k.float().contiguous() # attn_weights.size(): (batch_size, L, num_heads, 11) 另外注意这里设置is_t1_diagonaled为False,用于q和k attention attn_weights = graph_mm_tvm(q, k, self.q_k_mask, self.k_q_mask, False, 0) attn_weights = 
self.dropout_attn(F.softmax(attn_weights, dim=-1)) v = v.view(bsz, seq_len, self.n_head, self.d_k) v = v.float().contiguous() # 这里用于attention scores和v相乘,注意is_t1_diagonaled=True attn = graph_mm_tvm(attn_weights, v, self.q_k_mask, self.k_q_mask, True, 0) attn = attn.reshape(bsz, seq_len, self.n_head * self.d_k).contiguous() context = self.dropout_fc(self.fc(attn)) context += residual if not self.normalize_before: context = self.layer_norm(context) return context The provided code snippet includes necessary dependencies for implementing the `test_GSA` function. Write a Python function `def test_GSA(args, input_len)` to solve the following problem: Test the time and CUDA memory consumption of PAM. Here is the function: def test_GSA(args, input_len): """Test the time and CUDA memory consumption of PAM.""" handle = pynvml.nvmlDeviceGetHandleByIndex(1) meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle) init_mem = meminfo.used / 1024**3 GSA_Layer = GraphSelfAttention(args).to(args.device) optimizer = optim.Adam(GSA_Layer.parameters(), 1e-4) optimizer.zero_grad() hidden_state = torch.ones(4, input_len, args.d_model, dtype=torch.float32, device=args.device) fake_gt = torch.zeros(4, input_len, args.d_model, device=args.device) # Preload the layer result = GSA_Layer(hidden_state) loss = ((fake_gt - result) ** 2).mean() loss.backward() optimizer.step() used_memory = 0 repeat_times = 1000 start_time = time.time() for i in range(repeat_times): result = GSA_Layer(hidden_state) handle = pynvml.nvmlDeviceGetHandleByIndex(1) meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle) used_memory += meminfo.used / 1024**3 loss = ((fake_gt - result) ** 2).mean() loss.backward() optimizer.step() print('GSA used time:{} s'.format(round((time.time() - start_time) / repeat_times, 4))) used_memory = used_memory / repeat_times print('GSA used average memory: {} GB'.format(round(used_memory-init_mem, 4)))
Test the time and CUDA memory consumption of PAM.
13,481
from typing import List import math import torch from torch import nn import torch.nn.functional as F import torch.optim as optim from hierarchical_mm_tvm import graph_mm as graph_mm_tvm import argparse import time import numpy as np from math import sqrt torch.cuda.set_device(0) print('Using device: {}'.format(torch.cuda.get_device_name())) import pynvml pynvml.nvmlInit() class ProbSparseAttention(nn.Module): def __init__(self, opt): super(ProbSparseAttention, self).__init__() self.normalize_before = opt.normalize_before self.n_head = opt.n_head self.d_k = opt.d_k self.w_qs = nn.Linear(opt.d_model, opt.n_head * opt.d_k, bias=False) self.w_ks = nn.Linear(opt.d_model, opt.n_head * opt.d_k, bias=False) self.w_vs = nn.Linear(opt.d_model, opt.n_head * opt.d_k, bias=False) nn.init.xavier_uniform_(self.w_qs.weight) nn.init.xavier_uniform_(self.w_ks.weight) nn.init.xavier_uniform_(self.w_vs.weight) self.fc = nn.Linear(opt.d_k * opt.n_head, opt.d_model) nn.init.xavier_uniform_(self.fc.weight) self.layer_norm = nn.LayerNorm(opt.d_model, eps=1e-6) self.dropout_attn = nn.Dropout(opt.dropout) self.dropout_fc = nn.Dropout(opt.dropout) self.seq_len = opt.seq_len self.factor = opt.factor def _prob_QK(self, Q, K, sample_k, n_top): # n_top: c*ln(L_q) # Q [B, H, L, D] B, H, L_K, E = K.shape _, _, L_Q, _ = Q.shape # calculate the sampled Q_K K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E) index_sample = torch.randint(L_K, (L_Q, sample_k)) # real U = U_part(factor*ln(L_k))*L_q K_sample = K_expand[:, :, torch.arange(L_Q).unsqueeze(1), index_sample, :] Q_K_sample = torch.matmul(Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze() # find the Top_k query with sparisty measurement M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K) M_top = M.topk(n_top, sorted=False)[1] # use the reduced Q to calculate Q_K Q_reduce = Q[torch.arange(B)[:, None, None], torch.arange(H)[None, :, None], M_top, :] # factor*ln(L_q) Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1)) # 
factor*ln(L_q)*L_k return Q_K, M_top def _get_initial_context(self, V, L_Q): B, H, L_V, D = V.shape V_sum = V.mean(dim=-2) contex = V_sum.unsqueeze(-2).expand(B, H, L_Q, V_sum.shape[-1]).clone() return contex def _update_context(self, context_in, V, scores, index, L_Q): B, H, L_V, D = V.shape attn = torch.softmax(scores, dim=-1) # nn.Softmax(dim=-1)(scores) context_in[torch.arange(B)[:, None, None], torch.arange(H)[None, :, None], index, :] = torch.matmul(attn, V).type_as(context_in) return context_in def forward(self, hidden_states): residual = hidden_states hidden_states = hidden_states bsz, seq_len, _ = hidden_states.size() q = hidden_states if self.normalize_before: q = self.layer_norm(q) q = self.w_qs(q) k = self.w_ks(hidden_states) v = self.w_vs(hidden_states) q /= math.sqrt(self.d_k) q = q.view(bsz, seq_len, self.n_head, self.d_k).transpose(1, 2) k = k.view(bsz, seq_len, self.n_head, self.d_k).transpose(1, 2) v = v.view(bsz, seq_len, self.n_head, self.d_k).transpose(1, 2) q = q.float().contiguous() k = k.float().contiguous() v = v.float().contiguous() u = U_part = self.factor * np.ceil(np.log(seq_len)).astype('int').item() # c*ln(L_k) U_part = U_part if U_part<seq_len else seq_len u = u if u < seq_len else seq_len scores_top, index = self._prob_QK(q, k, sample_k=U_part, n_top=u) # get the context context = self._get_initial_context(v, seq_len) # update the context with selected top_k queries context = self._update_context(context, v, scores_top, index, seq_len).transpose(1, 2).contiguous() context = context.view(bsz, seq_len, self.n_head * self.d_k) context = self.dropout_fc(self.fc(context)) context += residual if not self.normalize_before: context = self.layer_norm(context) return context The provided code snippet includes necessary dependencies for implementing the `test_PSA` function. Write a Python function `def test_PSA(args, input_len)` to solve the following problem: Test the time and CUDA memory consumption of Prob-sparse self attention. 
Here is the function: def test_PSA(args, input_len): """Test the time and CUDA memory consumption of Prob-sparse self attention.""" handle = pynvml.nvmlDeviceGetHandleByIndex(1) meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle) init_mem = meminfo.used / 1024**3 LSA_Layer = ProbSparseAttention(args).to(args.device) optimizer = optim.Adam(LSA_Layer.parameters(), 1e-4) optimizer.zero_grad() hidden_state = torch.ones(4, input_len, args.d_model, dtype=torch.float32, device=args.device) fake_gt = torch.zeros(4, input_len, args.d_model, device=args.device) # Preload the layer result = LSA_Layer(hidden_state) loss = ((fake_gt - result) ** 2).mean() loss.backward() optimizer.step() used_memory = 0 repeat_times = 1000 start_time = time.time() for i in range(repeat_times): result = LSA_Layer(hidden_state) handle = pynvml.nvmlDeviceGetHandleByIndex(1) meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle) used_memory += meminfo.used / 1024**3 loss = ((fake_gt - result) ** 2).mean() loss.backward() optimizer.step() print('LSA used time:{} s'.format(round((time.time() - start_time) / repeat_times, 4))) used_memory = used_memory / repeat_times print('LSA used average memory: {} GB'.format(round(used_memory-init_mem, 4)))
Test the time and CUDA memory consumption of Prob-sparse self attention.
13,482
from torch.functional import align_tensors import torch.nn as nn from torch.nn.modules.linear import Linear from .SubLayers import MultiHeadAttention, PositionwiseFeedForward import torch from .embed import DataEmbedding, CustomEmbedding import math The provided code snippet includes necessary dependencies for implementing the `get_mask` function. Write a Python function `def get_mask(input_size, window_size, inner_size, device)` to solve the following problem: Get the attention mask of PAM-Naive Here is the function: def get_mask(input_size, window_size, inner_size, device): """Get the attention mask of PAM-Naive""" # Get the size of all layers all_size = [] all_size.append(input_size) for i in range(len(window_size)): layer_size = math.floor(all_size[i] / window_size[i]) all_size.append(layer_size) seq_length = sum(all_size) mask = torch.zeros(seq_length, seq_length, device=device) # get intra-scale mask inner_window = inner_size // 2 for layer_idx in range(len(all_size)): start = sum(all_size[:layer_idx]) for i in range(start, start + all_size[layer_idx]): left_side = max(i - inner_window, start) right_side = min(i + inner_window + 1, start + all_size[layer_idx]) mask[i, left_side:right_side] = 1 # get inter-scale mask for layer_idx in range(1, len(all_size)): start = sum(all_size[:layer_idx]) for i in range(start, start + all_size[layer_idx]): left_side = (start - all_size[layer_idx - 1]) + (i - start) * window_size[layer_idx - 1] if i == ( start + all_size[layer_idx] - 1): right_side = start else: right_side = (start - all_size[layer_idx - 1]) + (i - start + 1) * window_size[layer_idx - 1] mask[i, left_side:right_side] = 1 mask[left_side:right_side, i] = 1 mask = (1 - mask).bool() return mask, all_size
Get the attention mask of PAM-Naive
13,483
from torch.functional import align_tensors import torch.nn as nn from torch.nn.modules.linear import Linear from .SubLayers import MultiHeadAttention, PositionwiseFeedForward import torch from .embed import DataEmbedding, CustomEmbedding import math The provided code snippet includes necessary dependencies for implementing the `refer_points` function. Write a Python function `def refer_points(all_sizes, window_size, device)` to solve the following problem: Gather features from PAM's pyramid sequences Here is the function: def refer_points(all_sizes, window_size, device): """Gather features from PAM's pyramid sequences""" input_size = all_sizes[0] indexes = torch.zeros(input_size, len(all_sizes), device=device) for i in range(input_size): indexes[i][0] = i former_index = i for j in range(1, len(all_sizes)): start = sum(all_sizes[:j]) inner_layer_idx = former_index - (start - all_sizes[j - 1]) former_index = start + min(inner_layer_idx // window_size[j - 1], all_sizes[j] - 1) indexes[i][j] = former_index indexes = indexes.unsqueeze(0).unsqueeze(3) return indexes.long()
Gather features from PAM's pyramid sequences
13,484
from torch.functional import align_tensors import torch.nn as nn from torch.nn.modules.linear import Linear from .SubLayers import MultiHeadAttention, PositionwiseFeedForward import torch from .embed import DataEmbedding, CustomEmbedding import math The provided code snippet includes necessary dependencies for implementing the `get_subsequent_mask` function. Write a Python function `def get_subsequent_mask(input_size, window_size, predict_step, truncate)` to solve the following problem: Get causal attention mask for decoder. Here is the function: def get_subsequent_mask(input_size, window_size, predict_step, truncate): """Get causal attention mask for decoder.""" if truncate: mask = torch.zeros(predict_step, input_size + predict_step) for i in range(predict_step): mask[i][:input_size+i+1] = 1 mask = (1 - mask).bool().unsqueeze(0) else: all_size = [] all_size.append(input_size) for i in range(len(window_size)): layer_size = math.floor(all_size[i] / window_size[i]) all_size.append(layer_size) all_size = sum(all_size) mask = torch.zeros(predict_step, all_size + predict_step) for i in range(predict_step): mask[i][:all_size+i+1] = 1 mask = (1 - mask).bool().unsqueeze(0) return mask
Get causal attention mask for decoder.
13,485
from torch.functional import align_tensors import torch.nn as nn from torch.nn.modules.linear import Linear from .SubLayers import MultiHeadAttention, PositionwiseFeedForward import torch from .embed import DataEmbedding, CustomEmbedding import math The provided code snippet includes necessary dependencies for implementing the `get_q_k` function. Write a Python function `def get_q_k(input_size, window_size, stride, device)` to solve the following problem: Get the index of the key that a given query needs to attend to. Here is the function: def get_q_k(input_size, window_size, stride, device): """ Get the index of the key that a given query needs to attend to. """ second_length = input_size // stride second_last = input_size - (second_length - 1) * stride third_start = input_size + second_length third_length = second_length // stride third_last = second_length - (third_length - 1) * stride max_attn = max(second_last, third_last) fourth_start = third_start + third_length fourth_length = third_length // stride full_length = fourth_start + fourth_length fourth_last = third_length - (fourth_length - 1) * stride max_attn = max(third_last, fourth_last) max_attn += window_size + 1 mask = torch.zeros(full_length, max_attn, dtype=torch.int32, device=device) - 1 for i in range(input_size): mask[i, 0:window_size] = i + torch.arange(window_size) - window_size // 2 mask[i, mask[i] > input_size - 1] = -1 mask[i, -1] = i // stride + input_size mask[i][mask[i] > third_start - 1] = third_start - 1 for i in range(second_length): mask[input_size+i, 0:window_size] = input_size + i + torch.arange(window_size) - window_size // 2 mask[input_size+i, mask[input_size+i] < input_size] = -1 mask[input_size+i, mask[input_size+i] > third_start - 1] = -1 if i < second_length - 1: mask[input_size+i, window_size:(window_size+stride)] = torch.arange(stride) + i * stride else: mask[input_size+i, window_size:(window_size+second_last)] = torch.arange(second_last) + i * stride mask[input_size+i, -1] = i 
// stride + third_start mask[input_size+i, mask[input_size+i] > fourth_start - 1] = fourth_start - 1 for i in range(third_length): mask[third_start+i, 0:window_size] = third_start + i + torch.arange(window_size) - window_size // 2 mask[third_start+i, mask[third_start+i] < third_start] = -1 mask[third_start+i, mask[third_start+i] > fourth_start - 1] = -1 if i < third_length - 1: mask[third_start+i, window_size:(window_size+stride)] = input_size + torch.arange(stride) + i * stride else: mask[third_start+i, window_size:(window_size+third_last)] = input_size + torch.arange(third_last) + i * stride mask[third_start+i, -1] = i // stride + fourth_start mask[third_start+i, mask[third_start+i] > full_length - 1] = full_length - 1 for i in range(fourth_length): mask[fourth_start+i, 0:window_size] = fourth_start + i + torch.arange(window_size) - window_size // 2 mask[fourth_start+i, mask[fourth_start+i] < fourth_start] = -1 mask[fourth_start+i, mask[fourth_start+i] > full_length - 1] = -1 if i < fourth_length - 1: mask[fourth_start+i, window_size:(window_size+stride)] = third_start + torch.arange(stride) + i * stride else: mask[fourth_start+i, window_size:(window_size+fourth_last)] = third_start + torch.arange(fourth_last) + i * stride return mask
Get the index of the key that a given query needs to attend to.
13,486
from torch.functional import align_tensors import torch.nn as nn from torch.nn.modules.linear import Linear from .SubLayers import MultiHeadAttention, PositionwiseFeedForward import torch from .embed import DataEmbedding, CustomEmbedding import math The provided code snippet includes necessary dependencies for implementing the `get_k_q` function. Write a Python function `def get_k_q(q_k_mask)` to solve the following problem: Get the index of the query that can attend to the given key. Here is the function: def get_k_q(q_k_mask): """ Get the index of the query that can attend to the given key. """ k_q_mask = q_k_mask.clone() for i in range(len(q_k_mask)): for j in range(len(q_k_mask[0])): if q_k_mask[i, j] >= 0: k_q_mask[i, j] = torch.where(q_k_mask[q_k_mask[i, j]] ==i )[0] return k_q_mask
Get the index of the query that can attend to the given key.
13,487
import argparse import time import torch import torch.optim as optim from torch.utils.data.sampler import RandomSampler from tqdm import tqdm import os import pyraformer.Pyraformer_SS as Pyraformer from data_loader import * import os from utils.tools import SingleStepLoss as LossFactory from utils.tools import AE_loss The provided code snippet includes necessary dependencies for implementing the `get_dataset_parameters` function. Write a Python function `def get_dataset_parameters(opt)` to solve the following problem: Prepare specific parameters for different datasets Here is the function: def get_dataset_parameters(opt): """Prepare specific parameters for different datasets""" dataset2num = { 'elect': 370, 'flow': 1083, 'wind': 29 } dataset2covariate = { 'elect':3, 'flow': 3, 'wind': 3 } dataset2input_len = { 'elect':169, 'flow': 192, 'wind': 192 } dataset2ignore_zero = { 'elect': True, 'flow': True, 'wind': False } opt.num_seq = dataset2num[opt.dataset] opt.covariate_size = dataset2covariate[opt.dataset] opt.input_size = dataset2input_len[opt.dataset] opt.ignore_zero = dataset2ignore_zero[opt.dataset] return opt
Prepare specific parameters for different datasets
13,488
import argparse import time import torch import torch.optim as optim from torch.utils.data.sampler import RandomSampler from tqdm import tqdm import os import pyraformer.Pyraformer_SS as Pyraformer from data_loader import * import os from utils.tools import SingleStepLoss as LossFactory from utils.tools import AE_loss def prepare_dataloader(opt): """ Load data and prepare dataloader. """ data_dir = opt.data_path dataset = opt.dataset train_set = eval(dataset+'TrainDataset')(data_dir, dataset, opt.predict_step, opt.inner_batch) test_set = eval(dataset+'TestDataset')(data_dir, dataset, opt.predict_step) train_sampler = RandomSampler(train_set) test_sampler = RandomSampler(test_set) trainloader = DataLoader(train_set, batch_size=1, sampler=train_sampler, num_workers=0) testloader = DataLoader(test_set, batch_size=1, sampler=test_sampler, num_workers=0) return trainloader, testloader def eval_epoch(model, validation_data, opt): """ Epoch operation in evaluation phase. """ model.eval() total_likelihood = 0 total_se = 0 total_ae = 0 total_label = 0 total_pred_num = 0 index = 0 criterion = LossFactory(opt.ignore_zero) with torch.no_grad(): for batch in tqdm(validation_data, mininterval=2, desc=' - (Validation) ', leave=False): """ prepare data """ sequence, label, v = map(lambda x: x.to(opt.device).squeeze(0), batch) """ forward """ mu_pre, sigma_pre = model.test(sequence, v) likelihood_losses, mse_losses = criterion(mu_pre, sigma_pre, label) ae_losses = AE_loss(mu_pre, label, opt.ignore_zero) index += 1 total_likelihood += torch.sum(likelihood_losses).detach().double() total_se += torch.sum(mse_losses).detach().double() total_ae += torch.sum(ae_losses).detach().double() total_label += torch.sum(label).detach().item() total_pred_num += len(likelihood_losses) se = torch.sqrt(total_se / total_pred_num) / (total_label / total_pred_num) ae = total_ae / total_label return total_likelihood / total_pred_num, se, ae The provided code snippet includes necessary dependencies for 
implementing the `evaluate` function. Write a Python function `def evaluate(model, opt, model_save_dir)` to solve the following problem: Evaluate preptrained models Here is the function: def evaluate(model, opt, model_save_dir): """Evaluate preptrained models""" index_names = ['Log-Likelihood', 'NMSE', 'NMAE'] """ prepare dataloader """ _, validation_data = prepare_dataloader(opt) """ load pretrained model """ checkpoint = torch.load(model_save_dir)["model"] model.load_state_dict(checkpoint) start = time.time() valid_likelihood, valid_mse, valid_mae = eval_epoch(model, validation_data, opt) print(' - (Testing) loglikelihood: {ll: 8.5f}, ' 'RMSE: {RMSE: 8.5f}, ' 'NMAE: {accuracy: 8.5f}, ' 'elapse: {elapse:3.3f} min' .format(ll=valid_likelihood, RMSE=valid_mse, accuracy=valid_mae, elapse=(time.time() - start) / 60)) best_metrics = [valid_likelihood, valid_mse, valid_mae] print(index_names) print(best_metrics) return index_names, best_metrics
Evaluate preptrained models
13,489
import argparse import time import torch import torch.optim as optim from torch.utils.data.sampler import RandomSampler from tqdm import tqdm import os import pyraformer.Pyraformer_SS as Pyraformer from data_loader import * import os from utils.tools import SingleStepLoss as LossFactory from utils.tools import AE_loss def arg_parser(): parser = argparse.ArgumentParser() # running mode parser.add_argument('-eval', action='store_true', default=False) # Path parameters parser.add_argument('-data_path', type=str, default='data/elect/') parser.add_argument('-dataset', type=str, default='elect') # Train parameters parser.add_argument('-epoch', type=int, default=10) parser.add_argument('-inner_batch', type=int, default=8) # Equivalent batch size parser.add_argument('-lr', type=float, default=1e-5) parser.add_argument('-visualize_fre', type=int, default=2000) parser.add_argument('-pretrain', action='store_false', default=True) parser.add_argument('-hard_sample_mining', action='store_false', default=True) # Model parameters parser.add_argument('-model', type=str, default='Pyraformer') parser.add_argument('-d_model', type=int, default=512) parser.add_argument('-d_inner_hid', type=int, default=512) parser.add_argument('-d_k', type=int, default=128) parser.add_argument('-d_v', type=int, default=128) parser.add_argument('-n_head', type=int, default=4) parser.add_argument('-n_layer', type=int, default=4) parser.add_argument('-dropout', type=float, default=0.1) # Pyraformer parameters parser.add_argument('-window_size', type=str, default='[4, 4, 4]') # # The number of children of a parent node. parser.add_argument('-inner_size', type=int, default=3) # The number of ajacent nodes. parser.add_argument('-use_tvm', action='store_true', default=False) # Whether to use TVM. # Test parameter parser.add_argument('-predict_step', type=int, default=24) opt = parser.parse_args() return opt
null
13,490
from typing import List import numpy as np import pandas as pd from pandas.tseries import offsets from pandas.tseries.frequencies import to_offset def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: """ Returns a list of time features that will be appropriate for the given frequency string. Parameters ---------- freq_str Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc. """ features_by_offsets = { offsets.YearEnd: [], offsets.QuarterEnd: [MonthOfYear], offsets.MonthEnd: [MonthOfYear], offsets.Week: [DayOfMonth, WeekOfYear], offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear], offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear], offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear], offsets.Minute: [ MinuteOfHour, HourOfDay, DayOfWeek, DayOfMonth, DayOfYear, ], offsets.Second: [ SecondOfMinute, MinuteOfHour, HourOfDay, DayOfWeek, DayOfMonth, DayOfYear, ], } offset = to_offset(freq_str) for offset_type, feature_classes in features_by_offsets.items(): if isinstance(offset, offset_type): return [cls() for cls in feature_classes] supported_freq_msg = f""" Unsupported frequency {freq_str} The following frequencies are supported: Y - yearly alias: A M - monthly W - weekly D - daily B - business days H - hourly T - minutely alias: min S - secondly """ raise RuntimeError(supported_freq_msg) def time_features(dates, timeenc=1, freq='h'): if timeenc==0: dates['month'] = dates.date.apply(lambda row:row.month,1) dates['day'] = dates.date.apply(lambda row:row.day,1) dates['weekday'] = dates.date.apply(lambda row:row.weekday(),1) dates['hour'] = dates.date.apply(lambda row:row.hour,1) dates['minute'] = dates.date.apply(lambda row:row.minute,1) dates['minute'] = dates.minute.map(lambda x:x//15) freq_map = { 'y':[],'m':['month'],'w':['month'],'d':['month','day','weekday'], 'b':['month','day','weekday'],'h':['month','day','weekday','hour'], 't':['month','day','weekday','hour','minute'], } return 
dates[freq_map[freq.lower()]].values if timeenc==1: dates = pd.to_datetime(dates.date.values) return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)]).transpose(1,0)
null
13,491
import numpy as np import torch import torch.nn as nn The provided code snippet includes necessary dependencies for implementing the `get_frequency_modes` function. Write a Python function `def get_frequency_modes(seq_len, modes=64, mode_select_method='random')` to solve the following problem: get modes on frequency domain: 'random' means sampling randomly; 'else' means sampling the lowest modes; Here is the function: def get_frequency_modes(seq_len, modes=64, mode_select_method='random'): """ get modes on frequency domain: 'random' means sampling randomly; 'else' means sampling the lowest modes; """ modes = min(modes, seq_len//2) if mode_select_method == 'random': index = list(range(0, seq_len // 2)) np.random.shuffle(index) index = index[:modes] else: index = list(range(0, modes)) index.sort() return index
get modes on frequency domain: 'random' means sampling randomly; 'else' means sampling the lowest modes;
13,492
import torch import torch.nn as nn import numpy as np from functools import partial from scipy.special import eval_legendre from sympy import Poly, legendre, Symbol, chebyshevt def legendreDer(k, x): def _legendre(k, x): return (2*k+1) * eval_legendre(k, x) out = 0 for i in np.arange(k-1,-1,-2): out += _legendre(i, x) return out def get_phi_psi(k, base): x = Symbol('x') phi_coeff = np.zeros((k,k)) phi_2x_coeff = np.zeros((k,k)) if base == 'legendre': for ki in range(k): coeff_ = Poly(legendre(ki, 2*x-1), x).all_coeffs() phi_coeff[ki,:ki+1] = np.flip(np.sqrt(2*ki+1) * np.array(coeff_).astype(np.float64)) coeff_ = Poly(legendre(ki, 4*x-1), x).all_coeffs() phi_2x_coeff[ki,:ki+1] = np.flip(np.sqrt(2) * np.sqrt(2*ki+1) * np.array(coeff_).astype(np.float64)) psi1_coeff = np.zeros((k, k)) psi2_coeff = np.zeros((k, k)) for ki in range(k): psi1_coeff[ki,:] = phi_2x_coeff[ki,:] for i in range(k): a = phi_2x_coeff[ki,:ki+1] b = phi_coeff[i, :i+1] prod_ = np.convolve(a, b) prod_[np.abs(prod_)<1e-8] = 0 proj_ = (prod_ * 1/(np.arange(len(prod_))+1) * np.power(0.5, 1+np.arange(len(prod_)))).sum() psi1_coeff[ki,:] -= proj_ * phi_coeff[i,:] psi2_coeff[ki,:] -= proj_ * phi_coeff[i,:] for j in range(ki): a = phi_2x_coeff[ki,:ki+1] b = psi1_coeff[j, :] prod_ = np.convolve(a, b) prod_[np.abs(prod_)<1e-8] = 0 proj_ = (prod_ * 1/(np.arange(len(prod_))+1) * np.power(0.5, 1+np.arange(len(prod_)))).sum() psi1_coeff[ki,:] -= proj_ * psi1_coeff[j,:] psi2_coeff[ki,:] -= proj_ * psi2_coeff[j,:] a = psi1_coeff[ki,:] prod_ = np.convolve(a, a) prod_[np.abs(prod_)<1e-8] = 0 norm1 = (prod_ * 1/(np.arange(len(prod_))+1) * np.power(0.5, 1+np.arange(len(prod_)))).sum() a = psi2_coeff[ki,:] prod_ = np.convolve(a, a) prod_[np.abs(prod_)<1e-8] = 0 norm2 = (prod_ * 1/(np.arange(len(prod_))+1) * (1-np.power(0.5, 1+np.arange(len(prod_))))).sum() norm_ = np.sqrt(norm1 + norm2) psi1_coeff[ki,:] /= norm_ psi2_coeff[ki,:] /= norm_ psi1_coeff[np.abs(psi1_coeff)<1e-8] = 0 psi2_coeff[np.abs(psi2_coeff)<1e-8] = 0 
phi = [np.poly1d(np.flip(phi_coeff[i,:])) for i in range(k)] psi1 = [np.poly1d(np.flip(psi1_coeff[i,:])) for i in range(k)] psi2 = [np.poly1d(np.flip(psi2_coeff[i,:])) for i in range(k)] elif base == 'chebyshev': for ki in range(k): if ki == 0: phi_coeff[ki,:ki+1] = np.sqrt(2/np.pi) phi_2x_coeff[ki,:ki+1] = np.sqrt(2/np.pi) * np.sqrt(2) else: coeff_ = Poly(chebyshevt(ki, 2*x-1), x).all_coeffs() phi_coeff[ki,:ki+1] = np.flip(2/np.sqrt(np.pi) * np.array(coeff_).astype(np.float64)) coeff_ = Poly(chebyshevt(ki, 4*x-1), x).all_coeffs() phi_2x_coeff[ki,:ki+1] = np.flip(np.sqrt(2) * 2 / np.sqrt(np.pi) * np.array(coeff_).astype(np.float64)) phi = [partial(phi_, phi_coeff[i,:]) for i in range(k)] x = Symbol('x') kUse = 2*k roots = Poly(chebyshevt(kUse, 2*x-1)).all_roots() x_m = np.array([rt.evalf(20) for rt in roots]).astype(np.float64) # x_m[x_m==0.5] = 0.5 + 1e-8 # add small noise to avoid the case of 0.5 belonging to both phi(2x) and phi(2x-1) # not needed for our purpose here, we use even k always to avoid wm = np.pi / kUse / 2 psi1_coeff = np.zeros((k, k)) psi2_coeff = np.zeros((k, k)) psi1 = [[] for _ in range(k)] psi2 = [[] for _ in range(k)] for ki in range(k): psi1_coeff[ki,:] = phi_2x_coeff[ki,:] for i in range(k): proj_ = (wm * phi[i](x_m) * np.sqrt(2)* phi[ki](2*x_m)).sum() psi1_coeff[ki,:] -= proj_ * phi_coeff[i,:] psi2_coeff[ki,:] -= proj_ * phi_coeff[i,:] for j in range(ki): proj_ = (wm * psi1[j](x_m) * np.sqrt(2) * phi[ki](2*x_m)).sum() psi1_coeff[ki,:] -= proj_ * psi1_coeff[j,:] psi2_coeff[ki,:] -= proj_ * psi2_coeff[j,:] psi1[ki] = partial(phi_, psi1_coeff[ki,:], lb = 0, ub = 0.5) psi2[ki] = partial(phi_, psi2_coeff[ki,:], lb = 0.5, ub = 1) norm1 = (wm * psi1[ki](x_m) * psi1[ki](x_m)).sum() norm2 = (wm * psi2[ki](x_m) * psi2[ki](x_m)).sum() norm_ = np.sqrt(norm1 + norm2) psi1_coeff[ki,:] /= norm_ psi2_coeff[ki,:] /= norm_ psi1_coeff[np.abs(psi1_coeff)<1e-8] = 0 psi2_coeff[np.abs(psi2_coeff)<1e-8] = 0 psi1[ki] = partial(phi_, psi1_coeff[ki,:], lb = 0, ub = 
0.5+1e-16) psi2[ki] = partial(phi_, psi2_coeff[ki,:], lb = 0.5+1e-16, ub = 1) return phi, psi1, psi2 def get_filter(base, k): def psi(psi1, psi2, i, inp): mask = (inp<=0.5) * 1.0 return psi1[i](inp) * mask + psi2[i](inp) * (1-mask) if base not in ['legendre', 'chebyshev']: raise Exception('Base not supported') x = Symbol('x') H0 = np.zeros((k,k)) H1 = np.zeros((k,k)) G0 = np.zeros((k,k)) G1 = np.zeros((k,k)) PHI0 = np.zeros((k,k)) PHI1 = np.zeros((k,k)) phi, psi1, psi2 = get_phi_psi(k, base) if base == 'legendre': roots = Poly(legendre(k, 2*x-1)).all_roots() x_m = np.array([rt.evalf(20) for rt in roots]).astype(np.float64) wm = 1/k/legendreDer(k,2*x_m-1)/eval_legendre(k-1,2*x_m-1) for ki in range(k): for kpi in range(k): H0[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki](x_m/2) * phi[kpi](x_m)).sum() G0[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, x_m/2) * phi[kpi](x_m)).sum() H1[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki]((x_m+1)/2) * phi[kpi](x_m)).sum() G1[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, (x_m+1)/2) * phi[kpi](x_m)).sum() PHI0 = np.eye(k) PHI1 = np.eye(k) elif base == 'chebyshev': x = Symbol('x') kUse = 2*k roots = Poly(chebyshevt(kUse, 2*x-1)).all_roots() x_m = np.array([rt.evalf(20) for rt in roots]).astype(np.float64) # x_m[x_m==0.5] = 0.5 + 1e-8 # add small noise to avoid the case of 0.5 belonging to both phi(2x) and phi(2x-1) # not needed for our purpose here, we use even k always to avoid wm = np.pi / kUse / 2 for ki in range(k): for kpi in range(k): H0[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki](x_m/2) * phi[kpi](x_m)).sum() G0[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, x_m/2) * phi[kpi](x_m)).sum() H1[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki]((x_m+1)/2) * phi[kpi](x_m)).sum() G1[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, (x_m+1)/2) * phi[kpi](x_m)).sum() PHI0[ki, kpi] = (wm * phi[ki](2*x_m) * phi[kpi](2*x_m)).sum() * 2 PHI1[ki, kpi] = (wm * phi[ki](2*x_m-1) * phi[kpi](2*x_m-1)).sum() * 2 PHI0[np.abs(PHI0)<1e-8] = 0 
PHI1[np.abs(PHI1)<1e-8] = 0 H0[np.abs(H0)<1e-8] = 0 H1[np.abs(H1)<1e-8] = 0 G0[np.abs(G0)<1e-8] = 0 G1[np.abs(G1)<1e-8] = 0 return H0, H1, G0, G1, PHI0, PHI1
null
13,493
import torch import torch.nn as nn import numpy as np from functools import partial from scipy.special import eval_legendre from sympy import Poly, legendre, Symbol, chebyshevt def train(model, train_loader, optimizer, epoch, device, verbose = 0, lossFn = None, lr_schedule=None, post_proc = lambda args: args): if lossFn is None: lossFn = nn.MSELoss() model.train() total_loss = 0. for batch_idx, (data, target) in enumerate(train_loader): bs = len(data) data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) target = post_proc(target) output = post_proc(output) loss = lossFn(output.view(bs, -1), target.view(bs, -1)) loss.backward() optimizer.step() total_loss += loss.sum().item() if lr_schedule is not None: lr_schedule.step() if verbose>0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) return total_loss/len(train_loader.dataset)
null
13,494
import time import torch import torch.nn as nn import numpy as np import math from torch.nn.functional import interpolate def decor_time(func): def func2(*args, **kw): now = time.time() y = func(*args, **kw) t = time.time() - now print('call <{}>, time={}'.format(func.__name__, t)) return y return func2
null
13,495
from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom,Dataset_sin from torch.utils.data import DataLoader data_dict = { 'ETTh1': Dataset_ETT_hour, 'ETTh2': Dataset_ETT_hour, 'ETTm1': Dataset_ETT_minute, 'ETTm2': Dataset_ETT_minute, 'custom': Dataset_Custom, 'sin':Dataset_sin, } def data_provider(args, flag): Data = data_dict[args.data] timeenc = 0 if args.embed != 'timeF' else 1 if flag == 'test': shuffle_flag = False drop_last = True batch_size = args.batch_size freq = args.freq elif flag == 'pred': shuffle_flag = False drop_last = False batch_size = 1 freq = args.detail_freq Data = Dataset_Pred else: shuffle_flag = True drop_last = True batch_size = args.batch_size freq = args.freq data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target, timeenc=timeenc, freq=freq ) print(flag, len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader
null
13,496
from typing import List import numpy as np import pandas as pd from pandas.tseries import offsets from pandas.tseries.frequencies import to_offset def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: def time_features(dates, freq='h'): return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)])
null
13,497
import numpy as np import torch import matplotlib.pyplot as plt def adjust_learning_rate(optimizer, epoch, args): # lr = args.learning_rate * (0.2 ** (epoch // 2)) if args.lradj == 'type1': lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))} elif args.lradj == 'type2': lr_adjust = { 2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6, 10: 5e-7, 15: 1e-7, 20: 5e-8 } elif args.lradj =='type3': lr_adjust = {epoch: args.learning_rate} elif args.lradj == 'type4': lr_adjust = {epoch: args.learning_rate * (0.9 ** ((epoch - 1) // 1))} if epoch in lr_adjust.keys(): lr = lr_adjust[epoch] for param_group in optimizer.param_groups: param_group['lr'] = lr print('Updating learning rate to {}'.format(lr))
null
13,498
import numpy as np import torch import matplotlib.pyplot as plt plt.switch_backend('agg') The provided code snippet includes necessary dependencies for implementing the `visual` function. Write a Python function `def visual(true, preds=None, name='./pic/test.pdf')` to solve the following problem: Results visualization Here is the function: def visual(true, preds=None, name='./pic/test.pdf'): """ Results visualization """ plt.figure() plt.plot(true, label='GroundTruth', linewidth=2) if preds is not None: plt.plot(preds, label='Prediction', linewidth=2) plt.legend() plt.savefig(name, bbox_inches='tight')
Results visualization
13,499
import numpy as np def MAE(pred, true): return np.mean(np.abs(pred - true)) def MSE(pred, true): return np.mean((pred - true) ** 2) def RMSE(pred, true): return np.sqrt(MSE(pred, true)) def MAPE(pred, true): return np.mean(np.abs((pred - true) / true)) def MSPE(pred, true): return np.mean(np.square((pred - true) / true)) def metric(pred, true): mae = MAE(pred, true) mse = MSE(pred, true) rmse = RMSE(pred, true) mape = MAPE(pred, true) mspe = MSPE(pred, true) return mae, mse, rmse, mape, mspe
null
13,500
import numpy as np def RSE(pred, true): return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) def CORR(pred, true): u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) return (u / d).mean(-1) def MAE(pred, true): return np.mean(np.abs(pred - true)) def MSE(pred, true): return np.mean((pred - true) ** 2) def RMSE(pred, true): return np.sqrt(MSE(pred, true)) def MAPE(pred, true): return np.mean(np.abs((pred - true) / true)) def MSPE(pred, true): return np.mean(np.square((pred - true) / true)) def metric2(pred, true): mae = MAE(pred, true) mse = MSE(pred, true) rmse = RMSE(pred, true) mape = MAPE(pred, true) mspe = MSPE(pred, true) rse = RSE(pred, true) corr = CORR(pred, true) return mae, mse, rmse, mape, mspe, rse, corr
null
13,501
from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred from torch.utils.data import DataLoader data_dict = { 'ETTh1': Dataset_ETT_hour, 'ETTh2': Dataset_ETT_hour, 'ETTm1': Dataset_ETT_minute, 'ETTm2': Dataset_ETT_minute, 'custom': Dataset_Custom, } class Dataset_Pred(Dataset): def __init__(self, root_path, flag='pred', size=None, features='S', data_path='ETTh1.csv', target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None): def __read_data__(self): def __getitem__(self, index): def __len__(self): def inverse_transform(self, data): def data_provider(args, flag): Data = data_dict[args.data] timeenc = 0 if args.embed != 'timeF' else 1 if flag == 'test': shuffle_flag = False drop_last = True batch_size = args.batch_size freq = args.freq elif flag == 'pred': shuffle_flag = False drop_last = False batch_size = 1 freq = args.freq Data = Dataset_Pred else: shuffle_flag = True drop_last = True batch_size = args.batch_size freq = args.freq data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target, timeenc=timeenc, freq=freq ) print(flag, len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader
null
13,502
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from tqdm import tqdm import pmdarima as pm import threading from sklearn.ensemble import GradientBoostingRegressor def _arima(seq,pred_len,bt,i): model = pm.auto_arima(seq) forecasts = model.predict(pred_len) return forecasts,bt,i
null
13,503
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from tqdm import tqdm import pmdarima as pm import threading from sklearn.ensemble import GradientBoostingRegressor def _sarima(season,seq,pred_len,bt,i): model = pm.auto_arima(seq, seasonal=True, m=season) forecasts = model.predict(pred_len) return forecasts,bt,i
null
13,504
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from tqdm import tqdm import pmdarima as pm import threading from sklearn.ensemble import GradientBoostingRegressor def _gbrt(seq,seq_len,pred_len,bt,i): model = GradientBoostingRegressor() model.fit(np.arange(seq_len).reshape(-1,1),seq.reshape(-1,1)) forecasts = model.predict(np.arange(seq_len,seq_len+pred_len).reshape(-1,1)) return forecasts,bt,i
null
13,506
import numpy as np import torch import matplotlib.pyplot as plt import time def adjust_learning_rate(optimizer, scheduler, epoch, args, printout=True): # lr = args.learning_rate * (0.2 ** (epoch // 2)) if args.lradj == 'type1': lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))} elif args.lradj == 'type2': lr_adjust = { 2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6, 10: 5e-7, 15: 1e-7, 20: 5e-8 } elif args.lradj == 'type3': lr_adjust = {epoch: args.learning_rate if epoch < 3 else args.learning_rate * (0.9 ** ((epoch - 3) // 1))} elif args.lradj == 'constant': lr_adjust = {epoch: args.learning_rate} elif args.lradj == '3': lr_adjust = {epoch: args.learning_rate if epoch < 10 else args.learning_rate*0.1} elif args.lradj == '4': lr_adjust = {epoch: args.learning_rate if epoch < 15 else args.learning_rate*0.1} elif args.lradj == '5': lr_adjust = {epoch: args.learning_rate if epoch < 25 else args.learning_rate*0.1} elif args.lradj == '6': lr_adjust = {epoch: args.learning_rate if epoch < 5 else args.learning_rate*0.1} elif args.lradj == 'TST': lr_adjust = {epoch: scheduler.get_last_lr()[0]} if epoch in lr_adjust.keys(): lr = lr_adjust[epoch] for param_group in optimizer.param_groups: param_group['lr'] = lr if printout: print('Updating learning rate to {}'.format(lr))
null
13,507
import numpy as np import torch import matplotlib.pyplot as plt import time plt.switch_backend('agg') The provided code snippet includes necessary dependencies for implementing the `visual` function. Write a Python function `def visual(true, preds=None, name='./pic/test.pdf')` to solve the following problem: Results visualization Here is the function: def visual(true, preds=None, name='./pic/test.pdf'): """ Results visualization """ plt.figure() plt.plot(true, label='GroundTruth', linewidth=2) if preds is not None: plt.plot(preds, label='Prediction', linewidth=2) plt.legend() plt.savefig(name, bbox_inches='tight')
Results visualization
13,508
import numpy as np import torch import matplotlib.pyplot as plt import time The provided code snippet includes necessary dependencies for implementing the `test_params_flop` function. Write a Python function `def test_params_flop(model,x_shape)` to solve the following problem: If you want to thest former's flop, you need to give default value to inputs in model.forward(), the following code can only pass one argument to forward() Here is the function: def test_params_flop(model,x_shape): """ If you want to thest former's flop, you need to give default value to inputs in model.forward(), the following code can only pass one argument to forward() """ model_params = 0 for parameter in model.parameters(): model_params += parameter.numel() print('INFO: Trainable parameter count: {:.2f}M'.format(model_params / 1000000.0)) from ptflops import get_model_complexity_info with torch.cuda.device(0): macs, params = get_model_complexity_info(model.cuda(), x_shape, as_strings=True, print_per_layer_stat=True) # print('Flops:' + flops) # print('Params:' + params) print('{:<30} {:<8}'.format('Computational complexity: ', macs)) print('{:<30} {:<8}'.format('Number of parameters: ', params))
If you want to thest former's flop, you need to give default value to inputs in model.forward(), the following code can only pass one argument to forward()
13,509
import numpy as np def RSE(pred, true): def CORR(pred, true): def MAE(pred, true): def MSE(pred, true): def RMSE(pred, true): def MAPE(pred, true): def MSPE(pred, true): def metric(pred, true): mae = MAE(pred, true) mse = MSE(pred, true) rmse = RMSE(pred, true) mape = MAPE(pred, true) mspe = MSPE(pred, true) rse = RSE(pred, true) corr = CORR(pred, true) return mae, mse, rmse, mape, mspe, rse, corr
null
13,510
import os from config import ANTHROPIC_API_KEY from llm import stream_claude_response, stream_openai_response from prompts import assemble_prompt from prompts.types import Stack ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY", None) async def stream_openai_response( messages: List[ChatCompletionMessageParam], api_key: str, base_url: str | None, callback: Callable[[str], Awaitable[None]], ) -> str: client = AsyncOpenAI(api_key=api_key, base_url=base_url) model = Llm.GPT_4_VISION # Base parameters params = { "model": model.value, "messages": messages, "stream": True, "timeout": 600, } # Add 'max_tokens' only if the model is a GPT4 vision model if model == Llm.GPT_4_VISION: params["max_tokens"] = 4096 params["temperature"] = 0 stream = await client.chat.completions.create(**params) # type: ignore full_response = "" async for chunk in stream: # type: ignore assert isinstance(chunk, ChatCompletionChunk) content = chunk.choices[0].delta.content or "" full_response += content await callback(content) await client.close() return full_response async def stream_claude_response( messages: List[ChatCompletionMessageParam], api_key: str, callback: Callable[[str], Awaitable[None]], ) -> str: client = AsyncAnthropic(api_key=api_key) # Base parameters model = Llm.CLAUDE_3_SONNET max_tokens = 4096 temperature = 0.0 # Translate OpenAI messages to Claude messages system_prompt = cast(str, messages[0].get("content")) claude_messages = [dict(message) for message in messages[1:]] for message in claude_messages: if not isinstance(message["content"], list): continue for content in message["content"]: # type: ignore if content["type"] == "image_url": content["type"] = "image" # Extract base64 data and media type from data URL # Example base64 data URL: data:image/png;base64,iVBOR... 
image_data_url = cast(str, content["image_url"]["url"]) media_type = image_data_url.split(";")[0].split(":")[1] base64_data = image_data_url.split(",")[1] # Remove OpenAI parameter del content["image_url"] content["source"] = { "type": "base64", "media_type": media_type, "data": base64_data, } # Stream Claude response async with client.messages.stream( model=model.value, max_tokens=max_tokens, temperature=temperature, system=system_prompt, messages=claude_messages, # type: ignore ) as stream: async for text in stream.text_stream: await callback(text) # Return final message response = await stream.get_final_message() # Close the Anthropic client await client.close() return response.content[0].text def assemble_prompt( image_data_url: str, stack: Stack, result_image_data_url: Union[str, None] = None, ) -> List[ChatCompletionMessageParam]: system_content = SYSTEM_PROMPTS[stack] user_prompt = USER_PROMPT if stack != "svg" else SVG_USER_PROMPT user_content: List[ChatCompletionContentPartParam] = [ { "type": "image_url", "image_url": {"url": image_data_url, "detail": "high"}, }, { "type": "text", "text": user_prompt, }, ] # Include the result image if it exists if result_image_data_url: user_content.insert( 1, { "type": "image_url", "image_url": {"url": result_image_data_url, "detail": "high"}, }, ) return [ { "role": "system", "content": system_content, }, { "role": "user", "content": user_content, }, ] Stack = Literal[ "html_tailwind", "react_tailwind", "bootstrap", "ionic_tailwind", "vue_tailwind", "svg", ] async def generate_code_core(image_url: str, stack: Stack) -> str: model = "CLAUDE" prompt_messages = assemble_prompt(image_url, stack) openai_api_key = os.environ.get("OPENAI_API_KEY") anthropic_api_key = ANTHROPIC_API_KEY openai_base_url = None async def process_chunk(content: str): pass if model == "CLAUDE": if not anthropic_api_key: raise Exception("Anthropic API key not found") completion = await stream_claude_response( prompt_messages, 
api_key=anthropic_api_key, callback=lambda x: process_chunk(x), ) else: if not openai_api_key: raise Exception("OpenAI API key not found") completion = await stream_openai_response( prompt_messages, api_key=openai_api_key, base_url=openai_base_url, callback=lambda x: process_chunk(x), ) return completion
null
13,511
from fastapi import APIRouter from fastapi.responses import HTMLResponse async def get_status(): return HTMLResponse( content="<h3>Your backend is running correctly. Please open the front-end URL (default is http://localhost:5173) to use screenshot-to-code.</h3>" )
null
13,512
import os import traceback from fastapi import APIRouter, WebSocket import openai from config import ANTHROPIC_API_KEY, IS_PROD, SHOULD_MOCK_AI_RESPONSE from custom_types import InputMode from llm import ( CODE_GENERATION_MODELS, Llm, stream_claude_response, stream_claude_response_native, stream_openai_response, ) from openai.types.chat import ChatCompletionMessageParam from mock_llm import mock_completion from typing import Dict, List, cast, get_args from image_generation import create_alt_url_mapping, generate_images from prompts import assemble_imported_code_prompt, assemble_prompt from access_token import validate_access_token from datetime import datetime import json from prompts.claude_prompts import VIDEO_PROMPT from prompts.types import Stack from video.utils import extract_tag_content, assemble_claude_prompt_video def write_logs(prompt_messages: List[ChatCompletionMessageParam], completion: str): ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY", None) SHOULD_MOCK_AI_RESPONSE = bool(os.environ.get("MOCK", False)) IS_PROD = os.environ.get("IS_PROD", False) InputMode = Literal[ "image", "video", ] class Llm(Enum): CODE_GENERATION_MODELS = [ "gpt_4_vision", "claude_3_sonnet", ] async def stream_openai_response( messages: List[ChatCompletionMessageParam], api_key: str, base_url: str | None, callback: Callable[[str], Awaitable[None]], ) -> str: async def stream_claude_response( messages: List[ChatCompletionMessageParam], api_key: str, callback: Callable[[str], Awaitable[None]], ) -> str: async def stream_claude_response_native( system_prompt: str, messages: list[Any], api_key: str, callback: Callable[[str], Awaitable[None]], include_thinking: bool = False, model: Llm = Llm.CLAUDE_3_OPUS, ) -> str: async def mock_completion( process_chunk: Callable[[str], Awaitable[None]], input_mode: InputMode ) -> str: def create_alt_url_mapping(code: str) -> Dict[str, str]: async def generate_images( code: str, api_key: str, base_url: Union[str, None], image_cache: 
Dict[str, str] ): def assemble_imported_code_prompt( code: str, stack: Stack, result_image_data_url: Union[str, None] = None ) -> List[ChatCompletionMessageParam]: def assemble_prompt( image_data_url: str, stack: Stack, result_image_data_url: Union[str, None] = None, ) -> List[ChatCompletionMessageParam]: async def validate_access_token(access_code: str): VIDEO_PROMPT = """ You are an expert at building single page, funtional apps using HTML, Jquery and Tailwind CSS. You also have perfect vision and pay great attention to detail. You will be given screenshots in order at consistent intervals from a video of a user interacting with a web app. You need to re-create the same app exactly such that the same user interactions will produce the same results in the app you build. - Make sure the app looks exactly like the screenshot. - Pay close attention to background color, text color, font size, font family, padding, margin, border, etc. Match the colors and sizes exactly. - For images, use placeholder images from https://placehold.co and include a detailed description of the image in the alt text so that an image generation AI can generate the image later. - If some fuctionality requires a backend call, just mock the data instead. - MAKE THE APP FUNCTIONAL using Javascript. Allow the user to interact with the app and get the same behavior as the video. In terms of libraries, - Use this script to include Tailwind: <script src="https://cdn.tailwindcss.com"></script> - You can use Google Fonts - Font Awesome for icons: <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.3/css/all.min.css"></link> - Use jQuery: <script src="https://code.jquery.com/jquery-3.7.1.min.js"></script> Before generating the code for the app, think step-by-step: first, about the user flow depicated in the video and then about you how would you build it and how you would structure the code. Do the thinking within <thinking></thinking> tags. 
Then, provide your code within <html></html> tags. """ Stack = Literal[ "html_tailwind", "react_tailwind", "bootstrap", "ionic_tailwind", "vue_tailwind", "svg", ] async def assemble_claude_prompt_video(video_data_url: str) -> list[Any]: def extract_tag_content(tag: str, text: str) -> str: async def stream_code(websocket: WebSocket): await websocket.accept() print("Incoming websocket connection...") async def throw_error( message: str, ): await websocket.send_json({"type": "error", "value": message}) await websocket.close() # TODO: Are the values always strings? params: Dict[str, str] = await websocket.receive_json() print("Received params") # Read the code config settings from the request. Fall back to default if not provided. generated_code_config = "" if "generatedCodeConfig" in params and params["generatedCodeConfig"]: generated_code_config = params["generatedCodeConfig"] if not generated_code_config in get_args(Stack): await throw_error(f"Invalid generated code config: {generated_code_config}") return # Cast the variable to the Stack type valid_stack = cast(Stack, generated_code_config) # Validate the input mode input_mode = params.get("inputMode") if not input_mode in get_args(InputMode): await throw_error(f"Invalid input mode: {input_mode}") raise Exception(f"Invalid input mode: {input_mode}") # Cast the variable to the right type validated_input_mode = cast(InputMode, input_mode) # Read the model from the request. Fall back to default if not provided. code_generation_model = params.get("codeGenerationModel", "gpt_4_vision") if code_generation_model not in CODE_GENERATION_MODELS: await throw_error(f"Invalid model: {code_generation_model}") raise Exception(f"Invalid model: {code_generation_model}") exact_llm_version = None print( f"Generating {generated_code_config} code for uploaded {input_mode} using {code_generation_model} model..." ) # Get the OpenAI API key from the request. Fall back to environment variable if not provided. 
# If neither is provided, we throw an error. openai_api_key = None if "accessCode" in params and params["accessCode"]: print("Access code - using platform API key") res = await validate_access_token(params["accessCode"]) if res["success"]: openai_api_key = os.environ.get("PLATFORM_OPENAI_API_KEY") else: await websocket.send_json( { "type": "error", "value": res["failure_reason"], } ) return else: if params["openAiApiKey"]: openai_api_key = params["openAiApiKey"] print("Using OpenAI API key from client-side settings dialog") else: openai_api_key = os.environ.get("OPENAI_API_KEY") if openai_api_key: print("Using OpenAI API key from environment variable") if not openai_api_key and code_generation_model == "gpt_4_vision": print("OpenAI API key not found") await websocket.send_json( { "type": "error", "value": "No OpenAI API key found. Please add your API key in the settings dialog or add it to backend/.env file. If you add it to .env, make sure to restart the backend server.", } ) return # Get the OpenAI Base URL from the request. Fall back to environment variable if not provided. openai_base_url = None # Disable user-specified OpenAI Base URL in prod if not os.environ.get("IS_PROD"): if "openAiBaseURL" in params and params["openAiBaseURL"]: openai_base_url = params["openAiBaseURL"] print("Using OpenAI Base URL from client-side settings dialog") else: openai_base_url = os.environ.get("OPENAI_BASE_URL") if openai_base_url: print("Using OpenAI Base URL from environment variable") if not openai_base_url: print("Using official OpenAI URL") # Get the image generation flag from the request. Fall back to True if not provided. 
should_generate_images = ( params["isImageGenerationEnabled"] if "isImageGenerationEnabled" in params else True ) print("generating code...") await websocket.send_json({"type": "status", "value": "Generating code..."}) async def process_chunk(content: str): await websocket.send_json({"type": "chunk", "value": content}) # Image cache for updates so that we don't have to regenerate images image_cache: Dict[str, str] = {} # If this generation started off with imported code, we need to assemble the prompt differently if params.get("isImportedFromCode") and params["isImportedFromCode"]: original_imported_code = params["history"][0] prompt_messages = assemble_imported_code_prompt( original_imported_code, valid_stack ) for index, text in enumerate(params["history"][1:]): if index % 2 == 0: message: ChatCompletionMessageParam = { "role": "user", "content": text, } else: message: ChatCompletionMessageParam = { "role": "assistant", "content": text, } prompt_messages.append(message) else: # Assemble the prompt try: if params.get("resultImage") and params["resultImage"]: prompt_messages = assemble_prompt( params["image"], valid_stack, params["resultImage"] ) else: prompt_messages = assemble_prompt(params["image"], valid_stack) except: await websocket.send_json( { "type": "error", "value": "Error assembling prompt. 
Contact support at support@picoapps.xyz", } ) await websocket.close() return if params["generationType"] == "update": # Transform the history tree into message format # TODO: Move this to frontend for index, text in enumerate(params["history"]): if index % 2 == 0: message: ChatCompletionMessageParam = { "role": "assistant", "content": text, } else: message: ChatCompletionMessageParam = { "role": "user", "content": text, } prompt_messages.append(message) image_cache = create_alt_url_mapping(params["history"][-2]) if validated_input_mode == "video": video_data_url = params["image"] prompt_messages = await assemble_claude_prompt_video(video_data_url) # pprint_prompt(prompt_messages) # type: ignore if SHOULD_MOCK_AI_RESPONSE: completion = await mock_completion( process_chunk, input_mode=validated_input_mode ) else: try: if validated_input_mode == "video": if not ANTHROPIC_API_KEY: await throw_error( "Video only works with Anthropic models. No Anthropic API key found. Please add the environment variable ANTHROPIC_API_KEY to backend/.env" ) raise Exception("No Anthropic key") completion = await stream_claude_response_native( system_prompt=VIDEO_PROMPT, messages=prompt_messages, # type: ignore api_key=ANTHROPIC_API_KEY, callback=lambda x: process_chunk(x), model=Llm.CLAUDE_3_OPUS, include_thinking=True, ) exact_llm_version = Llm.CLAUDE_3_OPUS elif code_generation_model == "claude_3_sonnet": if not ANTHROPIC_API_KEY: await throw_error( "No Anthropic API key found. 
Please add the environment variable ANTHROPIC_API_KEY to backend/.env" ) raise Exception("No Anthropic key") completion = await stream_claude_response( prompt_messages, # type: ignore api_key=ANTHROPIC_API_KEY, callback=lambda x: process_chunk(x), ) exact_llm_version = Llm.CLAUDE_3_SONNET else: completion = await stream_openai_response( prompt_messages, # type: ignore api_key=openai_api_key, base_url=openai_base_url, callback=lambda x: process_chunk(x), ) exact_llm_version = Llm.GPT_4_VISION except openai.AuthenticationError as e: print("[GENERATE_CODE] Authentication failed", e) error_message = ( "Incorrect OpenAI key. Please make sure your OpenAI API key is correct, or create a new OpenAI API key on your OpenAI dashboard." + ( " Alternatively, you can purchase code generation credits directly on this website." if IS_PROD else "" ) ) return await throw_error(error_message) except openai.NotFoundError as e: print("[GENERATE_CODE] Model not found", e) error_message = ( e.message + ". Please make sure you have followed the instructions correctly to obtain an OpenAI key with GPT vision access: https://github.com/abi/screenshot-to-code/blob/main/Troubleshooting.md" + ( " Alternatively, you can purchase code generation credits directly on this website." if IS_PROD else "" ) ) return await throw_error(error_message) except openai.RateLimitError as e: print("[GENERATE_CODE] Rate limit exceeded", e) error_message = ( "OpenAI error - 'You exceeded your current quota, please check your plan and billing details.'" + ( " Alternatively, you can purchase code generation credits directly on this website." 
if IS_PROD else "" ) ) return await throw_error(error_message) if validated_input_mode == "video": completion = extract_tag_content("html", completion) print("Exact used model for generation: ", exact_llm_version) # Write the messages dict into a log so that we can debug later write_logs(prompt_messages, completion) # type: ignore try: if should_generate_images: await websocket.send_json( {"type": "status", "value": "Generating images..."} ) updated_html = await generate_images( completion, api_key=openai_api_key, base_url=openai_base_url, image_cache=image_cache, ) else: updated_html = completion await websocket.send_json({"type": "setCode", "value": updated_html}) await websocket.send_json( {"type": "status", "value": "Code generation complete."} ) except Exception as e: traceback.print_exc() print("Image generation failed", e) # Send set code even if image generation fails since that triggers # the frontend to update history await websocket.send_json({"type": "setCode", "value": completion}) await websocket.send_json( {"type": "status", "value": "Image generation failed but code is complete."} ) await websocket.close()
null
13,513
import base64  # FIX: image_to_data_url below uses base64 but it was never imported (NameError)
import os

from fastapi import APIRouter
from pydantic import BaseModel

# NOTE(review): these project imports are immediately shadowed by the local
# definitions below; kept so the module's import surface is unchanged.
from evals.utils import image_to_data_url
from evals.config import EVALS_DIR


class Eval(BaseModel):
    """One evaluation pair: an input screenshot and the HTML generated for it."""

    # input: data URL of the input screenshot (data:image/png;base64,...)
    input: str
    # output: generated HTML, or a placeholder message when the file is missing
    output: str


async def image_to_data_url(filepath: str) -> str:
    """Read an image file from disk and return it as a base64 PNG data URL."""
    with open(filepath, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode()
    return f"data:image/png;base64,{encoded_string}"


# Root folder holding the eval fixtures (inputs/*.png paired with outputs/*.html).
EVALS_DIR = "./evals_data"


async def get_evals() -> list[Eval]:
    """Collect all evals from EVALS_DIR.

    Pairs every ``inputs/<name>.png`` with ``outputs/<name>.html``; when the
    output file does not exist, a placeholder message is stored instead so the
    caller still sees the input.
    """
    input_dir = os.path.join(EVALS_DIR, "inputs")
    output_dir = os.path.join(EVALS_DIR, "outputs")

    evals: list[Eval] = []
    for file in os.listdir(input_dir):
        if not file.endswith(".png"):
            continue

        input_file_path = os.path.join(input_dir, file)
        input_file = await image_to_data_url(input_file_path)

        # The output file shares the input's stem, with an .html extension.
        output_file_name = file.replace(".png", ".html")
        output_file_path = os.path.join(output_dir, output_file_name)

        if os.path.exists(output_file_path):
            with open(output_file_path, "r") as f:
                output_file_data = f.read()
        else:
            output_file_data = "Output file not found."

        evals.append(
            Eval(
                input=input_file,
                output=output_file_data,
            )
        )

    return evals
null
13,514
import base64
from fastapi import APIRouter
from pydantic import BaseModel
import httpx

# NOTE(review): the helper defs and the two models below appear body-less in
# this chunk — signatures only. Their implementations are not visible here, so
# the comments describe only what app_screenshot's usage of them demonstrates.

# Converts raw image bytes plus a MIME type into a data URL string
# (implementation not shown in this chunk).
def bytes_to_data_url(image_bytes: bytes, mime_type: str) -> str:

# Captures a screenshot of target_url using the given API key and returns the
# raw image bytes (implementation not shown; presumably performed via httpx,
# which is imported above — TODO confirm).
async def capture_screenshot(
    target_url: str, api_key: str, device: str = "desktop"
) -> bytes:

# Request body for the screenshot endpoint. app_screenshot reads `url` and
# `apiKey` from it, so it presumably declares at least those fields — confirm.
class ScreenshotRequest(BaseModel):

# Response body for the screenshot endpoint. app_screenshot constructs it with
# a `url` keyword, so it presumably declares at least that field — confirm.
class ScreenshotResponse(BaseModel):

async def app_screenshot(request: ScreenshotRequest):
    """Take a screenshot of the requested URL and return it as a PNG data URL."""
    # Extract the URL from the request body
    url = request.url
    api_key = request.apiKey

    # TODO: Add error handling
    image_bytes = await capture_screenshot(url, api_key=api_key)

    # Convert the image bytes to a data url
    data_url = bytes_to_data_url(image_bytes, "image/png")

    return ScreenshotResponse(url=data_url)
null
13,515
from dataclasses import dataclass, replace
from typing import List, Tuple
from minichain import OpenAI, prompt, show, transform, Mock

# NOTE(review): `State` is not defined in this chunk. From its use with
# dataclasses.replace(..., human_input=...) and .push(...) it is presumably a
# dataclass carrying the running conversation — confirm against the full file.

def chat_response(model, state: State) -> State:
    # Stream the model's reply for the current conversation state.
    # NOTE(review): chat() below calls this with a single argument although the
    # signature takes two; a decorator (e.g. minichain's @prompt) presumably
    # supplies `model` and was stripped from this chunk — TODO confirm.
    return model.stream(state)

def update(state, chat_output):
    # Keep only the text after the last "Assistant:" marker, then push that
    # reply onto the conversation state.
    result = chat_output.split("Assistant:")[-1]
    return state.push(result)

def chat(command, state):
    # Record the human's input on the state, obtain the model's response, and
    # fold the reply back into the state.
    state = replace(state, human_input=command)
    return update(state, chat_response(state))
null