Columns:
  id — int64, values 0 to 190k
  prompt — string, lengths 21 to 13.4M characters
  docstring — string, lengths 1 to 12k characters

Rows follow as (id, prompt, docstring) triplets. Prompt cells are truncated previews ending in "..."; "null" marks a missing docstring.

1,859
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_mppd(pl_module, batch): infer = pl_mo...
null
1,860
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_mpfr(pl_module, batch): infer = pl_mo...
null
1,861
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def cost_matrix_cosine(x, y, eps=1e-5): """Compute...
null
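Row 1,861's prompt cuts off inside cost_matrix_cosine. As a reference for what such a function computes, here is a minimal sketch of a batched cosine cost matrix, assuming (B, N, D)/(B, M, D) inputs as in typical word-patch alignment code; the body is a reconstruction, not the dataset cell:

```python
import torch.nn.functional as F

def cost_matrix_cosine(x, y, eps=1e-5):
    """Pairwise cosine distance: x (B, N, D), y (B, M, D) -> (B, N, M)."""
    assert x.dim() == y.dim() == 3
    assert x.size(0) == y.size(0) and x.size(2) == y.size(2)
    x_norm = F.normalize(x, p=2, dim=-1, eps=eps)
    y_norm = F.normalize(y, p=2, dim=-1, eps=eps)
    # cosine similarity in [-1, 1] -> distance in [0, 2]
    return 1 - x_norm.matmul(y_norm.transpose(1, 2))
```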
1,862
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_imgcls(pl_module, batch): infer = pl_...
null
1,863
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_vqa(pl_module, batch): infer = pl_mod...
null
1,864
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_nlvr2(pl_module, batch): infer1 = pl_...
null
1,865
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_irtr(pl_module, batch): is_training_p...
null
1,866
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def init_weights(module): if isinstance(module, (...
null
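Row 1,866 truncates init_weights at the isinstance check. A sketch of the standard BERT-style initializer this pattern usually expands to (an assumption, not the cell's actual continuation):

```python
import torch.nn as nn

def init_weights(module):
    # normal(0, 0.02) for Linear/Embedding weights, identity-like LayerNorm
    if isinstance(module, (nn.Linear, nn.Embedding)):
        module.weight.data.normal_(mean=0.0, std=0.02)
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_()
```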
1,867
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def vqa_test_step(pl_module, batch, output): id2a...
null
1,868
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def arc_test_step(pl_module, batch, output): retu...
null
1,869
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def vqa_test_wrapup(outs, model_name): rank = tor...
null
1,870
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def arc_test_wrapup(outs, caplen, model_name): ra...
null
1,871
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
null
1,872
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
null
1,873
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
My custom 'small' ViT model. Depth=8, heads=8, mlp_ratio=3.
1,874
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
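The docstrings in rows 1,874 onward name timm model registrations. Assuming they follow timm's usual convention, each can be instantiated by name:

```python
import timm

# model name taken from the docstring above; weights download on first use
model = timm.create_model("vit_base_patch16_224", pretrained=True).eval()
```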
1,875
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
1,876
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,877
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,878
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
1,879
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
1,880
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,881
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,882
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,883
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,884
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,885
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,886
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. NOTE: converted weights not currently available, too large for github release hosting.
1,887
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,888
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,889
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights.
1,890
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights.
1,891
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights.
1,892
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
1,893
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,894
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,895
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,896
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,897
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,898
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,899
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,900
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helper...
DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,901
import torch from pytorch_lightning import LightningDataModule from torch.utils.data import DataLoader from transformers import ( DataCollatorForLanguageModeling, DataCollatorForWholeWordMask, BertTokenizer, ) def get_pretrained_tokenizer(from_pretrained): if torch.distributed.is_initialized(): ...
null
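Row 1,901's get_pretrained_tokenizer is guarded by torch.distributed.is_initialized(). A plausible completion, assuming the common rank-0-downloads-first pattern:

```python
import torch
from transformers import BertTokenizer

def get_pretrained_tokenizer(from_pretrained):
    if torch.distributed.is_initialized():
        # rank 0 warms the download cache; other ranks wait, then load locally
        if torch.distributed.get_rank() == 0:
            BertTokenizer.from_pretrained(from_pretrained)
        torch.distributed.barrier()
    return BertTokenizer.from_pretrained(from_pretrained)
```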
1,902
from sacred import Experiment def _loss_names(d): def config(): exp_name = "vilt" seed = 0 datasets = ["coco", "vg", "sbu", "gcc"] loss_names = _loss_names({"itm": 1, "mlm": 1}) batch_size = 4096 # this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller. ...
null
1,903
from sacred import Experiment def env_dandelin(): data_root = "/data2/dsets/dataset" log_dir = "/data2/vilt/result" num_gpus = 8 num_nodes = 1
null
1,904
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_mlm_itm(): exp_name = "mlm_itm" datasets = ["coco", "vg", "sbu", "gcc"] loss_names = _...
null
1,905
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_mlm_itm_randaug(): exp_name = "mlm_itm_randaug" datasets = ["coco", "vg", "sbu", "gcc"] ...
null
1,906
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_mlm_itm_mpp(): exp_name = "mlm_itm_mpp" datasets = ["coco", "vg", "sbu", "gcc"] loss_n...
null
1,907
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_nlvr2(): exp_name = "finetune_nlvr2" datasets = ["nlvr2"] loss_names = _loss_...
null
1,908
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_nlvr2_randaug(): exp_name = "finetune_nlvr2_randaug" datasets = ["nlvr2"] tra...
null
1,909
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_vqa(): exp_name = "finetune_vqa" datasets = ["vqa"] loss_names = _loss_names(...
null
1,910
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_vqa_randaug(): exp_name = "finetune_vqa_randaug" datasets = ["vqa"] train_tra...
null
1,911
from sacred import Experiment def _loss_names(d): def task_finetune_irtr_coco(): exp_name = "finetune_irtr_coco" datasets = ["coco"] loss_names = _loss_names({"itm": 0.5, "irtr": 1}) batch_size = 256 max_epoch = 10 max_steps = None warmup_steps = 0.1 get_recall_metric = True draw_fa...
null
1,912
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_irtr_coco_randaug(): exp_name = "finetune_irtr_coco_randaug" datasets = ["coco"] ...
null
1,913
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_irtr_f30k(): exp_name = "finetune_irtr_f30k" datasets = ["f30k"] loss_names =...
null
1,914
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_irtr_f30k_randaug(): exp_name = "finetune_irtr_f30k_randaug" datasets = ["f30k"] ...
null
1,915
from sacred import Experiment def step25k(): max_epoch = 100 max_steps = 25000
null
1,916
from sacred import Experiment def step50k(): max_epoch = 100 max_steps = 50000
null
1,917
from sacred import Experiment def step100k(): max_epoch = 100 max_steps = 100000
null
1,918
from sacred import Experiment def step200k(): max_epoch = 200 max_steps = 200000
null
1,919
from sacred import Experiment def vit32_base(): vit = "vit_base_patch32_384" patch_size = 32 hidden_size = 768 num_heads = 12 num_layers = 12
null
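Rows 1,902–1,919 are Sacred config fragments (task configs, stepXXk schedules, vit32_base). In Sacred these are typically stacked as named configs; a sketch of that registration pattern, with the experiment name assumed:

```python
from sacred import Experiment

ex = Experiment("vilt")  # name assumed for illustration

@ex.named_config
def step100k():
    # Sacred captures these locals as config overrides
    max_epoch = 100
    max_steps = 100000

@ex.named_config
def vit32_base():
    vit = "vit_base_patch32_384"
    patch_size = 32
    hidden_size = 768
```

Fragments then compose on the command line, e.g. something like `... with task_mlm_itm_randaug step100k vit32_base`.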
1,920
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45] assert -0.45 <= v <= 0.45 if random.random() > 0.5: v = -v v = v * img.size[0] return img.transform(i...
null
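Row 1,920's TranslateX stops at the transform call. A sketch of the usual RandAugment translation, assuming PIL's AFFINE coefficient layout:

```python
import random
import PIL.Image

def TranslateX(img, v):  # v: signed fraction of image width, |v| <= 0.45
    assert -0.45 <= v <= 0.45
    if random.random() > 0.5:
        v = -v
    v = v * img.size[0]
    # AFFINE coeffs (a, b, c, d, e, f): output (x, y) samples input (a*x+b*y+c, d*x+e*y+f)
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
```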
1,921
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45] assert -0.45 <= v <= 0.45 if random.random() > 0.5: v = -v v = v * img.size[1] return img.transform(i...
null
1,922
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def Invert(img, _): return PIL.ImageOps.invert(img)
null
1,923
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def Flip(img, _): # not from the paper return PIL.ImageOps.mirror(img)
null
1,924
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2] # assert 0 <= v <= 20 if v < 0: return img w, h = img.size x0 = np.random.uniform(w) y0 = np.random.uniform(...
null
1,925
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def SamplePairing(imgs): # [0, 0.4] def f(img1, v): i = np.random.choice(len(imgs)) img2 = PIL.Image.fromarray(imgs[i]) return PIL.Image.blend(img1, img2, v) r...
null
1,926
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def Identity(img, v): return img
null
1,927
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def ShearX(img, v): # [-0.3, 0.3] assert -0.3 <= v <= 0.3 if random.random() > 0.5: v = -v return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0)) def ShearY(im...
null
1,928
from .utils import ( inception_normalize, MinMaxResize, ) from torchvision import transforms from .randaug import RandAugment class MinMaxResize: def __init__(self, shorter=800, longer=1333): self.min = shorter self.max = longer def __call__(self, x): w, h = x.size scal...
null
1,929
from .utils import ( inception_normalize, MinMaxResize, ) from torchvision import transforms from .randaug import RandAugment class MinMaxResize: def __init__(self, shorter=800, longer=1333): self.min = shorter self.max = longer def __call__(self, x): w, h = x.size scal...
null
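Rows 1,928–1,929 both truncate MinMaxResize mid-__call__. A sketch of the scale logic such a resize usually implements, assuming the shorter/longer caps shown above and rounding to a 32-pixel multiple (the multiple is my assumption):

```python
from PIL import Image

class MinMaxResize:
    def __init__(self, shorter=800, longer=1333):
        self.min = shorter
        self.max = longer

    def __call__(self, x):
        w, h = x.size
        scale = self.min / min(w, h)           # shorter side -> self.min
        newh, neww = scale * h, scale * w
        if max(newh, neww) > self.max:         # cap the longer side at self.max
            scale = self.max / max(newh, neww)
            newh, neww = newh * scale, neww * scale
        # round down to a multiple of 32 (assumed patch size)
        newh, neww = int(newh + 0.5) // 32 * 32, int(neww + 0.5) // 32 * 32
        return x.resize((neww, newh), resample=Image.BICUBIC)
```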
1,930
import json import pandas as pd import pyarrow as pa import os from tqdm import tqdm from collections import defaultdict def process(root, iden, row): texts = [r["sentence"] for r in row] labels = [r["label"] for r in row] split = iden.split("-")[0] if iden.startswith("train"): directory = row[0...
null
1,931
import json import pandas as pd import pyarrow as pa import random import os from tqdm import tqdm from glob import glob from collections import defaultdict def path2rest(path, iid2captions, iid2split): name = path.split("/")[-1] with open(path, "rb") as fp: binary = fp.read() captions = iid2caption...
null
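Rows 1,930 onward are dataset-to-Arrow converters whose make_arrow bodies are cut off. The closing step of such converters typically writes a pandas frame through pyarrow; a sketch (function and column names are illustrative):

```python
import pandas as pd
import pyarrow as pa

def write_arrow(rows, columns, path):
    # rows: list of tuples, e.g. (binary_image, captions, image_id, split)
    dataframe = pd.DataFrame(rows, columns=columns)
    table = pa.Table.from_pandas(dataframe)
    with pa.OSFile(path, "wb") as sink:
        with pa.RecordBatchFileWriter(sink, table.schema) as writer:
            writer.write_table(table)
```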
1,932
import json import os import pandas as pd import pyarrow as pa import random from tqdm import tqdm from glob import glob from collections import defaultdict def path2rest(path, iid2captions, iid2split): name = path.split("/")[-1] with open(path, "rb") as fp: binary = fp.read() captions = iid2caption...
null
1,933
import json import pandas as pd import pyarrow as pa import random import os from tqdm import tqdm from glob import glob from collections import defaultdict def path2rest(path, iid2captions): name = path.split("/")[-1] iid = int(name[:-4]) with open(path, "rb") as fp: binary = fp.read() cdicts =...
null
1,934
import json import pandas as pd import pyarrow as pa import random import os from tqdm import tqdm from glob import glob from collections import defaultdict, Counter from .glossary import normalize_word def get_score(occurences): if occurences == 0: return 0.0 elif occurences == 1: return 0.3 ...
null
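Row 1,934's get_score shows only the 0 → 0.0 and 1 → 0.3 cases. These match the common VQA soft-accuracy table, so a plausible completion (the remaining cases are my assumption) is:

```python
def get_score(occurences):  # original spelling of the parameter kept
    # soft VQA score: saturates once 3+ annotators gave this answer
    if occurences == 0:
        return 0.0
    elif occurences == 1:
        return 0.3
    elif occurences == 2:
        return 0.6
    elif occurences == 3:
        return 0.9
    else:
        return 1.0
```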
1,935
import json import pandas as pd import pyarrow as pa import gc import random import os from tqdm import tqdm from glob import glob def path2rest(path, iid2captions): def make_arrow(root, dataset_root): with open(f"{root}/annot.json", "r") as fp: captions = json.load(fp) iid2captions = dict() for c...
null
1,936
import json import pandas as pd import pyarrow as pa import gc import random import os from tqdm import tqdm from glob import glob def path2rest(path, iid2captions): def make_arrow(root, dataset_root): for split in ["val", "train"]: with open(f"{root}/{split}_annot.json", "r") as fp: captions =...
null
1,937
import glob import os from setuptools import find_packages, setup import torch from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension torch_ver = [int(x) for x in torch.__version__.split(".")[:2]] assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3" def get_extensions(): this_dir = os.path.d...
null
1,938
import os import sys import mock import sphinx_rtd_theme from recommonmark.parser import CommonMarkParser import adet def autodoc_skip_member(app, what, name, obj, skip, options): # we hide something deliberately if getattr(obj, "__HIDE_SPHINX_DOC__", False): return True # Hide some names that are d...
null
1,939
from torch import nn from detectron2.layers import Conv2d from .deform_conv import DFConv2d from detectron2.layers.batch_norm import get_norm class DFConv2d(nn.Module): def __init__( self, in_channels, out_channels, with_modulated_dcn=True, ...
null
1,940
from detectron2.layers import batched_nms The provided code snippet includes necessary dependencies for implementing the `ml_nms` function. Write a Python function `def ml_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores", label_field="labels")` to solve the following problem: Performs non-max...
Performs non-maximum suppression on a boxlist, with scores specified in a boxlist field via score_field. Args: boxlist (detectron2.structures.Boxes): nms_thresh (float): max_proposals (int): if > 0, then only the top max_proposals are kept after non-maximum suppression score_field (str):
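For the ml_nms row above, a sketch built on detectron2's batched_nms, assuming boxlist is an Instances with pred_boxes plus the named score/label fields:

```python
from detectron2.layers import batched_nms

def ml_nms(boxlist, nms_thresh, max_proposals=-1,
           score_field="scores", label_field="labels"):
    """Class-aware NMS: boxes only suppress boxes of the same label."""
    if nms_thresh <= 0:
        return boxlist
    boxes = boxlist.pred_boxes.tensor          # field names assumed
    scores = boxlist.get(score_field)
    labels = boxlist.get(label_field)
    keep = batched_nms(boxes, scores, labels, nms_thresh)
    if max_proposals > 0:
        keep = keep[:max_proposals]
    return boxlist[keep]
```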
1,941
import random import numpy as np from fvcore.transforms import transform as T from detectron2.data.transforms import RandomCrop, StandardAugInput from detectron2.structures import BoxMode def adjust_crop(x0, y0, crop_size, instances, eps=1e-3): modified = False x1 = x0 + crop_size[1] y1 = y0 + crop_size[0] ...
Generate a CropTransform so that the cropping region contains the center of the given instance. Args: crop_size (tuple): h, w in pixels image_size (tuple): h, w instance (dict): an annotation dict of one instance, in Detectron2's dataset format.
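Row 1,941's docstring asks for a crop that contains the instance center. One way to satisfy that constraint, assuming XYXY_ABS boxes after BoxMode conversion and detectron2's CropTransform(x0, y0, w, h) convention:

```python
import numpy as np
from detectron2.data import transforms as T
from detectron2.structures import BoxMode

def gen_crop_transform_with_instance(crop_size, image_size, instance):
    crop_size = np.asarray(crop_size, dtype=np.int32)   # (h, w)
    bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
    center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
    # valid top-left corners keep the center inside and the crop in-image
    min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
    max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
    max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
    y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
    x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
    return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
```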
1,942
import copy import logging import os.path as osp import numpy as np import torch from fvcore.common.file_io import PathManager from PIL import Image from pycocotools import mask as maskUtils from detectron2.data import detection_utils as utils from detectron2.data import transforms as T from detectron2.data.dataset_map...
null
1,943
import os from detectron2.data.datasets.register_coco import register_coco_instances from detectron2.data.datasets.builtin_meta import _get_builtin_metadata from .datasets.text import register_text_instances _PREDEFINED_SPLITS_PIC = { "pic_person_train": ("pic/image/train", "pic/annotations/train_person.json"), ...
null
1,944
import logging import numpy as np import torch from detectron2.data import transforms as T from detectron2.data.detection_utils import \ annotations_to_instances as d2_anno_to_inst from detectron2.data.detection_utils import \ transform_instance_annotations as d2_transform_inst_anno import math def transform_be...
null
1,945
import logging import numpy as np import torch from detectron2.data import transforms as T from detectron2.data.detection_utils import \ annotations_to_instances as d2_anno_to_inst from detectron2.data.detection_utils import \ transform_instance_annotations as d2_transform_inst_anno import math def annotations...
null
1,946
import logging import numpy as np import torch from detectron2.data import transforms as T from detectron2.data.detection_utils import \ annotations_to_instances as d2_anno_to_inst from detectron2.data.detection_utils import \ transform_instance_annotations as d2_transform_inst_anno import math The provided co...
With option to not use hflip. Returns: list[Augmentation]
1,947
import torch from torch.nn import functional as F from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler class Blender(object): def __init__(self, cfg): # fmt: off self.pooler_resolution = cfg.MODEL.BLENDMASK.BOTTOM_RESOLUTION sampling_ratio = cfg.MODEL.B...
null
1,948
from typing import Dict from torch import nn from torch.nn import functional as F from detectron2.utils.registry import Registry from detectron2.layers import ShapeSpec from adet.layers import conv_with_kaiming_uniform BASIS_MODULE_REGISTRY = Registry("BASIS_MODULE") BASIS_MODULE_REGISTRY.__doc__ = """ Registry for bas...
null
1,949
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def aligned_bilinear(tensor, factor): ...
null
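Row 1,949 opens with aligned_bilinear, used throughout these BlendMask-style files. A sketch of an alignment-preserving integer upsample, assuming the replicate-pad plus align_corners trick this family of code relies on:

```python
import torch.nn.functional as F

def aligned_bilinear(tensor, factor):
    """Upsample an NCHW tensor by an integer factor with pixel-aligned corners."""
    assert tensor.dim() == 4 and factor >= 1
    if factor == 1:
        return tensor
    h, w = tensor.size()[2:]
    tensor = F.pad(tensor, pad=(0, 1, 0, 1), mode="replicate")
    oh, ow = factor * h + 1, factor * w + 1
    tensor = F.interpolate(tensor, size=(oh, ow), mode="bilinear", align_corners=True)
    # shift by half a stride so output pixel centers align with input centers
    tensor = F.pad(tensor, pad=(factor // 2, 0, factor // 2, 0), mode="replicate")
    return tensor[:, :, : oh - 1, : ow - 1]
```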
1,950
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def compute_basis_stride(images, basis...
null
1,951
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit class folder(nn.Module): def __init...
null
1,952
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def process_gt_instances(gt_instances,...
null
1,953
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def reduce_sum(tensor): world_size...
null
1,954
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def reduce_sum(tensor): def compute_l...
null
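Rows 1,953–1,954 both begin a reduce_sum helper. A minimal sketch of the usual all-reduce implementation, assuming the world-size guard suggested by the imports:

```python
import torch.distributed as dist
from detectron2.utils.comm import get_world_size

def reduce_sum(tensor):
    world_size = get_world_size()
    if world_size < 2:  # single process: nothing to reduce
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    return tensor
```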
1,955
import torch from torch.nn import functional as F from torch import nn from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler from .utils import aligned_bilinear, compute_loss, compute_loss_softmax from fvcore.nn import sigmoid_focal_loss_jit from adet.utils.comm import reduce_sum from dete...
null
1,956
import torch from torch.nn import functional as F from torch import nn from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler from .utils import aligned_bilinear, compute_loss, compute_loss_softmax from fvcore.nn import sigmoid_focal_loss_jit from adet.utils.comm import reduce_sum from dete...
null
1,957
import torch from torch.nn import functional as F from torch import nn from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler from .utils import aligned_bilinear, compute_loss, compute_loss_softmax from fvcore.nn import sigmoid_focal_loss_jit from adet.utils.comm import reduce_sum from dete...
null
1,958
import torch from torch.nn import functional as F from torch import nn from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler from .utils import aligned_bilinear, compute_loss, compute_loss_softmax from fvcore.nn import sigmoid_focal_loss_jit from adet.utils.comm import reduce_sum from dete...
null