Add Batch 7207969a-9c99-499a-9bbe-d9b224d2d2fe
This view is limited to 50 files because it contains too many changes.
- abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/96522f6d-42ec-4ef7-aa06-aa49f7a9d433_content_list.json +3 -0
- abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/96522f6d-42ec-4ef7-aa06-aa49f7a9d433_model.json +3 -0
- abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/96522f6d-42ec-4ef7-aa06-aa49f7a9d433_origin.pdf +3 -0
- abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/full.md +266 -0
- abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/images.zip +3 -0
- abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/layout.json +3 -0
- accdiffusionanaccuratemethodforhigherresolutionimagegeneration/7d58acad-7a35-400a-b95e-3e542e5768b9_content_list.json +3 -0
- accdiffusionanaccuratemethodforhigherresolutionimagegeneration/7d58acad-7a35-400a-b95e-3e542e5768b9_model.json +3 -0
- accdiffusionanaccuratemethodforhigherresolutionimagegeneration/7d58acad-7a35-400a-b95e-3e542e5768b9_origin.pdf +3 -0
- accdiffusionanaccuratemethodforhigherresolutionimagegeneration/full.md +416 -0
- accdiffusionanaccuratemethodforhigherresolutionimagegeneration/images.zip +3 -0
- accdiffusionanaccuratemethodforhigherresolutionimagegeneration/layout.json +3 -0
- acceleratingimagegenerationwithsubpathlinearapproximationmodel/ae593f34-493e-4aa3-98d2-67b350c8f228_content_list.json +3 -0
- acceleratingimagegenerationwithsubpathlinearapproximationmodel/ae593f34-493e-4aa3-98d2-67b350c8f228_model.json +3 -0
- acceleratingimagegenerationwithsubpathlinearapproximationmodel/ae593f34-493e-4aa3-98d2-67b350c8f228_origin.pdf +3 -0
- acceleratingimagegenerationwithsubpathlinearapproximationmodel/full.md +348 -0
- acceleratingimagegenerationwithsubpathlinearapproximationmodel/images.zip +3 -0
- acceleratingimagegenerationwithsubpathlinearapproximationmodel/layout.json +3 -0
- acceleratingimagesuperresolutionnetworkswithpixellevelclassification/03b7ebd8-cf31-4867-9bee-a34beac45e82_content_list.json +3 -0
- acceleratingimagesuperresolutionnetworkswithpixellevelclassification/03b7ebd8-cf31-4867-9bee-a34beac45e82_model.json +3 -0
- acceleratingimagesuperresolutionnetworkswithpixellevelclassification/03b7ebd8-cf31-4867-9bee-a34beac45e82_origin.pdf +3 -0
- acceleratingimagesuperresolutionnetworkswithpixellevelclassification/full.md +264 -0
- acceleratingimagesuperresolutionnetworkswithpixellevelclassification/images.zip +3 -0
- acceleratingimagesuperresolutionnetworkswithpixellevelclassification/layout.json +3 -0
- acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/ced3e8e2-1e3c-4b26-b017-e6d2c3938fe5_content_list.json +3 -0
- acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/ced3e8e2-1e3c-4b26-b017-e6d2c3938fe5_model.json +3 -0
- acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/ced3e8e2-1e3c-4b26-b017-e6d2c3938fe5_origin.pdf +3 -0
- acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/full.md +262 -0
- acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/images.zip +3 -0
- acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/layout.json +3 -0
- acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/3a7ac274-49d3-416c-93e4-fbc4558fc467_content_list.json +3 -0
- acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/3a7ac274-49d3-416c-93e4-fbc4558fc467_model.json +3 -0
- acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/3a7ac274-49d3-416c-93e4-fbc4558fc467_origin.pdf +3 -0
- acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/full.md +356 -0
- acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/images.zip +3 -0
- acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/layout.json +3 -0
- action2soundambientawaregenerationofactionsoundsfromegocentricvideos/4aec7362-c326-48a6-8c9d-60c43e8b873f_content_list.json +3 -0
- action2soundambientawaregenerationofactionsoundsfromegocentricvideos/4aec7362-c326-48a6-8c9d-60c43e8b873f_model.json +3 -0
- action2soundambientawaregenerationofactionsoundsfromegocentricvideos/4aec7362-c326-48a6-8c9d-60c43e8b873f_origin.pdf +3 -0
- action2soundambientawaregenerationofactionsoundsfromegocentricvideos/full.md +327 -0
- action2soundambientawaregenerationofactionsoundsfromegocentricvideos/images.zip +3 -0
- action2soundambientawaregenerationofactionsoundsfromegocentricvideos/layout.json +3 -0
- actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/1c1c678d-9f58-4921-b267-ac0f538d6306_content_list.json +3 -0
- actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/1c1c678d-9f58-4921-b267-ac0f538d6306_model.json +3 -0
- actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/1c1c678d-9f58-4921-b267-ac0f538d6306_origin.pdf +3 -0
- actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/full.md +285 -0
- actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/images.zip +3 -0
- actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/layout.json +3 -0
- actionvosactionsaspromptsforvideoobjectsegmentation/7dd79ec8-6b34-4132-a4ff-2ed3124bdaf0_content_list.json +3 -0
- actionvosactionsaspromptsforvideoobjectsegmentation/7dd79ec8-6b34-4132-a4ff-2ed3124bdaf0_model.json +3 -0
abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/96522f6d-42ec-4ef7-aa06-aa49f7a9d433_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:477ece3d747547398df6c03e3763279b5e7890ff8964e54627f347848c60ab72
size 75459
abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/96522f6d-42ec-4ef7-aa06-aa49f7a9d433_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe4c47437cb3614c94b75d73ea0dcccc5687ae6ba2ee159eca7ad5cb1930a46e
size 87421
abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/96522f6d-42ec-4ef7-aa06-aa49f7a9d433_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0972df46276dfeb77c034f77d962fdbdf366e2df35a05c2753a7dd9ef2caa4ef
size 5972643
abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/full.md
ADDED
@@ -0,0 +1,266 @@
# ABC Easy as 123: A Blind Counter for Exemplar-Free Multi-Class Class-agnostic Counting

Michael Hobley and Victor Prisacariu

Active Vision Laboratory, University of Oxford [mahobley, victor]@robots.ox.ac.uk

Abstract. Class-agnostic counting methods enumerate objects of an arbitrary class, providing tremendous utility in many fields. Prior works have limited usefulness as they require either a set of examples of the type to be counted or that the query image contains only a single type of object. A significant factor in these shortcomings is the lack of a dataset to properly address counting in settings with more than one kind of object present. To address these issues, we propose the first Multi-class, Class-Agnostic Counting dataset (MCAC) and A Blind Counter (ABC123), a method that can count multiple types of objects simultaneously without using examples of the type during training or inference. ABC123 introduces a new paradigm in which, instead of requiring exemplars to guide the enumeration, examples are found after the counting stage to help a user understand the generated outputs. We show that ABC123 outperforms contemporary methods on MCAC without needing human-in-the-loop annotations. We also show that this performance transfers to FSC-147, the standard class-agnostic counting dataset. MCAC is available at MCAC.active.vision and ABC123 is available at ABC123.active.vision.

# 1 Introduction

Given an image and told to 'count', a person would generally understand the intended task and complete it accurately even if multiple previously unseen classes of object are present. This natural human ability to count arbitrarily has not been modelled by today's methods. Most automated counting methods are class-specific [1,6], counting objects of classes that were present during training. These methods are not generalisable and require retraining for each new type of object. Class-agnostic methods [19,21] can count objects of an arbitrary type, removing the need for retraining. However, they usually require an exemplar image as a prior on the class to count, and only count a single class at a time. Recently, exemplar-free or zero-shot class-agnostic counting methods [7,18] have been developed that do away with the need for exemplars to define the type, removing the need for human intervention during deployment. These methods either perform poorly [18] or are restricted to images that contain only a single class of object [4,7].

![](images/4110521a91b05b1c3f20c2c65d0b12a98b1a41d78fdaca2812444e73e9e6915f.jpg)

Fig. 1: ABC123 counts objects of multiple unseen types. Not only does our method not need exemplars to define the type to count, it also finds examples of each type it has counted.

![](images/56bf665bee03d9ee7ba80ef4e406f51a2d989c3baa0e9f97eb128846f629478c.jpg)

Fig. 2: MCAC contains images with up to 4 classes and up to 300 instances per class. All objects have associated instance labels, class labels, bounding boxes, centre points, and occlusion percentages.

We propose ABC123, a transformer-based, multi-class class-agnostic counter which does not need exemplars during training or inference. ABC123 achieves this by first regressing a density map for each type present and then enumerating the instances by integration. As it is sometimes difficult to interpret what has been counted given only a density map and a count, we design an example discovery stage which locates instances of the counted object.

A significant factor in the limitations of current methods is the lack of a class-agnostic counting dataset that includes images with more than one class present, as existing datasets all focus on the single-class scenario. In order to train and evaluate our method, as well as other methods, in multi-class settings, we introduce MCAC, a new synthetic multi-class class-agnostic counting dataset. We show that methods previously assumed to work in multi-class settings perform poorly on MCAC, and that ABC123 significantly outperforms them while also generalising to other datasets.

Our main contributions are:

- We propose ABC123, the first exemplar-free multi-class class-agnostic counter, and show that it tackles multi-class counting effectively.
- We introduce MCAC, the first multi-class class-agnostic counting dataset, and use it to demonstrate that prior methods do not perform as expected in multi-class settings.
- We introduce the idea of example finding to exemplar-free counting and demonstrate its utility in aiding a user in understanding what has been counted.

In the remainder of this paper, we outline the relevant prior work in Section 2 before introducing MCAC in Section 3. In Section 4, we introduce ABC123 and detail our experimental setup. Section 5 presents our state-of-the-art results in single- and multi-class class-agnostic settings. Finally, we summarise the impact of the work as a whole.

# 2 Related Work

Class-specific counting methods aim to enumerate the instances of a single or small set of known classes [1,6,9,26]. These methods struggle to adapt to novel classes, needing specific data and training for each type of object. To address these issues, Lu et al. [14] proposed class-agnostic counting, a framework in which inference-time classes are not present during training. Still, most class-agnostic methods [19,21,22,28] require exemplar images of the test-time class. These methods generally work by creating a sufficiently general feature space and applying some form of matching to regress a density map of the counted objects.

Recent works, RepRPN [18], CounTR [13], ZSC [27], CLIP-Count [10], RCC [7] and LOCA [4], do away with exemplar images at inference time, removing the need for intervention during deployment. RepRPN is a two-step method which proposes regions likely to contain an object of interest and then uses them as exemplars for an exemplar-based density map regression method. It proposes more than one bounding box and enumerates them separately. RepRPN performs poorly in comparison to other contemporary methods. ZSC [27] uses a multi-stage process in which a text input is used to generate a generic image of the type to be counted, which is then used to find exemplar patches. These exemplar patches then act as the input to an exemplar-based method [21]. CounTR [13] uses a large vision-transformer encoder-decoder to regress a density map of instance locations. It is trained in a mixed few/zero-shot way, applying understanding gained from exemplar-based examples to exemplar-free cases. LOCA [4] also uses a vision transformer backbone and can perform both few- and zero-shot counting. LOCA separately extracts the shape and appearance of exemplar and non-exemplar objects to create a more informed object prototype. This prototype is then matched to areas of the image to generate a density map prediction.

It has been assumed that current exemplar-based methods can function in multi-class settings. However, this has not been proven rigorously, as the main dataset for class-agnostic counting (FSC-133/147 [7,19]) contains only one labelled class per image. In fact, we show in Sec. 5 that these methods perform poorly in contexts with multiple types present. FSC-133/147 being single-class has also explicitly motivated work such as RCC, which regresses a single scalar count from an image. It is trained without exemplar images and uses scalar supervision instead of density maps. Even with the constraint of only counting one kind of object, and with no further direction on the type to count, RCC achieves results competitive with exemplar-based methods on FSC-147, showing the limitations of this dataset.

While large models with image inputs [16] like SAM [11] would seem able to count objects of arbitrary types effectively, these methods in fact have poor numerical understanding [15] and perform unsatisfactorily on counting tasks, especially when images contain small objects or a high density of objects. Recently, Paiss et al. [17] attempted to utilise multi-modal deep learning to count. Specifically, they introduce a method that teaches a CLIP model a coherent understanding of words related to counting. However, they are only able to achieve this for up to 10 objects.

# 3 MCAC Dataset

There are currently no datasets suitable for class-agnostic counting problems with multiple types of object present at once. This significantly impedes research into these tasks. In order to facilitate the development of multi-class class-agnostic counting methods, as well as the evaluation of prior work, we introduce MCAC, the first multi-class class-agnostic counting dataset.

While the deployment query scenario, 'counting given an unlabelled image of objects', is natural, the training and quantitative evaluation of methods to address it is not. To facilitate training and evaluation of methods in multi-class settings, we need images with multiple objects of multiple types. To evaluate a method's generalisability to unseen object types, the classes present in the images need to be mutually exclusive between training, validation, and testing. It is infeasible to gather natural images with (a) a wide variety of classes, (b) a wide variety of the number of times an object appears in an image, and (c) no repetition of the types of object between the train, test, and validation splits. Using synthetic images allows the above constraints to be satisfied while also providing a high level of precision and accuracy in the labels for each image. As shown in Sec. 5.2, the understanding gained from training on synthetic data is general enough to apply to standard photographic datasets.

MCAC contains images with between 1 and 4 classes of object (mean of 1.75) and between 1 and 300 instances per class (mean of 47.66). MCAC has three data splits: training, with 4756 images drawn from 287 classes; validation, with 2413 images drawn from 37 classes; and testing, with 2114 images drawn from 19 classes. MCAC-M1 is the single-class subset of MCAC, comprising the images which have only one class present. MCAC-M1 totals 4259 images, with a mean of 114.89 instances per image. These distributions were designed to replicate those of real-world counting tasks.

All instances in an image have associated class labels, model labels, centre coordinates, bounding box coordinates, segmentation maps, unoccluded segmentation maps, and occlusion percentages. The occlusion percentage is calculated as $1 - \frac{A_0}{A_1}$, where $A_0$ is the number of pixels visible in the final image and $A_1$ is the number of pixels that would be seen if the object were unoccluded and completely within the bounds of the image.
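
For concreteness, the per-instance occlusion label reduces to a one-line computation; this is a sketch with our own variable names, not code from the dataset tooling:

```python
def occlusion_percentage(a0: int, a1: int) -> float:
    """a0: pixels of the object visible in the rendered image.
    a1: pixels the object would cover if unoccluded and fully in frame."""
    return 1.0 - a0 / a1  # 0.0 = fully visible, towards 1.0 = mostly hidden
```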

Objects are 'dropped' into the scene, ensuring random locations and orientations. As objects in real settings often vary in size, we vary the size of objects by $\pm 50\%$ from a random nominal size. We also vary the number, location, and intensity of the lights present. Models and textures are drawn from ShapeNetSem [20].

Both exemplar-based and exemplar-free methods encounter problems of ambiguity. If there are objects of varied levels of generality, which boundary should be used? For example, on a chess board with a single white pawn as the exemplar, should the count be of all the pieces, all the white pieces, all the white pawns, all the pawns, and so on? Figure 8 shows examples from FSC-147 of cases where what is to be counted is ambiguous.

Given the infeasibility of defining every possible way of grouping the objects present in an image, we define a single way of grouping the objects: an identical mesh and texture, independent of size or orientation. We do, however, acknowledge the existence of other valid-but-unknown counts, the unlabelled ways of grouping the objects.

![](images/c3dd820a9dd4a01a9674a1a1b92aaa48e62e7b5eea04ec30cb38239a3e9a9ae3.jpg)

Fig. 3: The ABC123 pipeline. Our method learns to count objects of multiple novel classes without needing exemplar images. During training and quantitative evaluation, the matcher aligns the unguided predictions to the ground truth labels. To aid a user in understanding the results, the example prediction stage locates instances associated with each generated count.

# 4 Method

Our method, ABC123, takes an image with multiple instances of objects of multiple types and regresses the count of each type. This is achieved blind, i.e. on objects of arbitrary classes, with no requirement to have seen the object class during training or to have an exemplar image to define the type during inference. We achieve this by first regressing a density map for each type and then enumerating the instances by integration. To facilitate training and evaluating ABC123 in an exemplar-free way, we propose a matching stage. To further increase the interpretability of the outputs of ABC123, we design an example discovery stage which finds specific instances of the counted object. The pipeline of ABC123 is presented in Fig. 3.

# 4.1 Density Map Regression

For each image, there are $m$ classes present, each with an associated ground truth count $y$ and density map $d$. We regress $\hat{m}$ count and density map predictions, $\hat{y}$ and $\hat{d}$ respectively. $\hat{m}$ acts as an upper bound on the number of counts ABC123 can regress. Each predicted count is obtained by integrating its density map:

$$
\hat{y} = \sum_{h,w} \hat{d}_{(h,w)} \tag{1}
$$

where $\hat{d}_{(h,w)}$ denotes the density value for pixel $(h,w)$. We achieve this by using $\hat{m}$ convolutional up-sampling heads on top of a vision transformer backbone [5]. We use a vision transformer backbone due to its globally receptive field and self-attention mechanism, which Hobley and Prisacariu [7] showed to be helpful for generating a complex understanding in counting settings. Each head regresses a single pixel-wise density map prediction and count prediction from a patch-wise, low-resolution, high-dimensional feature space. Similar to other contemporary methods [12,13,19,28], we use the pixel-wise error $||d - \hat{d}||_1$ as our loss, where $d$ and $\hat{d}$ are the ground truth and predicted density maps.
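
The integration and supervision steps are simple enough to state directly. The following is a minimal sketch of Eq. (1) and the L1 loss, assuming batched per-head density maps; it is our illustration, not the authors' released code:

```python
import torch

# Minimal sketch of Eq. (1) and the L1 supervision; d_hat holds one
# (224 x 224) density map per head and per image.
def counts_from_density(d_hat: torch.Tensor) -> torch.Tensor:
    """d_hat: (batch, m_hat, 224, 224) -> (batch, m_hat) counts, Eq. (1)."""
    return d_hat.sum(dim=(-2, -1))

def density_l1(d: torch.Tensor, d_hat: torch.Tensor) -> torch.Tensor:
    """Pixel-wise L1 error between a ground truth map and a prediction."""
    return (d - d_hat).abs().sum()
```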

# 4.2 Matching

In single-class or exemplar-based settings, there is a single prediction-label pair. However, in multi-class exemplar-free settings, there are multiple predictions as well as multiple labels, without a clearly defined pairing. This resembles other open-set problems like class discovery [24,29] and clustering [8,25], where the number and cardinality of new classes is not necessarily known. In keeping with these fields, and to facilitate training and quantitative evaluation, we find correspondences between the set of $m$ known counts and the set of $\hat{m}$ predicted counts. The correspondence matrix is defined as $\mathcal{X} \in \{0,1\}^{m \times \hat{m}}$, where $\mathcal{X}_{i,j} = 1$ iff ground truth label $i$ is assigned to prediction $j$. A problem instance is described by an $m \times \hat{m}$ cost matrix $\mathcal{C}$, where $\mathcal{C}_{i,j}$ is the cost of matching ground truth label $i$ and prediction $j$. The goal is to find the complete assignment of predictions to labels of minimal cost. Formally, the optimal assignment has cost

$$
\min_{\mathcal{X}} \sum_{i=1}^{m} \sum_{j=1}^{\hat{m}} \mathcal{C}_{i,j} \cdot \mathcal{X}_{i,j} \tag{2}
$$

Specifically, our cost function is defined as the pixel-wise distance between the normalised ground truth density map $d_i$ and the normalised predicted density map $\hat{d}_j$:

$$
\mathcal{C}_{i,j} = \left|\left| \frac{d_i}{||d_i||_2} - \frac{\hat{d}_j}{||\hat{d}_j||_2} \right|\right|_2 \tag{3}
$$

The normalisation ensures the matching is done on the locality of the counted objects rather than the magnitude of the prediction itself. We use the Hungarian algorithm, specifically the Jonker-Volgenant variant outlined in Crouse [3], to solve for $\mathcal{X}$ robustly.
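
Under the definitions above, the matching stage can be sketched with standard tools. SciPy's `linear_sum_assignment` implements the Jonker-Volgenant-style algorithm of Crouse [3]; the tensor shapes and function name below are our assumptions for a single image, not the authors' exact implementation:

```python
import torch
from scipy.optimize import linear_sum_assignment

# Sketch of the matching stage for one image: d is an (m, H, W) stack of
# ground truth maps, d_hat an (m_hat, H, W) stack of predictions.
def match(d: torch.Tensor, d_hat: torch.Tensor):
    eps = 1e-8  # guard against all-zero maps
    d_n = d.flatten(1) / (d.flatten(1).norm(dim=1, keepdim=True) + eps)
    p_n = d_hat.flatten(1) / (d_hat.flatten(1).norm(dim=1, keepdim=True) + eps)
    cost = torch.cdist(d_n, p_n)  # C[i, j] as in Eq. (3), shape (m, m_hat)
    # linear_sum_assignment handles rectangular (m < m_hat) cost matrices
    rows, cols = linear_sum_assignment(cost.detach().cpu().numpy())
    return rows, cols  # ground truth rows[k] is matched to prediction cols[k]
```

The matched label-prediction pairs are then the only ones that contribute to the loss in Eq. (4).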

The supervision loss for each image is the sum of the $\mathrm{L}_1$ differences between the ground truth density maps and their matched predictions:

$$
\mathcal{L} = \sum_{i,j}^{m, \hat{m}} ||d_i - \hat{d}_j||_1 \cdot \mathcal{X}_{i,j} \tag{4}
$$

It should be noted that every label has an associated prediction, but the inverse is not the case, as generally $\hat{m} > m$. This means we do not impose a loss on the unmatched density maps. This allows the network to generate more nuanced count definitions, as it does not punish valid-but-unknown counts, which are likely present in any counting setting. As is usual [24,25,29], we use the same matching procedure to evaluate our performance at inference time. However, during deployment, when there are no ground-truth density maps, the matching is both unnecessary and impossible. We instead combine any similar predictions, remove predictions of zero, and present the remaining predictions to the user.

![](images/6caf7dec0b583e92f3bc42276376e5fac390f40a85ec973baefd247ad1dac5a4.jpg)

Fig. 4: Example finding. Instead of using exemplars to define the count, we count 'blind' and then find meaningful bounding boxes to aid a user in understanding what has been counted. The examples are found using the query image, learnt features, our regressed density maps, and a SAM network.

# 4.3 Example Discovery

While exemplar-free counting saves a user time, as no manual intervention is required, it does require the user to interpret the results. A set of scalar counts or density maps can be unclear, as it is not always obvious which count corresponds to which type of object in the input image, especially in high-density situations; see Figure 4. To aid the user in understanding which class a generated count corresponds to, we propose flipping the usual exemplar-based paradigm. Instead of using exemplar images to define the type to count, we find examples of the type that was counted.

To find examples corresponding to a given count, we first find example points which are high in the corresponding density map while low in the others. To increase the diversity of the found examples, we select the points with the largest latent feature distance. We use these points as seed inputs for a pre-trained segmentation method, SAM [11]. The end user is presented with cropped areas of the query image centred on these segmentations, as in Fig. 4.
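
A plausible sketch of the seed-point search is given below; the feature-diversity re-ranking and the SAM hand-off [11] are omitted, and the function and parameter names are ours, not the paper's:

```python
import torch

# Pick pixels that are high in the chosen head's density map while low in
# every other head's map; these become candidate seed points for SAM.
def seed_points(d_hat: torch.Tensor, head: int, k: int = 3) -> torch.Tensor:
    """d_hat: (m_hat, H, W) with m_hat > 1. Returns (k, 2) (row, col) seeds."""
    others = torch.cat([d_hat[:head], d_hat[head + 1:]])
    score = d_hat[head] - others.amax(dim=0)     # high here, low elsewhere
    idx = score.flatten().topk(k).indices
    w = d_hat.shape[-1]
    return torch.stack([torch.div(idx, w, rounding_mode="floor"), idx % w], dim=1)
```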

# 4.4 Experiments

We use a ViT-Small [5] backbone due to its lightweight nature and for comparison to methods that use a ResNet-50 backbone, such as FamNet and BMNet [19,21]. ViT-S has a similar number of parameters (21M vs 23M), throughput (1237 im/sec vs 1007 im/sec), and supervised ImageNet performance (79.3% vs 79.8%) to ResNet-50 [23]. Our choice of ViT-S for ABC123 limits the input resolution to $(224 \times 224)$, as opposed to the $(\geq 384 \times 384)$ resolution used by the methods we compare to, which use ResNet-50 or larger ViT backbones. The effect of using ResNet or ViT-S backbones in various counting methods was discussed in detail by Hobley and Prisacariu [7], who found that neither was inherently superior and that performance varied depending on the method.

![](images/f3f60acb443d13d9473a7965a3c56577569f5cc100e3e31e3518ec8e5a6b7aed.jpg)

Fig. 5: Comparison to other methods on MCAC. ABC123 produces more accurate results than the exemplar-based methods without using exemplar images. The ground truth (GT) and predicted counts are shown in the top right corner of their respective density maps.

Since vision transformers typically demand substantial training data, we initialise our transformer backbone with weights from Caron et al. [2]. This self-supervised pre-training endows the network with an understanding of meaningful image features prior to exposure to our dataset and without supervision. This reduces the risk of overfitting when the model is then trained on MCAC.

Our counting heads, each comprising 3 Conv-ReLU-Upsample blocks, increase the patch-wise resolution of the trained counting features from $k \times (28 \times 28)$ to a pixel-wise density map prediction of $\hat{m} \times (224 \times 224)$, where $k$ is the dimensionality of the transformer features and $\hat{m}$ is the number of predicted counts. For ABC123, $k = 384$. We set $\hat{m} = 5$ to ensure there is the capacity to generate a count per defined class in MCAC and at least one valid-but-unknown count.
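
One plausible reading of a single head is sketched below; the paper fixes only the input width $k = 384$, the three-block structure, and the 28-to-224 resolution change, so the intermediate channel widths here are our assumptions:

```python
import torch.nn as nn

# One counting head: three Conv-ReLU-Upsample blocks taking the 384-dim,
# 28x28 patch features to a one-channel 224x224 density map.
def make_head(k: int = 384) -> nn.Sequential:
    def block(c_in: int, c_out: int) -> nn.Sequential:
        return nn.Sequential(
            nn.Conv2d(c_in, c_out, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
        )
    return nn.Sequential(block(k, 128), block(128, 64), block(64, 1))  # 28->56->112->224

heads = nn.ModuleList(make_head() for _ in range(5))  # m_hat = 5 heads
```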

ABC123 trains in less than eight hours using two 1080Tis. It takes less than two hours to train just the head with a frozen backbone (ABC123*). During training we use an Adam optimiser, a batch size of 2, and a learning rate of $3 \times 10^{-5}$ which halves every 35 epochs, for a total of 100 epochs. The example discovery stage uses a frozen pre-trained ViT-B SAM model [11].
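
Expressed with standard PyTorch utilities, and assuming a hypothetical `train_one_epoch` callback for the user's own loop, the stated recipe corresponds to:

```python
import torch
from torch import nn

# Adam at 3e-5, halved every 35 epochs, 100 epochs, batch size 2.
def fit(model: nn.Module, train_one_epoch) -> None:
    optimizer = torch.optim.Adam(model.parameters(), lr=3e-5)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=35, gamma=0.5)
    for _ in range(100):
        train_one_epoch(model, optimizer)  # one pass over MCAC, batch size 2
        scheduler.step()                   # halve the learning rate on schedule
```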

Here we lay out the usage of MCAC that we used to generate our results. We recommend that future works use a similar approach, and we will release code for a PyTorch dataset to enable easy adoption. We exclude objects that are more than $70\%$ occluded by either other objects or the edge of the frame.

![](images/48f54b07d9cfd6b97f29bad123d5ed8efe38ff9ff5768f4a1bc76562d1c46780.jpg)

Fig. 6: Results of our method on images with 4 classes. ABC123 is able to generate accurate counts and meaningful density maps from images with four novel classes. MCAC has between one and four classes of object per image.

![](images/b53217bf5336a59016747703df40dd42403bf260bb59e89ee526f04ea57b4a8e.jpg)

![](images/09a04e4d51ef1b14f097f1ad9a5a8eb27864be9b45a86e4c44cc2d969afb743c.jpg)

![](images/fc9666e2b0b1548d52e6ff8e83b92bd9b3a2f0b24d32f9ea55abfbd4d3180c4e.jpg)

MCAC enables the use of true pixel-wise density maps. While this increases the accuracy of our method, we found it significantly decreased the performance of other methods, especially those with test-time adaptations. For fairness, we use the standard [12,13,19,28] pseudo-density-maps for all methods, including our own. These are generated by placing a Gaussian kernel centred on the centre pixel of each object.
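
This construction can be sketched as follows; the kernel width `sigma` is a free parameter of our sketch, as the paper does not state the value used:

```python
import numpy as np
from scipy.ndimage import gaussian_filter

# Standard pseudo-density-map construction: a unit impulse at each annotated
# centre pixel, blurred with a Gaussian kernel.
def pseudo_density_map(centres, shape=(224, 224), sigma=4.0) -> np.ndarray:
    dmap = np.zeros(shape, dtype=np.float32)
    for row, col in centres:             # one impulse per object centre
        dmap[int(row), int(col)] += 1.0
    return gaussian_filter(dmap, sigma)  # still sums to (approximately) the count
```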

When training exemplar-based methods, we take bounding boxes randomly from instances with less than $30\%$ occlusion. We evaluate these methods using the bounding boxes of the three least occluded instances.

# 5 Results

We evaluate our method against two trivial baselines: predicting the training-set mean or median count for all inference images. As there are no previous multi-class exemplar-free class-agnostic counting methods, we compare ABC123 to exemplar-based methods using separate exemplars from each of the classes present. We compare our method to FamNet [19], BMNet [21], CounTR [13], and LOCA [4] on MCAC and FSC-133/147, as these are the current state-of-the-art methods with publicly available implementations. Additionally, we compare to RCC [7] and to CounTR in its zero-shot configuration on MCAC-M1, the subset of MCAC with only a single type of object present per image.

As in Xu et al. [27], we use Mean Absolute Error (MAE), Root Mean Squared Error (RMSE), Normalised Absolute Error (NAE), and Squared Relative Error (SRE) to evaluate the performance of each method.

$$
\mathrm{MAE} = \frac{1}{nm} \sum_{j=1}^{n} \sum_{i=1}^{m_j} \left| y_i - \hat{y}_i \right|, \quad \mathrm{RMSE} = \sqrt{\frac{1}{nm} \sum_{j=1}^{n} \sum_{i=1}^{m_j} \left( y_i - \hat{y}_i \right)^2} \tag{5}
$$

$$
\mathrm{NAE} = \frac{1}{nm} \sum_{j=1}^{n} \sum_{i=1}^{m_j} \frac{\left| y_i - \hat{y}_i \right|}{y_i}, \quad \mathrm{SRE} = \sqrt{\frac{1}{nm} \sum_{j=1}^{n} \sum_{i=1}^{m_j} \frac{\left( y_i - \hat{y}_i \right)^2}{y_i}} \tag{6}
$$

where $n$ is the number of test images, $m_j$ is the number of classes in image $j$, and $y_i$ and $\hat{y}_i$ are the ground truth and predicted number of objects of class $i$ in image $j$.
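
These metrics transcribe directly into code; the sketch below assumes `y` and `y_hat` are flat arrays with one entry per (image, class) pair across the whole split:

```python
import numpy as np

# Direct transcription of Eqs. (5)-(6).
def count_metrics(y: np.ndarray, y_hat: np.ndarray) -> dict:
    err = y - y_hat
    return {
        "MAE": float(np.mean(np.abs(err))),
        "RMSE": float(np.sqrt(np.mean(err ** 2))),
        "NAE": float(np.mean(np.abs(err) / y)),
        "SRE": float(np.sqrt(np.mean(err ** 2 / y))),
    }
```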

# 5.1 MCAC

We achieve significantly better results than FamNet, BMNet, CounTR, and LOCA on MCAC, both quantitatively and qualitatively, without needing exemplars; see Tab. 1 for results and Fig. 5 for comparative examples. As seen in Fig. 5, FamNet often fails to discriminate between objects of different classes when they are visually similar or in high-density settings. Both quantitatively and qualitatively, BMNet and CounTR outperform FamNet. However, in many cases, they appear to count the 'most obvious' objects in the image regardless of the provided exemplar images. This behaviour is present, but less prevalent, with LOCA. Our method performs well on images with up to 4 classes, even when they have high intra-class appearance variation, such as having different colours on different sides, and low inter-class variation; see Fig. 6. A downside to current exemplar-based class-agnostic counting methods is that while they have some multi-class capabilities, they all take a single exemplar at a time and produce only one count. This is slow and inefficient compared to our approach, which generates all counts simultaneously.

As would be expected, the performance of all methods improves when evaluating on MCAC-M1, the images from MCAC with only a single class present; see Tab. 2. This is due to a lack of ambiguity in the type to be counted. The improvement was more significant when the methods were trained on MCAC-M1 instead of MCAC. In this training configuration, the methods generally learnt a broader definition of similarity, as there was no chance they would accidentally combine classes or count instances from another class. RCC performs well on MCAC-M1, showing the strength of its simple count-wise loss in cases where there is little ambiguity as to what is to be counted. In contrast to other methods, ABC123 trained on MCAC-M1 has similar performance to when it is trained on the full MCAC dataset, demonstrating that it avoids issues concerning intra-class variance and combining classes. Training ABC123 with only a single head ($\hat{m} = 1$) and no matching stage gives very similar performance to using its default ($\hat{m} = 5$) configuration with a matching stage. This increases our confidence that the matching stage does not provide an unfair advantage to our method's quantitative results.

# 5.2 Applicability to FSC-147/133

ABC123 trained only on MCAC, a synthetic dataset, produces accurate results and outperforms other contemporary methods when applied to FSC-133/147, the standard, more complex, photographic dataset, as seen in Figure 7 and Table 3. As the standard benchmark evaluation metrics rely on absolute count error, they are all very sensitive to even a small number of very dense images. This phenomenon was discussed in Hobley and Prisacariu [7]. We found that the errors on the few images with counts between 300 (the largest count in MCAC) and 3000 (the largest count in FSC) corrupted the metrics, making comparison to other literature more difficult. For this reason, these images are excluded from the quantitative evaluation in Table 3. These exclusions amount to $3.0\%$ of the validation set and $1.1\%$ of the test set. It should be noted that the relative rankings of the methods with and without this exclusion remain the same.

Table 1: Comparison to SOTA methods on MCAC. We significantly outperform methods which use exemplar images and test-time adaptation without requiring them. ABC123* denotes our method trained with a frozen pre-trained backbone.

<table><tr><td rowspan="2">Method</td><td rowspan="2">Shots</td><td colspan="4">Val Set</td><td colspan="4">Test Set</td></tr><tr><td>MAE</td><td>RMSE</td><td>NAE</td><td>SRE</td><td>MAE</td><td>RMSE</td><td>NAE</td><td>SRE</td></tr><tr><td>Mean</td><td>N/A</td><td>39.87</td><td>53.56</td><td>3.07</td><td>11.40</td><td>42.67</td><td>59.68</td><td>2.79</td><td>10.93</td></tr><tr><td>Median</td><td>N/A</td><td>36.25</td><td>58.15</td><td>1.51</td><td>6.70</td><td>39.81</td><td>65.36</td><td>1.38</td><td>6.73</td></tr><tr><td colspan="10">Exemplar-based</td></tr><tr><td>FamNet+ [19]</td><td>3</td><td>24.76</td><td>41.12</td><td>1.12</td><td>6.86</td><td>26.40</td><td>45.52</td><td>1.04</td><td>6.87</td></tr><tr><td>BMNet+ [21]</td><td>3</td><td>15.83</td><td>27.07</td><td>0.71</td><td>4.97</td><td>17.29</td><td>29.83</td><td>0.75</td><td>6.08</td></tr><tr><td>CounTR [13]</td><td>3</td><td>15.07</td><td>26.26</td><td>0.63</td><td>4.79</td><td>16.12</td><td>29.28</td><td>0.67</td><td>5.71</td></tr><tr><td>LOCA [4]</td><td>3</td><td>10.45</td><td>20.81</td><td>0.43</td><td>4.18</td><td>10.91</td><td>22.04</td><td>0.37</td><td>4.05</td></tr><tr><td colspan="10">Exemplar-free</td></tr><tr><td>ABC123*</td><td>0</td><td>14.64</td><td>23.67</td><td>0.46</td><td>2.97</td><td>15.76</td><td>25.72</td><td>0.45</td><td>3.11</td></tr><tr><td>ABC123</td><td>0</td><td>8.96</td><td>15.93</td><td>0.29</td><td>2.02</td><td>9.52</td><td>17.64</td><td>0.28</td><td>2.23</td></tr></table>

While ABC123 performs well on FSC, it often finds valid-but-unknown counts. As seen in Fig. 8, the generated counts are correct for the type of object counted, but the type counted may not be aligned with the labels in the original dataset. Classes are often divided into sub-classes, and unlabelled classes are discovered.

This is due to a difference in the definition of what is similar. MCAC associates a count with objects of the same mesh and texture; FSC, however, is labelled by hand using high-level semantic understanding, so it often groups objects with significantly different geometries, colours, or textures. Interestingly, an unguided segmentation method [11], which identifies instances and their relations, often finds the same class divisions as ABC123, as shown in Figure 8. To generate quantitative results, we borrow the approach of other open-set methods [8,11,15], combining sub-classes. We perform this mapping by combining the density maps of sub-class counts, either by summing the separate counts or by combining the density maps using the maximum density at each point and then counting the instances. The maximum-density configuration produces results which are competitive with other contemporary methods, while the density map summation is clearly SOTA, as presented in Table 3. The two approaches only differ in cases where sub-class density maps overlap.

Table 2: Comparison to SOTA methods on MCAC-M1. MCAC-M1 is the subset of MCAC with only one class present per image. Methods are either trained on the full multi-class dataset $(\checkmark)$ or MCAC-M1 $(X)$. 'm̂' denotes the number of predictions the method generates per query and 'Shots' denotes the number of exemplar images per query at inference time. CounTR† is an exemplar-free adaptation of CounTR, as in Hobley and Prisacariu [7]. ABC123 outperforms other methods when trained in single- or multi-class settings.

<table><tr><td rowspan="2">Method</td><td rowspan="2">Multi-Class Training</td><td rowspan="2">Shots</td><td rowspan="2">m̂</td><td colspan="4">Val Set</td><td colspan="4">Test Set</td></tr><tr><td>MAE</td><td>RMSE</td><td>NAE</td><td>SRE</td><td>MAE</td><td>RMSE</td><td>NAE</td><td>SRE</td></tr><tr><td>Mean</td><td>N/A</td><td>N/A</td><td>N/A</td><td>53.36</td><td>67.14</td><td>3.53</td><td>13.46</td><td>58.54</td><td>75.58</td><td>3.37</td><td>13.27</td></tr><tr><td>Median</td><td>N/A</td><td>N/A</td><td>N/A</td><td>45.98</td><td>76.64</td><td>1.08</td><td>6.68</td><td>51.35</td><td>86.61</td><td>1.03</td><td>7.00</td></tr><tr><td colspan="12">Exemplar-based Training</td></tr><tr><td>FamNet+ [19]</td><td>✓</td><td>3</td><td>1</td><td>24.97</td><td>48.63</td><td>0.36</td><td>3.79</td><td>28.31</td><td>54.88</td><td>0.35</td><td>3.97</td></tr><tr><td>FamNet+ [19]</td><td>X</td><td>3</td><td>1</td><td>12.54</td><td>24.69</td><td>0.37</td><td>4.71</td><td>13.97</td><td>26.19</td><td>0.25</td><td>2.12</td></tr><tr><td>BMNet+ [21]</td><td>✓</td><td>3</td><td>1</td><td>11.70</td><td>23.08</td><td>0.26</td><td>2.39</td><td>11.57</td><td>22.25</td><td>0.24</td><td>1.96</td></tr><tr><td>BMNet+ [21]</td><td>X</td><td>3</td><td>1</td><td>6.82</td><td>12.84</td><td>0.25</td><td>2.95</td><td>8.05</td><td>14.57</td><td>0.19</td><td>1.43</td></tr><tr><td>CounTR [13]</td><td>✓</td><td>3</td><td>1</td><td>11.44</td><td>21.37</td><td>0.33</td><td>2.36</td><td>10.91</td><td>21.70</td><td>0.29</td><td>2.01</td></tr><tr><td>CounTR [13]</td><td>✓</td><td>0</td><td>1</td><td>13.57</td><td>25.53</td><td>0.30</td><td>2.48</td><td>13.09</td><td>25.72</td><td>0.29</td><td>2.41</td></tr><tr><td>CounTR [13]</td><td>X</td><td>3</td><td>1</td><td>9.00</td><td>16.91</td><td>0.41</td><td>3.56</td><td>9.96</td><td>18.92</td><td>0.38</td><td>2.93</td></tr><tr><td>CounTR [13]</td><td>X</td><td>0</td><td>1</td><td>9.16</td><td>17.13</td><td>0.42</td><td>3.56</td><td>10.10</td><td>19.10</td><td>0.40</td><td>3.02</td></tr><tr><td>LOCA [4]</td><td>✓</td><td>3</td><td>1</td><td>5.62</td><td>12.24</td><td>0.15</td><td>1.73</td><td>6.25</td><td>13.09</td><td>0.12</td><td>1.14</td></tr><tr><td>LOCA [4]</td><td>X</td><td>3</td><td>1</td><td>5.01</td><td>11.47</td><td>0.22</td><td>3.35</td><td>6.52</td><td>13.37</td><td>0.15</td><td>1.36</td></tr><tr><td colspan="12">Exemplar-free Training</td></tr><tr><td>CounTR† [13]</td><td>X</td><td>0</td><td>1</td><td>11.46</td><td>21.24</td><td>0.35</td><td>2.78</td><td>12.54</td><td>23.84</td><td>0.31</td><td>2.38</td></tr><tr><td>RCC [7]</td><td>X</td><td>0</td><td>1</td><td>7.78</td><td>15.40</td><td>0.24</td><td>2.71</td><td>8.81</td><td>16.92</td><td>0.19</td><td>1.73</td></tr><tr><td>LOCA [4]</td><td>X</td><td>0</td><td>1</td><td>5.46</td><td>11.74</td><td>0.22</td><td>2.90</td><td>6.94</td><td>14.58</td><td>0.19</td><td>1.70</td></tr><tr><td>ABC123*</td><td>X</td><td>0</td><td>5</td><td>10.78</td><td>18.83</td><td>0.28</td><td>1.97</td><td>13.23</td><td>24.57</td><td>0.29</td><td>2.39</td></tr><tr><td>ABC123*</td><td>X</td><td>0</td><td>1</td><td>11.38</td><td>19.73</td><td>0.40</td><td>3.51</td><td>14.31</td><td>25.40</td><td>0.37</td><td>2.79</td></tr><tr><td>ABC123*</td><td>✓</td><td>0</td><td>5</td><td>10.98</td><td>18.85</td><td>0.30</td><td>1.93</td><td>13.13</td><td>23.93</td><td>0.29</td><td>2.18</td></tr><tr><td>ABC123</td><td>X</td><td>0</td><td>5</td><td>5.82</td><td>11.74</td><td>0.15</td><td>1.22</td><td>7.54</td><td>15.30</td><td>0.21</td><td>1.87</td></tr><tr><td>ABC123</td><td>X</td><td>0</td><td>1</td><td>5.85</td><td>12.91</td><td>0.24</td><td>3.37</td><td>7.53</td><td>15.69</td><td>0.22</td><td>2.19</td></tr><tr><td>ABC123</td><td>✓</td><td>0</td><td>5</td><td>6.08</td><td>12.62</td><td>0.16</td><td>1.22</td><td>6.82</td><td>14.70</td><td>0.16</td><td>1.51</td></tr></table>

# 5.3 Validating the Number of Predictions

It should be noted that, as the matching stage uses the ground-truth density maps, it could be used to significantly benefit a method's quantitative results without improving its deployment capabilities. Specifically, a method could generate a high number of diverse counts and use the matching stage to select the best one. We found this to be the case with our method; see Tab. 4 for the complete results. We believe, however, that this does not correspond to a more useful method in a deployment situation. This numerical gain derives purely from the matching stage, which is not present during deployment. In fact, during deployment, this would correspond to a much harder-to-interpret output, as a user would have to figure out which of the many outputs was most relevant. We limit ourselves to regressing 5 predictions to minimise this behaviour while still allowing the network to generate valid-but-unknown counts. We also found that when high numbers of predictions were generated, fewer than half were used, i.e. the outputs of some heads were rarely or never picked. This is likely because these heads were not matched frequently during training, so the loss was rarely propagated back through them. There is also significant redundancy between the heads. The predictions of certain heads over the whole dataset were clearly similar and could be grouped. Of the 39 utilised heads when $\hat{m} = 100$, there were three groups of, respectively, 13, 6, and 4 heads that were very similar, lowering the effective number of utilised heads to 19.

![](images/9cf03ba9b33a64c1eba27e39a526e6c8dc87e4c802dd0d64d5fbb84ec8e18061.jpg)

![](images/d2969a29217a0b8a05f4bf08387ba082733c00a4a88acb95699f2d85edf85be3.jpg)

![](images/6d500c9ff50ea72a603c10bc5a19bccea67c27021985c2163c86786de6aa3ed3.jpg)

![](images/c65adf0bbe1e1af7ba8d38fd2a0ff9f9d1af53ddd9c39bca408e55b81835a28a.jpg)

![](images/db47bfcf6f90f80be16e9ebafa9ad07e4a25cae8f4b43d8a11edcb6b5e1cc633.jpg)

![](images/9eece52875a6c838dea59e9a0a7302bc4a2b72b5bd09fd5ec4cea85abe5c0846.jpg)

![](images/2ef1985facedcd4e31aeb2012c0568b88443259cfa4a78ef9bcf3a1e356f211b.jpg)

![](images/5b50b57a4fec2a1f3c3023633bf1cbb2a1a020a26b1ff01cbf07e4e4f67ca337.jpg)

![](images/2dd8b10cba2e64ce88ef4a4b4998e8b30c271b42e40362d23def11c9efbd6528.jpg)

![](images/c1d1cd7234ae79e7c8ab3a37e37d3cf54ce3cfbd3c95ba067ecbabbdc1bd63cc.jpg)

![](images/8f0e13b4a60b1fe1b9f1c5fbb4094574f5f296abd28bb94a42cdab3a19698ba4.jpg)

Fig. 7: ABC123 trained on MCAC applied to FSC-147 produces accurate counts. The ground truth and predicted counts are in the top left and top right corners.

![](images/0fba9ea684dcee9aaf8b17472b9cc52fac21df32edce36d94f86e4ef69a7bfe4.jpg)

![](images/028b26e5bea6e5e12c4950ad0a1bbbce37ad6343e43a3d937bc59fa44b1d2d20.jpg)

![](images/15efb5d09dbfa4e9b71e20fb2cb14899d4e4ae16e72954089e5f7e03ec9fa6ea.jpg)

![](images/b623d6b53e2069c75e50d25fe65536b0e9c42e9db08fb8fbeab4dba337f37f9c.jpg)

# 6 Conclusion

In this work, we present ABC123, a multi-class exemplar-free class-agnostic counter, and show that it is superior to prior exemplar-based methods in multi-class settings. ABC123 requires no human input at inference time, works in complex settings with more than one kind of object present, and outputs easy-to-understand information in the form of examples of the counted objects. Due to this, it has potential for deployment in various fields. We also propose MCAC, a multi-class class-agnostic counting dataset, and use it to train our method as well as to demonstrate that exemplar-based counting methods may not be as robust as previously assumed in multi-class settings.

Table 3: Comparison to SOTA methods when trained on MCAC and applied to the cases in FSC-147 with fewer than 300 objects. Combining sub-class density maps with a sum rather than a max is more effective, as it is more accurate in cases where instances of the sub-classes are spatially close or overlapping, since both instances are counted completely. CounTR† is an exemplar-free modification of CounTR.

<table><tr><td rowspan="2">Method</td><td rowspan="2">Shots</td><td rowspan="2">Sub-Class Combine</td><td colspan="4">Val Set</td><td colspan="4">Test Set</td></tr><tr><td>MAE</td><td>RMSE</td><td>NAE</td><td>SRE</td><td>MAE</td><td>RMSE</td><td>NAE</td><td>SRE</td></tr><tr><td colspan="11">Exemplar-based</td></tr><tr><td>FamNet+</td><td>3</td><td>N/A</td><td>25.83</td><td>46.31</td><td>0.50</td><td>4.29</td><td>28.05</td><td>45.59</td><td>0.48</td><td>4.33</td></tr><tr><td>BMNet+</td><td>3</td><td>N/A</td><td>29.47</td><td>53.15</td><td>0.51</td><td>4.72</td><td>30.74</td><td>52.00</td><td>0.47</td><td>4.68</td></tr><tr><td>CounTR</td><td>3</td><td>N/A</td><td>21.22</td><td>40.28</td><td>0.47</td><td>3.85</td><td>21.09</td><td>40.79</td><td>0.38</td><td>3.66</td></tr><tr><td>LOCA</td><td>3</td><td>N/A</td><td>25.70</td><td>48.64</td><td>0.45</td><td>4.30</td><td>29.93</td><td>49.89</td><td>0.48</td><td>4.62</td></tr><tr><td colspan="11">Exemplar-free</td></tr><tr><td>CounTR†</td><td>0</td><td>N/A</td><td>23.50</td><td>45.83</td><td>0.42</td><td>4.06</td><td>23.57</td><td>42.00</td><td>0.39</td><td>3.85</td></tr><tr><td>LOCA</td><td>0</td><td>N/A</td><td>29.37</td><td>54.01</td><td>0.51</td><td>4.84</td><td>33.96</td><td>56.66</td><td>0.53</td><td>5.17</td></tr><tr><td>ABC123</td><td>0</td><td>Max</td><td>19.56</td><td>46.71</td><td>0.20</td><td>3.54</td><td>22.43</td><td>47.35</td><td>0.22</td><td>3.70</td></tr><tr><td>ABC123</td><td>0</td><td>Sum</td><td>11.13</td><td>34.47</td><td>0.12</td><td>2.44</td><td>11.75</td><td>33.41</td><td>0.11</td><td>2.38</td></tr></table>

Table 4: Effect of using more prediction heads. Increasing the number of pre-matching predictions improves the quantitative results of our method. However, as the number of heads increases, the percentage of heads that are frequently (>0.4%) matched decreases.

<table><tr><td rowspan="2">m̂</td><td colspan="4">Val Set</td><td colspan="4">Test Set</td><td rowspan="2">Head Utilisation</td></tr><tr><td>MAE</td><td>RMSE</td><td>NAE</td><td>SRE</td><td>MAE</td><td>RMSE</td><td>NAE</td><td>SRE</td></tr><tr><td>4</td><td>9.43</td><td>17.42</td><td>0.31</td><td>2.57</td><td>10.19</td><td>19.44</td><td>0.33</td><td>2.81</td><td>100%</td></tr><tr><td>5</td><td>8.96</td><td>15.93</td><td>0.28</td><td>2.02</td><td>9.52</td><td>17.64</td><td>0.28</td><td>2.23</td><td>100%</td></tr><tr><td>10</td><td>8.39</td><td>14.93</td><td>0.28</td><td>1.97</td><td>9.08</td><td>16.96</td><td>0.27</td><td>2.15</td><td>100%</td></tr><tr><td>20</td><td>7.78</td><td>13.75</td><td>0.26</td><td>1.82</td><td>8.29</td><td>15.53</td><td>0.24</td><td>1.95</td><td>85%</td></tr><tr><td>50</td><td>7.26</td><td>12.80</td><td>0.25</td><td>1.69</td><td>7.99</td><td>15.14</td><td>0.24</td><td>1.81</td><td>68%</td></tr><tr><td>100</td><td>7.11</td><td>12.81</td><td>0.23</td><td>1.59</td><td>7.43</td><td>14.48</td><td>0.21</td><td>1.72</td><td>39%</td></tr></table>

![](images/81bb2dbc5e2a836c1b39d4cda0354fcae05d3d5297aeaaff5081a0b6de31dedf.jpg)

Fig. 8: ABC123 trained on MCAC applied to ambiguous images in FSC-147. In cases with class ambiguity, ABC123 often finds valid-but-unknown counts, i.e. counts that do not align with the human annotations. Similar to an unguided segmentation method (right), ABC123 discovers unlabelled classes, divides labelled classes into sub-classes, or counts component parts of objects. The unguided segmentations are coloured by latent feature to demonstrate how they would be grouped.

# References

1. Cao, X., Wang, Z., Zhao, Y., Su, F.: Scale aggregation network for accurate and efficient crowd counting. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 734-750 (2018)
2. Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9650-9660 (2021)
3. Crouse, D.F.: On implementing 2D rectangular assignment algorithms. IEEE Transactions on Aerospace and Electronic Systems 52(4), 1679-1696 (2016). https://doi.org/10.1109/TAES.2016.140952
4. Djukic, N., Lukezic, A., Zavrtanik, V., Kristan, M.: A low-shot object counting network with iterative prototype adaptation. arXiv preprint arXiv:2211.08217 (2023)
5. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. In: International Conference on Learning Representations (2020)
6. Go, H., Byun, J., Park, B., Choi, M.A., Yoo, S., Kim, C.: Fine-grained multi-class object counting. In: 2021 IEEE International Conference on Image Processing (ICIP). pp. 509-513. IEEE (2021)
7. Hobley, M., Prisacariu, V.: Learning to count anything: Reference-less class-agnostic counting with weak supervision. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (2023)
8. Hobley, M.A., Prisacariu, V.A.: DMS: Differentiable mean shift for dataset agnostic task specific clustering using side information. arXiv preprint arXiv:2305.18492 (2023)
9. Hoekendijk, J., Kellenberger, B., Aarts, G., Brasseur, S., Poiesz, S.S., Tuia, D.: Counting using deep learning regression gives value to ecological surveys. Scientific Reports 11(1), 1-12 (2021)
10. Jiang, R., Liu, L., Chen, C.: CLIP-Count: Towards text-guided zero-shot object counting. arXiv preprint arXiv:2305.07304 (2023)
11. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., Dollar, P., Girshick, R.: Segment anything. arXiv:2304.02643 (2023)
12. Lin, H., Hong, X., Wang, Y.: Object counting: You only need to look at one. arXiv preprint arXiv:2112.05993 (2021)
13. Liu, C., Zhong, Y., Zisserman, A., Xie, W.: CounTR: Transformer-based generalised visual counting. arXiv preprint arXiv:2208.13721 (2022)
14. Lu, E., Xie, W., Zisserman, A.: Class-agnostic counting. In: Asian Conference on Computer Vision (2018)
15. Ma, Z., Hong, X., Shangguan, Q.: Can SAM count anything? An empirical study on SAM counting. arXiv preprint arXiv:2304.10817 (2023)
16. OpenAI: GPT-4 technical report (2023)
17. Paiss, R., Ephrat, A., Tov, O., Zada, S., Mosseri, I., Irani, M., Dekel, T.: Teaching CLIP to count to ten. arXiv preprint arXiv:2302.12066 (2023)
18. Ranjan, V., Hoai, M.: Exemplar free class agnostic counting. arXiv preprint arXiv:2205.14212 (2022)
19. Ranjan, V., Sharma, U., Nguyen, T., Hoai, M.: Learning to count everything. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3394-3403 (2021)
20. Savva, M., Chang, A.X., Hanrahan, P.: Semantically-enriched 3D models for common-sense knowledge. CVPR 2015 Workshop on Functionality, Physics, Intentionality and Causality (2015)
21. Shi, M., Lu, H., Feng, C., Liu, C., Cao, Z.: Represent, compare, and learn: A similarity-aware framework for class-agnostic counting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9529-9538 (2022)
22. Sokhandan, N., Kamousi, P., Posada, A., Alese, E., Rostamzadeh, N.: A few-shot sequential approach for object counting. arXiv preprint arXiv:2007.01899 (2020)
23. Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., Jégou, H.: Training data-efficient image transformers & distillation through attention. In: International Conference on Machine Learning. pp. 10347-10357. PMLR (2021)
24. Troisemaine, C., Lemaire, V., Gosselin, S., Reiffers-Masson, A., Flocon-Cholet, J., Vaton, S.: Novel class discovery: An introduction and key concepts. arXiv preprint arXiv:2302.12028 (2023)
25. Xie, J., Girshick, R., Farhadi, A.: Unsupervised deep embedding for clustering analysis. In: Balcan, M.F., Weinberger, K.Q. (eds.) Proceedings of The 33rd International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 48, pp. 478-487. PMLR, New York, New York, USA (20-22 Jun 2016), https://proceedings.mlr.press/v48/xieb16.html
26. Xie, W., Noble, J.A., Zisserman, A.: Microscopy cell counting and detection with fully convolutional regression networks. Computer Methods in Biomechanics and Biomedical Engineering: Imaging & Visualization 6(3), 283-292 (2018)
27. Xu, J., Le, H., Nguyen, V., Ranjan, V., Samaras, D.: Zero-shot object counting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15548-15557 (2023)
28. Yang, S.D., Su, H.T., Hsu, W.H., Chen, W.C.: Class-agnostic few-shot object counting. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 870-878 (2021)
29. Yang, Y., Xu, D., Nie, F., Yan, S., Zhuang, Y.: Image clustering using local discriminant models and global integration. IEEE Transactions on Image Processing 19(10), 2761-2773 (2010). https://doi.org/10.1109/TIP.2010.2049235
abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a3cb1ac678dcda0bdbb10136d024d473301000cd18b041a1ace2f754327bbb9d
size 843460

abceasyas123ablindcounterforexemplarfreemulticlassclassagnosticcounting/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e01253f413f6094a13447ccdeab97a9dbd2134d554ec9e721577b7da51a67e6a
size 335324

accdiffusionanaccuratemethodforhigherresolutionimagegeneration/7d58acad-7a35-400a-b95e-3e542e5768b9_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f963a74d96c8d14f259b198e889d90b15d882e7bddea8c4e51a2159b973406f8
size 87559

accdiffusionanaccuratemethodforhigherresolutionimagegeneration/7d58acad-7a35-400a-b95e-3e542e5768b9_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ac6ca2223e009478f08917d61b226ecb3c1491ab83956e2055bb942c30c79dc
size 108799

accdiffusionanaccuratemethodforhigherresolutionimagegeneration/7d58acad-7a35-400a-b95e-3e542e5768b9_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b22adee6ffa0152df2df61ccd2bda2d6d1e6c12467cfb300f39a1d739b237a01
size 8492722

accdiffusionanaccuratemethodforhigherresolutionimagegeneration/full.md
ADDED
@@ -0,0 +1,416 @@
# AccDiffusion: An Accurate Method for Higher-Resolution Image Generation

Zhihang Lin$^{1}$, Mingbao Lin$^{2}$, Meng Zhao$^{3}$, and Rongrong Ji$^{1\star}$

<sup>1</sup> Key Laboratory of Multimedia Trusted Perception and Efficient Computing, Ministry of Education of China, Xiamen University, China.
<sup>2</sup> Skywork AI.
<sup>3</sup> Tencent Youtu Lab.

zhihanglin@stu.xmu.edu.cn, linmb001@outlook.com, arthurizar8421@gmail.com, rrji@xmu.edu.cn

Abstract. This paper addresses the object repetition issue in patch-wise higher-resolution image generation. We propose AccDiffusion, an accurate, training-free method for patch-wise higher-resolution image generation. An in-depth analysis in this paper reveals that an identical text prompt for different patches causes repeated object generation, while no prompt compromises image details. Therefore, AccDiffusion, for the first time, proposes to decouple the vanilla image-content-aware prompt into a set of patch-content-aware prompts, each of which serves as a more precise description of an image patch. AccDiffusion also introduces dilated sampling with window interaction for better global consistency in higher-resolution image generation. Experimental comparisons with existing methods demonstrate that AccDiffusion effectively addresses the issue of repeated object generation and leads to better performance in higher-resolution image generation. Our code is released at https://github.com/lzhxmu/AccDiffusion.

Keywords: Image Generation $\cdot$ High Resolution $\cdot$ Diffusion Model

# 1 Introduction

Diffusion models have garnered significant attention and made notable advancements with the emergence of works such as DDPM [10], DDIM [28], ADM [3], and LDMs [21], owing to their outstanding generative ability and wide range of applications. However, stable diffusion models entail tremendous training costs, primarily due to the large number of timesteps required and the quadratic relationship between computing cost and resolution. Consequently, it is common to limit the resolution to a relatively low level during training, such as $512^{2}$ for SD 1.5 [20] and $1024^{2}$ for SDXL [17]. Even at such low resolution, Stable Diffusion 1.5 still entails over 20 days of training on 256 A100 GPUs [20]. Nonetheless, high-resolution generation finds widespread application in real-life scenarios, such as advertisements. The demand for generating high-resolution images clashes with the expensive training costs involved.

(Fig. 1 image grid: results from (a) SDXL, (b) Attn-SF, (c) ScaleCrafter, (d) MultiDiffusion, and (e) DemoFusion, each annotated with GPU memory usage ranging from 7.87 G to 18.12 G; prompt: "A cute teddy bear in front of a plain white wall, warm and brown fur, soft and fluffy.")

Fig. 1: Comparison of image quality and GPU overhead for existing higher-resolution generation methods. The GPU memory of Attn-SF [12] and ScaleCrafter [6] significantly increases with resolution, while patch-wise denoising methods, e.g., MultiDiffusion [1] and DemoFusion [4], suffer from the object repetition issue. Best viewed zoomed in.
Therefore, researchers have shifted their focus to training stable diffusion models at low resolution and subsequently applying fine-tuning [30, 33] or training-free [1, 4, 6, 14] methods to achieve image generation extrapolation. A naive approach is to directly use pre-trained stable diffusion models to generate higher-resolution images. However, the images produced by this approach have been shown to suffer from issues such as object repetition and inconsistent object structures [4, 12]. Previous methods attempted image generation extrapolation from the perspective of attention entropy [12] or the receptive field of the stable diffusion model [6]. However, these methods have proven impractical in two respects, as shown in Fig. 1(b,c): (1) a substantial increase in GPU memory consumption as the resolution rises [33], and (2) poor quality of the generated images [4]. Thanks to stable diffusion's outstanding local detail generation ability, recent works [1, 4, 14] have started conducting higher-resolution image generation in a patch-wise fashion for the sake of lower GPU memory consumption. Previous works MultiDiffusion [1] and SyncDiffusion [14] fuse multiple overlapped patch-wise denoising results to generate seamless higher-resolution panoramic images. However, directly applying these approaches to generate higher-resolution object-centric images leads to repeated and distorted results lacking global semantic coherence, as shown in Fig. 1(d).

(Fig. 2 image panels: (a) image-content-aware prompt; (b) patch-content-aware prompt (ours).)

Fig. 2: Image-content-aware prompt vs. patch-content-aware prompt.
Recently, DemoFusion [4] introduced global semantic information into patch-wise higher-resolution image generation through residual connection and dilated sampling. It only partially solves the problem of repeated object generation and still exhibits small-object repetition in ultra-high-resolution image generation, as depicted in Fig. 1(e). How to completely resolve the issue of repeated object generation in patch-wise higher-resolution image generation remains an open problem.

In this paper, our in-depth analysis of DemoFusion [4] indicates that, as illustrated in Fig. 2(a), small-object repetition is the adversarial outcome of two forces: an identical text prompt on all patches, which encourages the generation of repeated objects, and global semantic information from residual connection and dilated sampling, which suppresses it. To address this issue, we propose AccDiffusion, an accurate method for higher-resolution image generation, whose major novelty is two-fold:

(1) To completely solve small-object repetition, as illustrated in Fig. 2(b), we propose to decouple the vanilla image-content-aware prompt into a set of patch-content-aware substrings, each of which serves as a more precise prompt describing the patch contents. Specifically, we utilize the cross-attention maps from the low-resolution generation process to determine whether a word token should serve as part of the prompt for a patch. If a word token has a high response in the cross-attention map region corresponding to the patch, it should be included in the prompt, and vice versa.

(2) Through visualization, we observe that the dilated sampling operation in DemoFusion generates globally inconsistent and noisy information, disrupting the generation of higher-resolution images. Such inconsistency stems from the independent denoising of dilation samples without interaction. To address this, we employ a position-wise bijective function to enable interaction between the noise of different dilation samples. Experimental results show that our dilated sampling with window interaction leads to smoother global semantic information (see Fig. 3(c,d)).

We have conducted extensive experiments to verify the effectiveness of AccDiffusion. The qualitative results demonstrate that AccDiffusion effectively addresses the issue of repeated object generation in higher-resolution image generation, and the quantitative results show that it achieves state-of-the-art performance in training-free image generation extrapolation.

# 2 Related Work

# 2.1 Diffusion Models

Diffusion models [3, 10, 21, 28] are generative probabilistic models that transform Gaussian noise into samples through gradual denoising steps. DDPM [10] is a pioneering model that demonstrates impressive image generation capabilities using Markovian forward and reverse processes. Building on DDPM, DDIM [28] utilizes non-Markovian reverse processes to effectively decrease sampling time. Furthermore, LDMs [21] move the diffusion process into a latent space, resulting in efficient training and inference. Subsequently, a series of LDM-based stable diffusion models have been open-sourced and achieve state-of-the-art image synthesis capability. This has led to widespread applications in various downstream generative tasks, including images [3, 10, 16, 22, 28], audio [5, 11], video [9, 26], and 3D objects [15, 18, 31].

# 2.2 Training-Free Higher-Resolution Image Generation

Although stable diffusion demonstrates impressive results, its training cost restricts training to low resolution, and it thus generates low-fidelity images when the inference resolution differs from the training resolution [4, 6, 12]. Recent works [1, 4, 6, 12] have attempted to utilize pre-trained diffusion models for generating higher-resolution images. These works can be broadly categorized into two groups: direct generation [6, 12] and indirect generation [1, 4]. Direct generation methods scale the input of the diffusion model to the target resolution and then perform the forward and reverse processes directly at that resolution. These methods require modifications to the fundamental architecture, such as adjusting the attention scale factor [12] or the receptive field of convolutional kernels [6], to prevent repeated generation. However, the generated images fail to yield the desired higher-resolution detail. Additionally, direct generation methods encounter out-of-memory errors when generating ultra-high-resolution images (e.g., 8K) on consumer-grade GPUs, due to the quadratic increase in memory overhead as the latent space size grows. Indirect generation methods generate higher-resolution images through multiple overlapped denoising paths of LDMs and are capable of generating images of any resolution on consumer-grade GPUs. However, these methods [1, 14] suffer from local repetition and structural distortion. Du et al. [4] tried to address repeated generation by introducing global structural information from the lower-resolution image.

# 3 Method

# 3.1 Background

Latent Diffusion Models (LDMs). LDMs [21] apply an autoencoder $\mathcal{E}$ to encode an image $\mathbf{x}_0 \in \mathbb{R}^{H \times W \times 3}$ into a latent representation $\mathbf{z}_0 = \mathcal{E}(\mathbf{x}_0) \in \mathbb{R}^{h \times w \times c}$, where the regular diffusion process is constructed as:

$$
\mathbf{z}_t = \sqrt{\bar{\alpha}_t}\,\mathbf{z}_0 + \sqrt{1 - \bar{\alpha}_t}\,\varepsilon, \quad \varepsilon \sim \mathcal{N}(0, \mathbf{I}), \tag{1}
$$

where $\{\alpha_t\}_{t=1}^T$ is a set of prescribed variance schedules and $\bar{\alpha}_t = \prod_{i=1}^t \alpha_i$. To perform conditional sequential denoising, a network $\varepsilon_\theta$ is trained to predict the added noise, constrained by the following training objective:

$$
\min_\theta \mathbb{E}_{\mathcal{E}(\mathbf{x}_0),\, \varepsilon \sim \mathcal{N}(0, \mathbf{I}),\, t \sim \mathrm{Uniform}(1, T)} \left[ \left\| \varepsilon - \varepsilon_\theta\big(\mathbf{z}_t, t, \tau_\theta(y)\big) \right\|_2^2 \right], \tag{2}
$$

in which $\tau_\theta(y) \in \mathbb{R}^{M \times d_\tau}$ is an intermediate representation of the condition $y$ and $M$ is the number of word tokens in the prompt $y$. $\tau_\theta(y)$ is then mapped to the keys and values of the cross-attention in the U-Net $\varepsilon_\theta$:

$$
Q = W_Q \cdot \varphi(\mathbf{z}_t), \quad K = W_K \cdot \tau_\theta(y), \quad V = W_V \cdot \tau_\theta(y),
$$

$$
\mathcal{M} = \operatorname{Softmax}\left(\frac{QK^T}{\sqrt{d}}\right), \quad \operatorname{Attention}(Q, K, V) = \mathcal{M} \cdot V. \tag{3}
$$

Here, for simplicity, we omit the expression of multi-head cross-attention, and $\varphi(\mathbf{z}_t) \in \mathbb{R}^{N \times d_\epsilon}$ denotes an intermediate representation of the noise in the U-Net, where $N = h \times w$ is the pixel number of the latent noise $\mathbf{z}_t$. $W_Q \in \mathbb{R}^{d \times d_\epsilon}$, $W_K \in \mathbb{R}^{d \times d_\tau}$, and $W_V \in \mathbb{R}^{d \times d_\tau}$ are learnable projection matrices. $\mathcal{M} \in \mathbb{R}^{N \times M}$ is the cross-attention map.

In contrast, the denoising process aims to recover the cleaner version $\mathbf{z}_{t-1}$ from $\mathbf{z}_t$ by estimating the noise, which can be expressed as:

$$
\mathbf{z}_{t-1} = \sqrt{\frac{\bar{\alpha}_{t-1}}{\bar{\alpha}_t}}\,\mathbf{z}_t + \sqrt{\bar{\alpha}_{t-1}}\left(\sqrt{\frac{1}{\bar{\alpha}_{t-1}} - 1} - \sqrt{\frac{1}{\bar{\alpha}_t} - 1}\right) \cdot \varepsilon_\theta\big(\mathbf{z}_t, t, \tau_\theta(y)\big). \tag{4}
$$

During inference, a decoder $\mathcal{D}$ is employed at the end of the denoising process to reconstruct the image from the latent representation, $\mathbf{x}_0 = \mathcal{D}(\mathbf{z}_0)$.
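
For concreteness, the deterministic DDIM update in Eq. (4) can be written in a few lines of NumPy. This is a minimal sketch of our own, not the authors' implementation; `eps` stands in for the predicted noise $\varepsilon_\theta(\mathbf{z}_t, t, \tau_\theta(y))$ and `alpha_bar_*` for the cumulative schedule $\bar{\alpha}$ from Eq. (1).

```python
import numpy as np

def ddim_step(z_t, eps, alpha_bar_t, alpha_bar_prev):
    """One deterministic DDIM denoising step, i.e., Eq. (4) in equivalent form."""
    # Clean-latent estimate implied by the noise prediction (inverting Eq. 1).
    z0_hat = (z_t - np.sqrt(1.0 - alpha_bar_t) * eps) / np.sqrt(alpha_bar_t)
    # Re-noise the estimate to the previous timestep's noise level.
    return np.sqrt(alpha_bar_prev) * z0_hat + np.sqrt(1.0 - alpha_bar_prev) * eps

# Toy usage with random stand-ins for the latent and the predicted noise.
rng = np.random.default_rng(0)
z_t = rng.standard_normal((4, 128, 128))   # c x h x w latent
eps = rng.standard_normal(z_t.shape)       # stand-in for eps_theta(z_t, t, tau(y))
z_prev = ddim_step(z_t, eps, alpha_bar_t=0.5, alpha_bar_prev=0.6)
```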

Patch-wise Denoising. MultiDiffusion [1] achieves higher-resolution image generation by fusing multiple overlapped denoising patches. In brief, given a latent representation $\mathcal{Z}_t \in \mathbb{R}^{h' \times w' \times c}$ of a higher-resolution image with $h' > h$ and $w' > w$, MultiDiffusion utilizes a shifted window to sample patches from $\mathcal{Z}_t$, resulting in a series of patch noises $\{\mathbf{z}_t^i\}_{i=1}^{P_1}$, where $\mathbf{z}_t^i \in \mathbb{R}^{h \times w \times c}$ and $P_1 = \left(\frac{h' - h}{d_h} + 1\right) \times \left(\frac{w' - w}{d_w} + 1\right)$ is the total number of patches, with $d_h$ and $d_w$ the vertical and horizontal strides, respectively. MultiDiffusion then performs patch-wise denoising via Eq. (4) to obtain $\{\mathbf{z}_{t-1}^i\}_{i=1}^{P_1}$, from which $\mathcal{Z}_{t-1}$ is reconstructed, with the overlapped parts taking the average. Eventually, a higher-resolution image is obtained by directly decoding $\mathcal{Z}_0$ into the image $\mathbf{X}_0$.
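
The NumPy sketch below illustrates one such fusion step under the shifted-window scheme described above. It is our own illustration under simplifying assumptions: `denoise_patch` is a hypothetical stand-in for the per-patch update of Eq. (4), and a channels-first layout is assumed.

```python
import numpy as np

def shifted_windows(h_hr, w_hr, h, w, d_h, d_w):
    """Top-left corners of overlapping h x w windows covering an h_hr x w_hr latent."""
    return [(i, j) for i in range(0, h_hr - h + 1, d_h)
                   for j in range(0, w_hr - w + 1, d_w)]

def fuse_patch_denoising(Z_t, h, w, d_h, d_w, denoise_patch):
    """One MultiDiffusion-style step: denoise every window, average the overlaps."""
    c, h_hr, w_hr = Z_t.shape
    acc = np.zeros_like(Z_t)
    cnt = np.zeros((1, h_hr, w_hr))
    for (i, j) in shifted_windows(h_hr, w_hr, h, w, d_h, d_w):
        patch = Z_t[:, i:i + h, j:j + w]
        acc[:, i:i + h, j:j + w] += denoise_patch(patch)  # z_{t-1}^i via Eq. (4)
        cnt[:, i:i + h, j:j + w] += 1.0
    # Overlapped parts take the average; the max guards uncovered pixels.
    return acc / np.maximum(cnt, 1.0)

# Toy usage: identity "denoiser" on a 16x16 latent with 8x8 windows, stride 4.
Z = np.random.default_rng(0).standard_normal((4, 16, 16))
Z_prev = fuse_patch_denoising(Z, h=8, w=8, d_h=4, d_w=4, denoise_patch=lambda p: p)
```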

(Fig. 3 image panels (a)-(d).)

Fig. 3: Results of higher-resolution image generation. (a) The result of DemoFusion without a text prompt. (b) The result of DemoFusion without residual connection and dilated sampling. (c) The result of dilated sampling without window interaction. (d) The result of our dilated sampling with window interaction.

Based on MultiDiffusion, DemoFusion [4] additionally introduces: 1) progressive upscaling to gradually generate higher-resolution images; 2) residual connection to maintain global consistency with the lower-resolution image by injecting the intermediate noise-inversed representation; and 3) dilated sampling to enhance the global semantic information of higher-resolution images.

# 3.2 In-depth Analysis of Small Object Repetition

DemoFusion demonstrates the possibility of using pre-trained LDMs to generate higher-resolution images. However, as shown in Fig. 1(e), small-object repetition continues to limit the performance of DemoFusion.

Delving into an in-depth analysis, we respectively: 1) remove the text prompt during the higher-resolution generation of DemoFusion, and the resulting Fig. 3(a) shows that repeated objects disappear but details degrade further; 2) remove the residual connection and dilated sampling in DemoFusion, and the resulting Fig. 3(b) shows severe large-object repetition. Therefore, we can safely conclude that small-object repetition is the adversarial outcome of an identical text prompt on all patches and the operations of residual connection and dilated sampling: the former encourages the generation of repeated objects while the latter suppresses it. Consequently, DemoFusion tends to generate small repeated objects.

Overall, text prompts play a significant role in image generation. Removing text prompts during higher-resolution generation is not a viable solution to small-object repetition, as it leads to a decline in image quality. Instead, we require more accurate prompts specifically tailored to each patch. That is, if an object is not present in a patch, the corresponding word in the text prompt should not serve as a prompt for that patch.

To this end, in Sec. 3.3, we eliminate the restriction of having an identical text prompt for all patches found in previous patch-wise generation approaches. Instead, we generate more precise patch-content-aware prompts that adapt to the content of different patches. In Sec. 3.4, we introduce how to enhance the global structure information to generate higher-resolution images without repetition.

(Fig. 4 image panels; the pipeline in (c) proceeds as: Image → Attention Map → Mask → Eroded Mask → Dilated Mask → Patch-level prompt.)

Fig. 4: Visualization of the averaged attention maps from the up blocks and down blocks in the U-Net. We reshape each attention map into a 2D shape before visualization. (a) Cross-attention map visualization using open-source code [7]. (b) Highly responsive regions of each word. (c) Illustration of the patch-level prompt generation process, including morphological operations to eliminate small connected areas. Here we use the word "Astronaut" as an example; all words in the prompt go through the above process.

# 3.3 Patch-Content-Aware Prompts

Considering the significance of the text prompt in higher-resolution generation, we seek a patch-content-aware substring set $\{\gamma^i\}_{i=1}^{P_1}$ of the entire text prompt, each element of which is responsible for injecting a condition into the corresponding patch. In general, it is challenging to know in advance what content a patch will generate, but in DemoFusion [4], the global information of the low-resolution image is injected into the high-resolution generation through residual connections. Therefore, the structure of the generated higher-resolution image is similar to that of the low-resolution image, which inspires us to infer patch contents from the low-resolution image. A direct but cumbersome approach is to manually inspect the patch content of the low-resolution image and then set the prompt for each patch, which undermines the usability of stable diffusion. Another approach is to use SAM [13] to segment the upscaled low-resolution image and determine whether each object appears in a patch, which introduces the substantial storage and computational costs of a segmentation model. Automatically generating patch-content-aware prompts without external models is therefore the key to success.

Inspired by image editing [7], we instead use the cross-attention maps from the low-resolution generation, $\mathcal{M} \in \mathbb{R}^{N \times M}$, to determine patch-content-aware prompts. Recall that $N$ represents the pixel number of the latent noise $\mathbf{z}_t$ and $M$ denotes the number of word tokens in the prompt $y$. Thus, the column $\mathcal{M}_{:,j}$ represents the attentiveness of the latent noise to the $j$-th word token. The basic principle is that the attentiveness $\mathcal{M}_{i,j}$ of image regions attended by the $j$-th word token is mostly higher than that of others, as shown in Fig. 4(a). To find the highly relevant region of each word token, we convert the attention map $\mathcal{M}$ into a binary mask $\mathcal{B} \in \mathbb{R}^{N \times M}$ as:

$$
\mathcal{B}_{i,j} = \begin{cases} 1, & \text{if } \mathcal{M}_{i,j} > \overline{\mathcal{M}}_{:,j}, \\ 0, & \text{otherwise}, \end{cases} \tag{5}
$$

where $i$ and $j$ enumerate $N$ and $M$, respectively. The threshold $\overline{\mathcal{M}}_{:,j}$ is the mean of $\mathcal{M}_{:,j}$, whose design is elaborated in Sec. 4.4. Regions with values above the threshold are considered highly responsive, while regions with values below it are considered less responsive.

Next, we obtain word-level masks $\{\hat{\mathcal{B}}_j\}_{j=1}^M$ using the following equation:

$$
\hat{\mathcal{B}}_j = \operatorname{Reshape}\left(\mathcal{B}_{:,j}, (h_a, w_a)\right), \tag{6}
$$

where $h_a = \frac{h}{s}$ and $w_a = \frac{w}{s}$ represent the height and width of the attention map, respectively. Recall that $h$ and $w$ represent the height and width of the noise, and $s$ corresponds to the down-sampling scale of the corresponding block in the U-Net. The mask $\mathcal{B}_{:,j}$ for the $j$-th word token is thus reshaped into a 2D map for further processing.

After obtaining the highly responsive regions of each word, we observe that they contain many small connected areas, as shown in Fig. 4(b). To alleviate the influence of these small connected areas, we apply the opening operation $\mathcal{O}(\cdot)$ from mathematical morphology [27], resulting in the final mask for each word, as shown in Fig. 4(c). The processed masks $\{\tilde{\mathcal{B}}_j\}_{j=1}^M$ can be formulated as:

$$
\tilde{\mathcal{B}}_j = \mathcal{O}(\hat{\mathcal{B}}_j) = \omega(\delta(\hat{\mathcal{B}}_j)), \tag{7}
$$

where $\delta(\cdot)$ and $\omega(\cdot)$ are the erosion and dilation operations, respectively. Next, we interpolate $\tilde{\mathcal{B}}_j \in \mathbb{R}^{h_a \times w_a}$ to $\tilde{\mathcal{B}}_j' \in \mathbb{R}^{h_a' \times w_a'}$, where $h_a' = \frac{h'}{s}$ and $w_a' = \frac{w'}{s}$. Recall that $h'$ and $w'$ are the height and width of the higher-resolution latent representation defined in Sec. 3.1. Inspired by MultiDiffusion [1], we use a shifted window to sample patches from $\tilde{\mathcal{B}}_j'$, resulting in a series of patch masks $\{\{\mathbf{m}_j^i\}_{i=1}^{P_1}\}_{j=1}^M$, where $\mathbf{m}_j^i \in \mathbb{R}^{h_a \times w_a}$ and $P_1$ is the total number of patches. It is important to note that each $\mathbf{m}_j^i$ corresponds to a specific patch noise $\mathbf{z}_t^i$.

Recall that if an object is not present in a patch, the corresponding word token in the text prompt should not serve as a prompt for that patch. We can therefore determine the patch-content-aware prompt $\gamma^i$, a sub-sequence of the prompt $y$, for each patch $\mathbf{z}_t^i$ using the following formulation:

$$
\begin{cases} y_j \in \gamma^i, & \text{if } \frac{\sum (\mathbf{m}_j^i)_{:,:}}{h_a \times w_a} > c, \\ y_j \notin \gamma^i, & \text{otherwise}, \end{cases} \tag{8}
$$

where $j$ and $i$ enumerate $M$ and $P_1$, respectively. The pre-given hyper-parameter $c \in (0, 1)$ determines whether the proportion of a word $y_j$'s highly responsive region exceeds the threshold for inclusion in the prompt of patch $\mathbf{z}_t^i$. We then concatenate all words that should appear in a patch, resulting in the patch-content-aware prompts $\{\gamma^i\}_{i=1}^{P_1}$ for the noise patches $\{\mathbf{z}_t^i\}_{i=1}^{P_1}$ during patch-wise denoising.
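
Putting Eqs. (5)-(8) together, the following Python sketch shows one plausible way to derive per-patch prompts from low-resolution cross-attention maps. It is a minimal illustration under our own assumptions: a 3x3 structuring element for the opening, nearest-neighbour upsampling via `np.kron`, word-level (rather than sub-word) tokens, and `c = 0.3` as in Sec. 4.2.

```python
import numpy as np
from scipy.ndimage import binary_opening

def patch_prompts(attn, tokens, h_a, w_a, scale, win, stride, c=0.3):
    """Sketch of Eqs. (5)-(8): per-patch prompts from cross-attention maps.
    attn has shape (h_a * w_a, n_tokens); one column per word token."""
    masks = []
    for j in range(attn.shape[1]):
        col = attn[:, j]
        b = (col > col.mean()).reshape(h_a, w_a)           # Eq. (5): column-mean threshold
        b = binary_opening(b, structure=np.ones((3, 3)))   # Eq. (7): erosion then dilation
        masks.append(np.kron(b, np.ones((scale, scale))))  # upsample to the HR grid
    H, W = masks[0].shape
    prompts = []
    for i in range(0, H - win + 1, stride):
        for j in range(0, W - win + 1, stride):
            # Eq. (8): keep a word if its responsive area fraction in the window exceeds c.
            kept = [tok for tok, m in zip(tokens, masks)
                    if m[i:i + win, j:j + win].mean() > c]
            prompts.append(" ".join(kept))
    return prompts

# Toy usage: random attention over a 32x32 map for a 5-token prompt.
rng = np.random.default_rng(0)
attn = rng.random((32 * 32, 5))
print(patch_prompts(attn, "astronaut on mars during sunset".split(),
                    h_a=32, w_a=32, scale=2, win=32, stride=16)[:3])
```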

(Fig. 5 image.)

Fig. 5: Illustration of dilated sampling with window interaction, for an $8 \times 8$ higher-resolution and a $4 \times 4$ low-resolution latent. The numbers $\{1, 2, 3, 4\}$ represent the different positions within the same window (same color). The interaction operation is conducted within each window.

# 3.4 Dilated Sampling with Window Interaction

Recall that $\mathcal{Z}_t \in \mathbb{R}^{h' \times w' \times c}$ stands for the latent representation of a higher-resolution image in Sec. 3.1. In this section, we propose dilated sampling with window interaction over a set of dilated samples $\{\mathcal{D}_t^k\}_{k=1}^{P_2}$, to improve the global semantic information in the latent representation $\mathcal{Z}_t$. In DemoFusion [4], each sample $\mathcal{D}_t^k$ is a subset of the latent representation $\mathcal{Z}_t$, formulated as:

$$
\mathcal{D}_t^k = (\mathcal{Z}_t)_{i::h_s,\ j::w_s,\ :}, \tag{9}
$$

where $k = i \times w_s + j + 1$ ranges from 1 to $P_2$, and the variables $i$ and $j$ range from 0 to $h_s - 1$ and $w_s - 1$, respectively. The sampling strides are $h_s = \frac{h'}{h}$ and $w_s = \frac{w'}{w}$. Recall that $\{h', w'\}$ and $\{h, w\}$ are the heights and widths of the higher- and low-resolution latent representations. DemoFusion independently denoises $\mathcal{D}_t$ via Eq. (4) to obtain $\mathcal{D}_{t-1} \in \mathbb{R}^{P_2 \times h \times w \times c}$, where $P_2 = h_s \times w_s$. Then $\{\mathcal{D}_{t-1}^k\}_{k=1}^{P_2}$ is reconstructed as $G_{t-1} \in \mathbb{R}^{h' \times w' \times c}$ and fused with the patch-wise denoised latent representation $\mathcal{Z}_{t-1}$ via:

$$
\hat{\mathcal{Z}}_{t-1} = (1 - \eta) \cdot \mathcal{Z}_{t-1} + \eta \cdot G_{t-1}, \tag{10}
$$

where $(G_{t-1})_{i::h_s,\ j::w_s,\ :} = \mathcal{D}_{t-1}^k$ and $\eta$ decreases from 1 to 0 following a cosine schedule. Due to the lack of interaction between different samples during the denoising process, the global semantic information is non-smooth, as depicted in Fig. 3(c). This sharp global semantic information disturbs the higher-resolution generation.

To solve the above issue, as illustrated in Fig. 5, we enable window interaction between different samples before each denoising step through a bijective function:

$$
\mathcal{D}_t^{k,h,w} = \mathcal{D}_t^{f_t^{h,w}(k),\,h,w}, \quad f_t^{h,w}: \{1, 2, \dots, P_2\} \to \{1, 2, \dots, P_2\}, \tag{11}
$$

where $f_t^{h,w}$ is a bijective function whose mapping varies with the position and the time step. We then perform the normal denoising process on $\{\mathcal{D}_t^k\}_{k=1}^{P_2}$ to obtain $\{\mathcal{D}_{t-1}^k\}_{k=1}^{P_2}$. Before applying Eq. (10) to $\{\mathcal{D}_{t-1}^k\}_{k=1}^{P_2}$, we use the inverse mapping $(f_t^{h,w})^{-1}$ of $f_t^{h,w}$ to recover the positions:

$$
\mathcal{D}_{t-1}^{k,h,w} = \mathcal{D}_{t-1}^{(f_t^{h,w})^{-1}(k),\,h,w}, \quad (f_t^{h,w})^{-1}: \{1, 2, \dots, P_2\} \to \{1, 2, \dots, P_2\}. \tag{12}
$$
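
To make the sampling-shuffling-unshuffling cycle of Eqs. (9)-(12) concrete, here is a minimal NumPy sketch of our own; `denoise` is a hypothetical stand-in for the per-sample denoising of Eq. (4). With an identity denoiser the bijection round-trips exactly, which the final assertion checks.

```python
import numpy as np

def dilated_window_interaction(Z_t, h_s, w_s, denoise, rng):
    """Sketch of Eqs. (9)-(12): shuffle the dilated samples position-wise,
    denoise them, then invert the shuffle before reassembling the global latent."""
    c, Hp, Wp = Z_t.shape
    h, w = Hp // h_s, Wp // w_s
    # Eq. (9): P2 = h_s * w_s dilated samples, each of shape (c, h, w).
    D = np.stack([Z_t[:, i::h_s, j::w_s] for i in range(h_s) for j in range(w_s)])
    P2 = len(D)
    # Eq. (11): an independent permutation of sample indices at every spatial position.
    perm = np.stack([rng.permutation(P2) for _ in range(h * w)], axis=1).reshape(P2, h, w)
    shuffled = np.take_along_axis(D, perm[:, None, :, :], axis=0)
    denoised = np.stack([denoise(d) for d in shuffled])  # stand-in for Eq. (4) per sample
    # Eq. (12): invert the bijection so every value returns to its original sample.
    inv = np.argsort(perm, axis=0)
    D_prev = np.take_along_axis(denoised, inv[:, None, :, :], axis=0)
    # Reassemble G_{t-1} for the fusion of Eq. (10).
    G = np.empty_like(Z_t)
    for k, (i, j) in enumerate([(i, j) for i in range(h_s) for j in range(w_s)]):
        G[:, i::h_s, j::w_s] = D_prev[k]
    return G

# With an identity "denoiser" the shuffle must round-trip exactly.
rng = np.random.default_rng(0)
Z = rng.standard_normal((4, 8, 8))
assert np.allclose(dilated_window_interaction(Z, 2, 2, lambda d: d, rng), Z)
```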

# 4 Experimentation

# 4.1 Experimental Setup

AccDiffusion is a plug-and-play extension to stable diffusion with no additional training cost. We mainly validate the feasibility of AccDiffusion using the pre-trained SDXL [17]; results for other stable diffusion variants are in the supplementary material. AccDiffusion follows the pipeline of DemoFusion [4] (the state-of-the-art higher-resolution generation method) and uses the patch-content-aware prompts during higher-resolution image generation. Additionally, AccDiffusion enhances dilated sampling with window interaction. For fairness, we adhere to the default settings of DemoFusion, as described in the supplementary material. In Sec. 4.2, the hyper-parameter $c$ in Eq. (8) is set to 0.3. Considering the training-free nature of AccDiffusion, we compare against SDXL-DI [17], Attn-SF [12], ScaleCrafter [6], MultiDiffusion [1], and DemoFusion [4]. We do not compare with image super-resolution methods [23, 29, 32], which take images as input and have been shown to lack texture details [4, 6].

# 4.2 Quantitative Comparison

For the quantitative comparison, we use three widely recognized metrics: FID (Fréchet Inception Distance) [8], IS (Inception Score) [24], and CLIP Score [19]. Specifically, $\mathrm{FID}_r$ measures the Fréchet Inception Distance between generated high-resolution images and real images, and $\mathrm{IS}_r$ is the Inception Score of the generated high-resolution images. Since $\mathrm{FID}_r$ and $\mathrm{IS}_r$ necessitate resizing images to $299^2$, which may not be ideal for assessing high-resolution images, we follow [2, 4] and crop 10 local patches at $1\times$ resolution from each generated high-resolution image, then resize them to calculate $\mathrm{FID}_c$ and $\mathrm{IS}_c$. The CLIP Score measures the cosine similarity between the image embedding and the text prompt. We randomly selected 10,000 images from the LAION-5B [25] dataset as our real image set and randomly chose 1,000 text prompts from LAION-5B as inputs for AccDiffusion to generate a set of high-resolution images.
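
As an illustration of this cropped-patch protocol, the sketch below samples 10 random $1\times$-resolution crops from a generated image and resizes them to the $299^2$ Inception input size. The crop size, count, and resampling filter here are our assumptions for illustration, not specifics taken from the paper.

```python
import numpy as np
from PIL import Image

def local_crops(img, crop=1024, n=10, out=299, seed=0):
    """Sample n random 1x-resolution crops and resize to the Inception input size."""
    rng = np.random.default_rng(seed)
    W, H = img.size
    patches = []
    for _ in range(n):
        x = int(rng.integers(0, W - crop + 1))
        y = int(rng.integers(0, H - crop + 1))
        patches.append(img.crop((x, y, x + crop, y + crop)).resize((out, out), Image.BICUBIC))
    return patches

# Toy usage on a synthetic 4096x4096 image; the resulting patches would be fed
# to an Inception network to compute FID_c / IS_c.
img = Image.fromarray(np.random.default_rng(0).integers(0, 256, (4096, 4096, 3), dtype=np.uint8))
patches = local_crops(img)
```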

As shown in Table 1, AccDiffusion achieves the best results, obtaining state-of-the-art performance. Since the implementation of AccDiffusion is based on DemoFusion [4], it exhibits quantitative results and inference times similar to DemoFusion's. However, AccDiffusion outperforms DemoFusion thanks to its more precise patch-content-aware prompts and the more accurate global information introduced by dilated sampling with window interaction, especially in high-resolution generation scenarios. Compared with other training-free image generation extrapolation methods, the quantitative results of AccDiffusion are closer to those calculated at the pre-trained resolution ($1024 \times 1024$), demonstrating its excellent image generation extrapolation capability. Note that FID, IS, and CLIP Score do not intuitively reflect the degree of repeated generation in the resulting images, so we conduct a qualitative comparison to validate the effectiveness of AccDiffusion in eliminating repeated generation.

Table 1: Comparison of quantitative metrics between different training-free image generation extrapolation methods. In the original, bold emphasizes the best result and underline the second-best result.

| Resolution | Method | FID$_r$↓ | IS$_r$↑ | FID$_c$↓ | IS$_c$↑ | CLIP↑ | Time |
|---|---|---|---|---|---|---|---|
| 1024×1024 (1×) | SDXL-DI | 58.49 | 17.39 | 58.08 | 25.38 | 33.07 | <1 min |
| 2048×2048 (4×) | SDXL-DI | 124.40 | 11.05 | 88.33 | 14.64 | 28.11 | 1 min |
| | Attn-SF | 124.15 | 11.15 | 88.59 | 14.81 | 28.12 | 1 min |
| | MultiDiffusion | 81.46 | 12.43 | 44.80 | 20.99 | 31.82 | 2 min |
| | ScaleCrafter | 99.47 | 12.52 | 74.64 | 15.42 | 28.82 | 1 min |
| | DemoFusion | 60.46 | 16.45 | 38.55 | 24.17 | 32.21 | 3 min |
| | AccDiffusion | 59.63 | 16.48 | 38.36 | 24.62 | 32.79 | 3 min |
| 3072×3072 (9×) | SDXL-DI | 170.61 | 7.83 | 112.51 | 12.59 | 24.53 | 3 min |
| | Attn-SF | 170.62 | 7.93 | 112.46 | 12.52 | 24.56 | 3 min |
| | MultiDiffusion | 101.11 | 8.83 | 51.95 | 17.74 | 29.49 | 6 min |
| | ScaleCrafter | 131.42 | 9.62 | 105.79 | 11.91 | 27.22 | 7 min |
| | DemoFusion | 62.43 | 16.41 | 47.45 | 20.42 | 32.25 | 11 min |
| | AccDiffusion | 61.40 | 17.02 | 46.46 | 20.77 | 32.82 | 11 min |
| 4096×4096 (16×) | SDXL-DI | 202.93 | 6.13 | 119.54 | 11.32 | 23.06 | 9 min |
| | Attn-SF | 203.08 | 6.26 | 119.68 | 11.66 | 23.10 | 9 min |
| | MultiDiffusion | 131.39 | 6.56 | 61.45 | 13.75 | 26.97 | 10 min |
| | ScaleCrafter | 139.18 | 9.35 | 116.90 | 9.85 | 26.50 | 20 min |
| | DemoFusion | 65.97 | 15.67 | 59.94 | 16.60 | 33.21 | 25 min |
| | AccDiffusion | 63.89 | 16.05 | 58.51 | 16.72 | 33.79 | 26 min |

# 4.3 Qualitative Comparison

In Fig. 6, AccDiffusion is compared with other training-free text-to-image generation extrapolation methods, namely MultiDiffusion [1], ScaleCrafter [6], and DemoFusion [4]; more results are provided in the supplementary material. MultiDiffusion can generate seamless images but suffers from severe repeated and distorted generation. ScaleCrafter, while avoiding the repetition of astronauts, suffers from structural distortions, as highlighted in the red box, resulting in local repetition and a lack of coherence. DemoFusion tends to generate small, repeated astronauts, with the frequency of repetition escalating with image resolution, thereby significantly degrading image quality. Conversely, AccDiffusion demonstrates superior performance in generating high-resolution images without such repetitions. As Attn-SF [12] and SDXL-DI [17] cannot alleviate the repetition issue, their qualitative results are not compared here.

(Fig. 6 image grid: results at 2048×2048, 4096×2048, 2048×4096, and 4096×4096 for (a) MultiDiffusion, (b) ScaleCrafter, (c) DemoFusion, and (d) AccDiffusion (ours); prompt: "Astronaut on Mars during sunset.")

Fig. 6: Qualitative comparison of our AccDiffusion with existing training-free image generation extrapolation methods [1, 4, 6]. We draw a red box on the generated images to highlight the repeated objects. Best viewed zoomed in.

# 4.4 Ablation Study

In this section, we first perform ablation studies on the two core modules proposed in this paper, and then discuss the settings of the threshold for the binary mask in Eq. (5) and the threshold $c$ for deciding the patch-content-aware prompt in Eq. (8). All experiments are carried out at a resolution of $4096^2$ ($16\times$). Considering that existing quantitative metrics are unable to accurately reflect the extent of object repetition, we provide visualizations to demonstrate the effectiveness of our core modules in preventing repeated generation.

(Fig. 7 image panels.)

Fig. 7: Ablations of patch-content-aware prompts (P) and dilated sampling with window interaction (D). The "✗"/"✓" denote removing/preserving the component. The repeated objects are highlighted by a red box. Best viewed zoomed in.

Table 2: Statistics of the cross-attention maps $\mathcal{M}$, using the prompt $y =$ "Astronaut on mars during sunset." as an example. Each word $\{y_j\}_{j=1}^6$ has a cross-attention map $\{\mathcal{M}_{:,j}\}_{j=1}^6$.

| Statistics | "Astronaut" ($j$=1) | "on" ($j$=2) | "mars" ($j$=3) | "during" ($j$=4) | "sunset" ($j$=5) | "." ($j$=6) |
|---|---|---|---|---|---|---|
| $\min(\mathcal{M}_{:,j})$ | 0.1274 | 0.0597 | 0.2039 | 0.0457 | 0.0921 | 0.0335 |
| $\mathrm{mean}(\mathcal{M}_{:,j})$ | 0.1499 | 0.0676 | 0.2533 | 0.0521 | 0.1189 | 0.0386 |
| $\max(\mathcal{M}_{:,j})$ | 0.2096 | 0.0779 | 0.2979 | 0.0585 | 0.1499 | 0.0419 |

Ablations on Core Modules. As illustrated in Fig. 7, the absence of either module leads to a decline in generation quality. Without patch-content-aware prompts, the resulting image contains numerous repeated small objects, emphasizing their importance in preventing the generation of repetitive elements. Conversely, without our window interaction in dilated sampling, the generated small object becomes unrelated to the image, indicating that dilated sampling with window interaction enhances the image's semantic consistency and suppresses repetition. The maximum number of repeated objects is produced when both modules are removed, while employing both modules simultaneously generates an image free of repetitions. This implies that the two modules work together to effectively alleviate repetitive objects.

Ablations on Hyper-Parameters. As shown in Table 2, the value ranges of the different cross-attention maps $\mathcal{M}_{:,j}$ vary significantly. When using a fixed threshold for these maps, two failure cases may occur. If the threshold is too high, some words have no highly responsive regions in their attention maps, resulting in their absence from every patch-content-aware prompt. Conversely, if the threshold is too low, the entire attention map consists of highly responsive regions, causing those words to appear in every patch-content-aware prompt. By thresholding at the column average $\overline{\mathcal{M}}_{:,j}$, we ensure that each word has suitable highly responsive regions, as demonstrated in Fig. 4(b).

(Fig. 8 image panels.)

Fig. 8: Visual results for different thresholds $c$, prompted by "A cute corgi on the lawn." The repeated objects are highlighted with a red box and the detail degradation with a blue box. Best viewed zoomed in.

Recall that in Eq. (8), $c$ determines whether the proportion of a word $y_j$'s highly responsive region surpasses the threshold required for inclusion in the prompt of patch $\mathbf{z}_t^i$. A very small value of $c$ leads to more words being included in the patch prompt, potentially causing object repetition. Conversely, a very large value of $c$ over-simplifies the patch prompt, which may degrade details. Our analysis is illustrated in Fig. 8. Note that this is a user-specified hyper-parameter, adjustable to suit different application scenarios.

# 5 Limitations and Future Work

AccDiffusion has several limitations: (1) since it follows the DemoFusion pipeline, it inherits similar drawbacks, such as the inference latency caused by progressive upscaling and overlapped patch-wise denoising; (2) AccDiffusion focuses on image generation extrapolation, so the fidelity of the generated high-resolution images depends on the pre-trained diffusion model; (3) relying on the LDM's prior knowledge of cropped images, it may produce locally irrational content in sharp close-up image generation.

Future studies could explore developing non-overlapped patch-wise denoising techniques for efficiently generating high-resolution images.

# 6 Conclusion

In this paper, we propose AccDiffusion to address the repeated object generation issue in training-free higher-resolution image generation. AccDiffusion first introduces patch-content-aware prompts, which make patch-wise denoising more accurate and avoid repeated generation at the root. We then further propose dilated sampling with window interaction to enhance global consistency during higher-resolution image generation. Extensive qualitative and quantitative experiments show that AccDiffusion successfully conducts higher-resolution image generation without object repetition.

# Acknowledgements

This work was supported by the National Science and Technology Major Project (No. 2022ZD0118202), the National Science Fund for Distinguished Young Scholars (No. 62025603), the National Natural Science Foundation of China (No. U21B2037, No. U22B2051, No. U23A20383, No. 62176222, No. 62176223, No. 62176226, No. 62072386, No. 62072387, No. 62072389, No. 62002305, and No. 62272401), and the Natural Science Foundation of Fujian Province of China (No. 2022J06001).

# References

1. Bar-Tal, O., Yariv, L., Lipman, Y., Dekel, T.: Multidiffusion: Fusing diffusion paths for controlled image generation. In: ICML (2023)
2. Chai, L., Gharbi, M., Shechtman, E., Isola, P., Zhang, R.: Any-resolution training for high-resolution image synthesis. In: ECCV (2022)
3. Dhariwal, P., Nichol, A.: Diffusion models beat gans on image synthesis. In: NeurIPS (2021)
4. Du, R., Chang, D., Hospedales, T., Song, Y.Z., Ma, Z.: Demofusion: Democratising high-resolution image generation with no \$\$\$. In: CVPR (2024)
5. Ghosal, D., Majumder, N., Mehrish, A., Poria, S.: Text-to-audio generation using instruction-tuned llm and latent diffusion model. In: ACM MM (2023)
6. He, Y., Yang, S., Chen, H., Cun, X., Xia, M., Zhang, Y., Wang, X., He, R., Chen, Q., Shan, Y.: Scalecrafter: Tuning-free higher-resolution visual generation with diffusion models. In: ICLR (2024)
7. Hertz, A., Mokady, R., Tenenbaum, J., Aberman, K., Pritch, Y., Cohen-Or, D.: Prompt-to-prompt image editing with cross attention control. In: ICLR (2023)
8. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. In: NeurIPS (2017)
9. Ho, J., Chan, W., Saharia, C., Whang, J., Gao, R., Gritsenko, A., Kingma, D.P., Poole, B., Norouzi, M., Fleet, D.J., et al.: Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303 (2022)
10. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: NeurIPS (2020)
11. Huang, R., Huang, J., Yang, D., Ren, Y., Liu, L., Li, M., Ye, Z., Liu, J., Yin, X., Zhao, Z.: Make-an-audio: Text-to-audio generation with prompt-enhanced diffusion models. In: ICML (2023)
12. Jin, Z., Shen, X., Li, B., Xue, X.: Training-free diffusion model adaptation for variable-sized text-to-image synthesis. In: NeurIPS (2023)
13. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. In: ICCV (2023)
14. Lee, Y., Kim, K., Kim, H., Sung, M.: Syncdiffusion: Coherent montage via synchronized joint diffusions. In: NeurIPS (2023)
15. Lin, C.H., Gao, J., Tang, L., Takikawa, T., Zeng, X., Huang, X., Kreis, K., Fidler, S., Liu, M.Y., Lin, T.Y.: Magic3d: High-resolution text-to-3d content creation. In: CVPR (2023)
16. Nichol, A.Q., Dhariwal, P.: Improved denoising diffusion probabilistic models. In: ICML (2021)
17. Podell, D., English, Z., Lacey, K., Blattmann, A., Dockhorn, T., Müller, J., Penna, J., Rombach, R.: Sdxl: Improving latent diffusion models for high-resolution image synthesis. In: ICLR (2024)
18. Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. In: ICLR (2023)
19. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: ICML (2021)
20. Rombach, R., Esser, P.: Stable diffusion v1-5 model card, https://huggingface.co/runwayml/stable-diffusion-v1-5
21. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR (2022)
22. Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E.L., Ghasemipour, K., Gontijo Lopes, R., Karagol Ayan, B., Salimans, T., et al.: Photorealistic text-to-image diffusion models with deep language understanding. In: NeurIPS (2022)
23. Saharia, C., Ho, J., Chan, W., Salimans, T., Fleet, D.J., Norouzi, M.: Image super-resolution via iterative refinement. TPAMI (2022)
24. Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X.: Improved techniques for training gans. In: NeurIPS (2016)
25. Schuhmann, C., Beaumont, R., Vencu, R., Gordon, C., Wightman, R., Cherti, M., Coombes, T., Katta, A., Mullis, C., Wortsman, M., et al.: Laion-5b: An open large-scale dataset for training next generation image-text models. In: NeurIPS (2022)
26. Singer, U., Polyak, A., Hayes, T., Yin, X., An, J., Zhang, S., Hu, Q., Yang, H., Ashual, O., Gafni, O., et al.: Make-a-video: Text-to-video generation without text-video data. In: ICLR (2023)
27. Soille, P., et al.: Morphological image analysis: principles and applications, vol. 2. Springer (1999)
28. Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. In: ICLR (2021)
29. Wang, J., Yue, Z., Zhou, S., Chan, K.C., Loy, C.C.: Exploiting diffusion prior for real-world image super-resolution. arXiv preprint arXiv:2305.07015 (2023)
30. Xie, E., Yao, L., Shi, H., Liu, Z., Zhou, D., Liu, Z., Li, J., Li, Z.: Difffit: Unlocking transferability of large diffusion models via simple parameter-efficient fine-tuning. In: ICCV (2023)
31. Xu, J., Wang, X., Cheng, W., Cao, Y.P., Shan, Y., Qie, X., Gao, S.: Dream3d: Zero-shot text-to-3d synthesis using 3d shape prior and text-to-image diffusion models. In: CVPR (2023)
32. Zhang, K., Liang, J., Van Gool, L., Timofte, R.: Designing a practical degradation model for deep blind image super-resolution. In: CVPR (2021)
33. Zheng, Q., Guo, Y., Deng, J., Han, J., Li, Y., Xu, S., Xu, H.: Any-size-diffusion: Toward efficient text-driven synthesis for any-size hd images. In: AAAI (2024)
accdiffusionanaccuratemethodforhigherresolutionimagegeneration/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8cd584d56397034ede905bda6ac8c178cc85f589ad8d841e7fe9cb9aca4fc432
size 741608

accdiffusionanaccuratemethodforhigherresolutionimagegeneration/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:42a2a00d686ca785f3dceadd98e39a585b92712d23a6dffb45c35171f269b0d2
size 561145

acceleratingimagegenerationwithsubpathlinearapproximationmodel/ae593f34-493e-4aa3-98d2-67b350c8f228_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2a888909d6eacbe520fe3d5c7b1b251194cc63ce374084a7334c46f7f6780dd0
size 88942

acceleratingimagegenerationwithsubpathlinearapproximationmodel/ae593f34-493e-4aa3-98d2-67b350c8f228_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7819e6a41866ededdea60a2b68239906326b9d3827a9c47be03498026d4c3ccb
size 109122

acceleratingimagegenerationwithsubpathlinearapproximationmodel/ae593f34-493e-4aa3-98d2-67b350c8f228_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e660cb6a013cc7d3de27f11eeb1688452617c9e4379c093282f53c9b2d2f8ac
size 10053855

acceleratingimagegenerationwithsubpathlinearapproximationmodel/full.md
ADDED
@@ -0,0 +1,348 @@
# Accelerating Image Generation with Sub-Path Linear Approximation Model
|
| 2 |
+
|
| 3 |
+
Chen $\mathrm{Xu}^{1,2\dagger}$ , Tianhui Song $^{1,2\dagger}$ , Weixin Feng $^{2}$ , Xubin Li $^{2}$ , Tiezheng Ge $^{2}$ , Bo Zheng $^{2}$ , and Limin Wang $^{1,3,*}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China
|
| 6 |
+
2 Alibaba Group, Hangzhou, China 3 Shanghai AI Lab, Shanghai, China
|
| 7 |
+
|
| 8 |
+
Abstract. Diffusion models have significantly advanced the state of the art in image, audio, and video generation tasks. However, their applications in practical scenarios are hindered by slow inference speed. Drawing inspiration from the consistency models, we propose the Sub-Path Linear Approximation Model (SPLAM), which can accelerate diffusion models while maintaining high-quality image generation. SPLAM treats the PF-ODE trajectory as a series of PF-ODE sub-paths divided by sampled points, and harnesses sub-path linear (SL) ODEs to form a progressive and continuous error estimation along each individual PF-ODE sub-path. The optimization on such SL-ODEs allows SPLAM to construct denoising mapping with smaller cumulative approximated error. An efficient distillation method is also developed to facilitate the incorporation of pre-trained diffusion models, such as latent diffusion models. The extensive experimental results demonstrate SPLAM achieves remarkable training efficiency, requiring only 6 A100 GPU days to produce a high-quality generative model capable of 2 to 4-step generation. Comprehensive evaluations on LAION, MS COCO 2014, and MS COCO 2017 datasets also illustrate that SPLAM surpasses the existing acceleration methods in few-step generation tasks, achieving state-of-the-art performance both on FID and the quality of the generated images.
|
| 9 |
+
|
| 10 |
+
Keywords: Diffusion Models $\cdot$ Accelerating Diffusion Models $\cdot$ Diffusion Model Distillation $\cdot$ Consistency Models.
|
| 11 |
+
|
| 12 |
+
# 1 Introduction

Diffusion models, also known as score-based generative models, have emerged as a potent paradigm in generative computer vision, enabling the synthesis of highly realistic images by progressively refining random noise into structured visual content [9,27,29,42,43]. Despite their impressive ability, one of the primary challenges associated with diffusion models lies in their computational intensity, often requiring hundreds of iteration steps to produce a single image. This has spurred a surge of research focused on accelerating diffusion models to retain high-quality outputs while significantly reducing the computation cost during the inference phase [19-22, 24, 33, 39, 41, 46, 47].

Within the spectrum of acceleration techniques, consistency models [24, 41] have garnered attention as they forge a consistent denoising mapping across points on Probability Flow (PF) ODE trajectories. This learning strategy endows consistency models with a notable consistency property and lets the overall prediction error be estimated as a summation of incremental errors, each computed as the difference between the predictions at adjacent trajectory points. In this paper, we recognize that the approximation of denoising mappings by consistency models is essentially a minimization process targeting the endpoints of sub-paths along ODE trajectories. We observe that the approximation quality is currently limited by the accumulation of errors, which arise either from an overabundance of approximation operations or from the heightened difficulty of optimizing individual sub-path errors as the skipping step size expands.

To address these challenges, we propose a novel approach, designated the Sub-Path Linear Approximation Model (SPLAM). SPLAM adheres to the foundational concept of cumulative approximation of PF-ODE trajectories but innovates through its sustained learning from Sub-Path Linear (SL) ODEs. Specifically, we dissect the sub-path learning objective based on the noise-prediction design [9, 13] into two interrelated components, and establish SL-ODEs that give a progressive and a continuous estimation, respectively, for each component through a carefully designed linear interpolation between the endpoints of sub-paths. We then utilize the SL-ODEs to approximate the complete PF-ODE trajectories, which allows a more nuanced optimization. Consequently, the prediction error of our approach is assessed through iterative solutions of all SL-ODEs, enabling a reduction of cumulative errors and an enhancement in image generation quality. Furthermore, we develop an efficient distillation procedure for SPLAM that enables incorporation of pre-trained latent diffusion models [31] (e.g., Stable Diffusion). Our contributions can be summarized as follows:

1. We identify that the optimization process of consistency models essentially minimizes the cumulative approximation error along PF-ODE sub-path endpoints, and observe that the performance of such approximations is hindered by the proliferating number of approximations or by the amplified difficulty of optimizing single sub-path errors as the skipping step size increases.
2. To address these challenges, we propose a novel approach, the Sub-Path Linear Approximation Model (SPLAM). SPLAM employs Sub-Path Linear (SL) ODEs to continuously approximate the complete PF-ODE trajectories and progressively optimize the sub-path learning objectives, constructing denoising mappings with smaller cumulative approximation errors.
3. Leveraging the proposed SPLAM and SL-ODE framework, we put forth an efficient distillation method. When integrated with powerful pre-trained models like Stable Diffusion, our approach allows more efficient training and attains impressive FIDs of 10.09, 10.06, and 20.77 on the LAION, MS COCO 2014, and MS COCO 2017 datasets, respectively, achieving better performance than all previous acceleration approaches at comparable inference latency.

Fig. 1: Our Sub-Path Linear Approximation Model employs Sub-Path Linear ODEs to approximate the sub-paths on the PF-ODE trajectories, each determined by the linear interpolation of the corresponding endpoints. SPLAM is then trained on the consistent mapping along the SL-ODEs to minimize the approximation errors.

# 2 Related Work

Diffusion Models [1,9,13,28,31,37,43] have solidified their status as a cornerstone in the realm of generative models, outshining previous approaches in creating rich and detailed images. Song et al. [43] model this process from a continuous-time perspective with a stochastic differential equation (SDE), which iteratively denoises an initial noise distribution, leveraging the learned score of the data distribution to steer the process towards data points [9, 42, 43]. This reverse diffusion process has proven particularly adept at capturing the intricate structures and variations inherent in complex datasets. They also demonstrate that there exists an ordinary differential equation (ODE), dubbed the Probability Flow (PF) ODE, which shares the marginal probability densities with the reverse-time SDE and thus yields a deterministic sampling trajectory [13, 43]. In contrast to other generative models like VAEs [14, 38] and GANs [6], diffusion models demonstrate remarkable robustness in training and excel in producing samples with substantial diversity and high fidelity, thereby offering a robust solution for modeling complex distributions in an ever-expanding array of generative tasks.

Accelerating Diffusion Models. While diffusion models have demonstrated their superiority in generating high-quality samples, the generation speed remains a major hindrance, as thousands of sampling steps may be required, which poses difficulties for practical and efficient applications. To address these issues, a surge of advancements has emerged aiming to accelerate the inference process. Some works concentrate on designing training-free fast diffusion samplers [2, 11, 13, 18, 21, 22, 43, 52], potentially cutting the steps from one thousand down to a modest 20-50. In the realm of distillation [8], efforts have been undertaken [3, 7, 23, 26, 33, 50, 52] to condense the inference steps of pre-trained diffusion models to fewer than 10. Progressive distillation (PD) [33] amortizes the integration of the PF-ODE into a new sampler that takes half as many sampling steps, displaying efficacy with as few as 2/4 steps. Consistency models [24, 25, 40, 41], as a nascent class of models, offer the promise of high-quality one-step generation by mapping any point along the PF-ODE trajectory back to the origin. Representing flow-based approaches [17, 19, 20, 44], InstaFlow [19, 20] proposes a reflow technique to straighten the trajectories of probability flows and refine the coupling between noises and images, which achieves a one-step SD model. Concurrently, some strategies explore the inclusion of GAN-like objectives in diffusion models to afford fast generative capabilities [16, 34, 46, 47]. DMD [47] additionally proposes a distribution matching method that enables one-step high-quality image generation.

# 3 Preliminaries

Diffusion Models are a class of generative models that gradually transform data into a noisy state through Gaussian perturbations and subsequently learn to reverse this process, reconstructing the original data by progressively denoising it. Denote $\pmb{x}_0$ as the data sampled from the original distribution $\pmb{x}_0 \sim p_{data}(\pmb{x})$ and $\alpha(t), \sigma(t)$ as functions that define a noise schedule. Diffusion models transition the data to a noise-corrupted marginal distribution, which can be expressed as:

$$
p_t\left(\boldsymbol{x}_t \mid \boldsymbol{x}_0\right) = \mathcal{N}\left(\boldsymbol{x}_t \mid \alpha(t)\boldsymbol{x}_0, \sigma(t)^2 I\right), \tag{1}
$$

for any time step $t \in [0, T]$.
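
As a concrete reading of Eq. (1), the following minimal Python sketch draws a perturbed sample $\boldsymbol{x}_t$ from clean data; the cosine-style schedule here is purely illustrative (an assumption, not the schedule used in the paper):

```python
import numpy as np

def noise_schedule(t, T=1000):
    # Illustrative alpha/sigma pair (assumption): alpha decays, sigma grows.
    alpha = np.cos(0.5 * np.pi * t / T)
    sigma = np.sin(0.5 * np.pi * t / T)
    return alpha, sigma

def perturb(x0, t, rng):
    # Eq. (1): x_t ~ N(alpha(t) * x0, sigma(t)^2 I).
    alpha, sigma = noise_schedule(t)
    eps = rng.standard_normal(x0.shape)
    return alpha * x0 + sigma * eps

rng = np.random.default_rng(0)
x0 = rng.standard_normal((4, 16))  # stand-in for data (or latents z_0)
x_t = perturb(x0, t=500, rng=rng)
```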

Song et al. [43] describe the diffusion process using a stochastic differential equation (SDE):

$$
d\boldsymbol{x}_t = \boldsymbol{f}(\boldsymbol{x}_t, t)\, dt + g(t)\, d\boldsymbol{w}_t, \tag{2}
$$

where $\boldsymbol{f}(\cdot, \cdot)$ and $g(\cdot)$ denote the drift and diffusion coefficients, respectively, and $\boldsymbol{w}_t$ signifies the standard Brownian motion at time $t$. They also derive an ordinary differential equation (ODE) corresponding to this SDE, which defines the trajectories of solutions sampled at time $t$ according to $p_t(\boldsymbol{x}_t)$:

$$
d\boldsymbol{x}_t = \left[\boldsymbol{f}(\boldsymbol{x}_t, t) - \frac{1}{2} g(t)^2 \nabla_{\boldsymbol{x}} \log p_t(\boldsymbol{x}_t)\right] dt, \tag{3}
$$

referred to as the Probability Flow (PF) ODE. In the reverse denoising process, models learn a score function $\mathbf{s}_{\theta}(\pmb{x}_t, t) \approx \nabla \log p_t(\pmb{x}_t)$, adhering to the PF-ODE; diffusion models are therefore also recognized as score-based generative models. Building on the diffusion process, latent diffusion models (LDMs) additionally employ a VAE encoder $\mathcal{E}(\cdot)$ and decoder $\mathcal{D}(\cdot)$ to compress the image $\pmb{x}$ into latent space as $\pmb{z} = \mathcal{E}(\pmb{x})$ and reconstruct it by the decoder, $\hat{\pmb{x}} = \mathcal{D}(\pmb{z})$, implementing the diffusion process on the compressed vector $\pmb{z}$ in latent space [31]. With the latent diffusion process, pre-trained large-scale LDMs like Stable Diffusion (SD) models can achieve more precise PF-ODE solutions and thus generate high-quality images.

Consistency Models were proposed by Song et al. [41] as a novel paradigm within the family of generative models. Considering a solution trajectory of the PF-ODE $\{(\pmb{x}_t, t)\}_{t \in [\epsilon, T]}$, consistency models comply with a consistency function that projects every pair $(\pmb{x}_t, t)$ along the trajectory back to the starting point, $\pmb{F}(\pmb{x}_t, t) \mapsto \pmb{x}_{\epsilon}$ for any $t \in [\epsilon, T]$, to obtain a one-step generator. Here, $\epsilon$ represents a small positive constant, thereby making $\pmb{x}_{\epsilon}$ a viable surrogate for $\pmb{x}_0$. An important characteristic of consistency models is the self-consistency property:

$$
\boldsymbol{F}\left(\boldsymbol{x}_t, t\right) = \boldsymbol{F}\left(\boldsymbol{x}_{t'}, t'\right), \quad \forall t, t' \in [\epsilon, T], \tag{4}
$$

which is leveraged as the training constraint for consistency models, whether distilling knowledge from a pre-trained model or training from scratch. The model is parameterized as follows:

$$
\boldsymbol{F}_{\boldsymbol{\theta}}(\boldsymbol{x}_t, t) = c_{\mathrm{skip}}(t)\, \boldsymbol{x}_t + c_{\mathrm{out}}(t)\, \boldsymbol{f}_{\boldsymbol{\theta}}(\boldsymbol{x}_t, t), \tag{5}
$$

where $c_{\mathrm{skip}}(t)$ and $c_{\mathrm{out}}(t)$ are differentiable functions ensuring that $c_{\mathrm{skip}}(\epsilon) = 1$ and $c_{\mathrm{out}}(\epsilon) = 0$, guaranteeing that $\pmb{F}_{\pmb{\theta}}(\pmb{x}_{\epsilon}, \epsilon) \equiv \pmb{x}_{\epsilon}$, and $\pmb{f}_{\pmb{\theta}}(\pmb{x}_t, t)$ is a deep neural network. For the distillation approach, called Consistency Distillation, the training objective is formulated as:
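
To make the boundary condition concrete, here is a minimal sketch of the parameterization in Eq. (5); the particular $c_{\mathrm{skip}}/c_{\mathrm{out}}$ forms are one common choice from the consistency-models literature and are an assumption, not prescribed by the text above:

```python
import numpy as np

def boundary_coeffs(t, eps=0.002, sigma_data=0.5):
    # One common c_skip/c_out choice; at t = eps it gives (1, 0),
    # so F_theta(x_eps, eps) == x_eps as Eq. (5) requires.
    c_skip = sigma_data**2 / ((t - eps) ** 2 + sigma_data**2)
    c_out = sigma_data * (t - eps) / np.sqrt(t**2 + sigma_data**2)
    return c_skip, c_out

def F_theta(x_t, t, f_theta):
    # Eq. (5): F_theta(x_t, t) = c_skip(t) * x_t + c_out(t) * f_theta(x_t, t).
    c_skip, c_out = boundary_coeffs(t)
    return c_skip * x_t + c_out * f_theta(x_t, t)
```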

$$
\mathcal{L}_{CD}\left(\boldsymbol{\theta}, \boldsymbol{\theta}^-; \phi\right) = \mathbb{E}\left[d\left(\boldsymbol{F}_{\boldsymbol{\theta}}\left(\boldsymbol{x}_{t_{n+1}}, t_{n+1}\right), \boldsymbol{F}_{\boldsymbol{\theta}^-}\left(\hat{\boldsymbol{x}}_{t_n}^{\Phi}, t_n\right)\right)\right], \tag{6}
$$

where $\hat{\pmb{x}}_{t_n}^{\Phi} = \pmb{x}_{t_{n+1}} + (t_n - t_{n+1})\Phi(\pmb{x}_{t_{n+1}}, t_{n+1}; \phi)$ serves as a one-step estimation of $\pmb{x}_{t_n}$ from $\pmb{x}_{t_{n+1}}$, with $\Phi(\cdot; \phi)$ the update function of a one-step ODE solver, and $d(\cdot, \cdot)$ is a chosen distance metric. Consistency models also utilize the EMA strategy to stabilize training, where $\pmb{\theta}^-$ is the running average of $\pmb{\theta}$. Latent Consistency Models (LCMs) [24] introduce the consistency model into the distillation of latent diffusion models. To accelerate the training of consistency models, LCM employs a skipping step size $k$ to enforce consistency between the current timestep and the one $k$ steps away. With a conditional input $c$ and a guidance scale $w$ to realize the CFG strategy [10], the modified learning objective for latent consistency distillation is formulated as:

$$
\mathcal{L}_{LCD}\left(\boldsymbol{\theta}, \boldsymbol{\theta}^-; \phi\right) = \mathbb{E}\left[d\left(\boldsymbol{F}_{\boldsymbol{\theta}}\left(\boldsymbol{x}_{t_{n+k}}, w, c, t_{n+k}\right), \boldsymbol{F}_{\boldsymbol{\theta}^-}\left(\hat{\boldsymbol{x}}_{t_n}^{\Phi}, w, c, t_n\right)\right)\right]. \tag{7}
$$
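
The distillation pair in Eqs. (6)-(7) can be sketched as follows; `solver_step`, `F_student`, and `F_ema` are hypothetical stand-ins for the teacher's ODE solver update and the two consistency networks, and squared L2 stands in for the distance $d(\cdot, \cdot)$:

```python
import numpy as np

def consistency_distillation_loss(x_next, t_next, t_n, F_student, F_ema, solver_step):
    # One teacher solver step estimates x_{t_n} from x_{t_{n+1}} (Eq. (6)).
    x_tn_hat = x_next + (t_n - t_next) * solver_step(x_next, t_next)
    pred = F_student(x_next, t_next)        # online network at the later point
    target = F_ema(x_tn_hat, t_n)           # EMA network at the earlier point
    return np.mean((pred - target) ** 2)    # d(., .) chosen as squared L2
```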

# 4 Methodology

# 4.1 Approximation Strategy for Denoiser

One-step Denoiser Parameterization. To synthesize an image from a sampled input $\boldsymbol{x}_t$ at a large time step $t$ in one step, a natural approach is to adopt the strategy from [9] that employs a neural network $\epsilon_{\theta}(\boldsymbol{x}_t, t)$ to predict a standard Gaussian noise, which implements the denoising mapping parameterized as $f_{\theta}(\boldsymbol{x}_t, t) = \frac{\boldsymbol{x}_t - \sigma(t)\epsilon_{\theta}(\boldsymbol{x}_t, t)}{\alpha(t)}$. By redefining the target distribution for $(\boldsymbol{x}_t, t)$ as $\boldsymbol{x}_0^t = \alpha(t)\boldsymbol{x}_0 \sim p_{data,t}(\alpha(t)\boldsymbol{x})$ and setting $D_{\theta}(\boldsymbol{x}_t, t) = \alpha(t) f_{\theta}(\boldsymbol{x}_t, t) = \boldsymbol{x}_t - \sigma(t)\epsilon_{\theta}(\boldsymbol{x}_t, t)$, this predictive formulation can be recast into the canonical denoiser function defined in [13], which aims to minimize the denoising error:

$$
\mathcal{L}_{\boldsymbol{D}}(\boldsymbol{\theta}) = \mathbb{E}_{\boldsymbol{x}_0^t \sim p_{data,t},\, \boldsymbol{x}_t \sim \mathcal{N}\left(\boldsymbol{x}_0^t, \sigma(t)^2 I\right)}\left[\left|D_{\boldsymbol{\theta}}(\boldsymbol{x}_t, t) - \alpha(t)\boldsymbol{x}_0\right|\right], \tag{8}
$$

where $|\cdot|$ is an estimation of the error vector (e.g., an L2 distance). However, Eq. (8) is hard to optimize in practice. For instance, when $\alpha(t)$ decreases over time step $t$, which implies $\alpha(t)\pmb{x}_0 \rightarrow \mathbf{0}$, the training is likely to collapse and the denoiser is taught to generally give a zero output.
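
In code, the denoiser recast of Eq. (8) is just a change of output target; the sketch below assumes hypothetical `eps_theta`, `alpha`, and `sigma` callables:

```python
import numpy as np

def denoiser(x_t, t, eps_theta, alpha, sigma):
    # D_theta(x_t, t) = x_t - sigma(t) * eps_theta(x_t, t); dividing by
    # alpha(t) recovers the x_0-prediction f_theta(x_t, t).
    D = x_t - sigma(t) * eps_theta(x_t, t)
    return D, D / alpha(t)

def denoising_error(D, x0, t, alpha):
    # Eq. (8) integrand, with |.| taken as an L2 distance.
    return np.linalg.norm(D - alpha(t) * x0)
```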

Approximation Strategy in Consistency Models. We observe that consistency models [24, 41] provide a solution to the aforementioned issues by leveraging the consistency property. Presuming that we have obtained a good prediction result $f_{\theta}(\pmb{x}_{t-k}, t-k) \approx \pmb{x}_0$ from a time step $t - k$, i.e., $k$ steps ahead of $t$, this property yields an approximated error estimation of Eq. (8) as:

$$
\mathbb{E}\left[\left|\boldsymbol{D}_{\boldsymbol{\theta}}\left(\boldsymbol{x}_t, t\right) - \alpha(t)\boldsymbol{f}_{\boldsymbol{\theta}}\left(\boldsymbol{x}_{t-k}, t-k\right)\right|\right]. \tag{9}
$$

By incorporating the expressions for $\pmb{f}_{\pmb{\theta}}(\pmb{x}_{t-k}, t-k)$ and $\pmb{D}_{\pmb{\theta}}(\pmb{x}_t, t)$, we derive the approximated error estimation based on $\pmb{\epsilon}_{\pmb{\theta}}(\cdot, \cdot)$ as:

$$
\mathcal{L}_{\text{Approx}}(\boldsymbol{\theta}) = \mathbb{E}\left[\left|\boldsymbol{x}_t - \frac{\alpha(t)}{\alpha(t-k)}\boldsymbol{x}_{t-k} + \frac{\alpha(t)}{\alpha(t-k)}\sigma(t-k)\boldsymbol{\epsilon}_{\boldsymbol{\theta}}(\boldsymbol{x}_{t-k}, t-k) - \sigma(t)\boldsymbol{\epsilon}_{\boldsymbol{\theta}}(\boldsymbol{x}_t, t)\right|\right], \tag{10}
$$

where the aforementioned impact on optimization is reduced, as the coefficient is amplified by $\alpha(t-k)$. When $k$ is limited to 1, the error between the mapping result $\pmb{f}_{\pmb{\theta}}(\pmb{x}_t, t)$ and the trajectory origin $\pmb{x}_0$ can be quantified by the accumulation of incremental approximated errors [41]: $|\pmb{x}_0 - \pmb{f}_{\pmb{\theta}}(\pmb{x}_t, t)| \leq \sum_{1 \leq t' \leq t} |\pmb{f}_{\pmb{\theta}}(\pmb{x}_{t'}, t') - \pmb{f}_{\pmb{\theta}}(\pmb{x}_{t'-1}, t'-1)|$. Ideally, if the error of one single approximation can be bounded, we can reduce the cumulative error by decreasing the number of approximations. This technique, also called SKIPPING-STEP in LCM [24], extends the optimization to the error between skipping sampled points on the trajectories, $|\pmb{f}_{\pmb{\theta}}(\pmb{x}_{t'}, t') - \pmb{f}_{\pmb{\theta}}(\pmb{x}_{t'-k}, t'-k)|$, for a fixed skipping step size $k$. However, our insights reveal that this precondition does not hold in extended situations. Denoting $\{\pmb{x}_{t'}\}_{t' \in [t-k, t]}$ as the sub-path between $\pmb{x}_{t-k}$ and $\pmb{x}_t$ on the original PF-ODE trajectory, we discern that the learning objective in Eq. (10) for $\epsilon_{\pmb{\theta}}(\pmb{x}_t, t)$ can be decomposed into two complementary components: 1) $dist_{\Delta}(\pmb{x}_t, \pmb{x}_{t-k}, t) = \pmb{x}_t - \frac{\alpha(t)}{\alpha(t-k)}\pmb{x}_{t-k}$, which gauges the incremental distance from $\pmb{x}_{t-k}$ to $\pmb{x}_t$ attributable to the drift and diffusion processes, and 2) $dist_{0,\pmb{\theta}}(\pmb{x}_{t-k}, t-k, t) = \frac{\alpha(t)}{\alpha(t-k)}\sigma(t-k)\epsilon_{\pmb{\theta}}(\pmb{x}_{t-k}, t-k)$, which captures the denoising contribution from previous time steps that should be coherently propagated to the subsequent time step $t$. Thus we rewrite Eq. (10) as a sub-path learning objective:

$$
\mathcal{L}_{\text{Sub-p}}(\boldsymbol{\theta}, k) = \mathbb{E}\left[\left|dist_{\Delta}(\boldsymbol{x}_t, \boldsymbol{x}_{t-k}, t) + dist_{0,\boldsymbol{\theta}}(\boldsymbol{x}_{t-k}, t-k, t) - \sigma(t)\epsilon_{\boldsymbol{\theta}}(\boldsymbol{x}_t, t)\right|\right]. \tag{11}
$$

In Eq. (11), the learning process of $dist_{\Delta}$ equates to modeling the denoising distribution $p(\boldsymbol{x}_{t-k}|\boldsymbol{x}_t)$, which deviates from Gaussian for larger skipping step sizes and has been found intractable to estimate [13,21,22,45,46]. Consequently, the approximated error escalates uncontrollably with increased $k$ due to the reliance on this flawed learning. Although LCM sets an empirical $k$ of 20 to balance the pros and cons, the fundamental issues remain unaddressed and unexplored.
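
The two components of the decomposition can be written directly; `alpha`, `sigma`, and `eps_theta` below are hypothetical stand-ins for the schedule and the noise-prediction network:

```python
import numpy as np

def dist_delta(x_t, x_tmk, t, k, alpha):
    # dist_Delta(x_t, x_{t-k}, t): drift/diffusion increment along the sub-path.
    return x_t - (alpha(t) / alpha(t - k)) * x_tmk

def dist_zero(x_tmk, t, k, eps_theta, alpha, sigma):
    # dist_{0,theta}(x_{t-k}, t-k, t): earlier denoising contribution,
    # rescaled so it propagates coherently to time step t.
    return (alpha(t) / alpha(t - k)) * sigma(t - k) * eps_theta(x_tmk, t - k)

# Eq. (11) then asks sigma(t) * eps_theta(x_t, t) to match
# dist_delta(...) + dist_zero(...).
```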

# 4.2 Sub-Path Linear Approximation Model

To improve the learning objective in Eq. (11), we introduce a new approach for accelerating diffusion models, termed the Sub-Path Linear Approximation Model (SPLAM). SPLAM introduces Sub-Path Linear (SL) ODEs that approximate the sub-paths on the PF-ODE trajectories as linear interpolations between the corresponding sub-path endpoints. As optimization based on such SL-ODEs gives a progressive and a continuous estimation, respectively, for the two decomposed terms in Eq. (11), our SPLAM is trained on the resulting SL-ODE learning objectives and achieves smaller overall prediction errors and better generation quality. We also develop an efficient distillation procedure for latent diffusion models [31], with a Multiple Estimation strategy that improves the estimated results of teacher models.

Sub-Path Linear ODE. Based on the above analysis, we introduce Sub-Path Linear (SL) ODEs to model approximated sub-paths of the original PF-ODE trajectories, giving a progressive estimation of $dist_{\Delta}$. For a sampled sub-path $\{\pmb{x}_{t'}\}_{t' \in [t-k, t]}$ on a solution trajectory dictated by Eq. (3), we interpolate a linear path from $(\pmb{x}_{t-k}, t-k)$ to $(\pmb{x}_t, t)$, guided by the vector direction of $dist_{\Delta}(\pmb{x}_t, \pmb{x}_{t-k}, t)$. To distinguish the impacts of $dist_{\Delta}$ and $dist_{0,\theta}$, we account for the drift component in the linearly approximated path, causing a coefficient shift from $(\pmb{x}_{t-k}, t-k)$ to $(\frac{\alpha(t)}{\alpha(t-k)}\pmb{x}_{t-k}, t-k)$. The points on the approximated path $\{\pmb{x}_{\gamma,t}\}_{\gamma \in [0,1]}$ are thus computed as:

$$
\boldsymbol{x}_{\gamma,t} = \frac{\alpha(t)}{\alpha(t-k)}\boldsymbol{x}_{t-k} + \gamma \cdot dist_{\Delta}(\boldsymbol{x}_t, \boldsymbol{x}_{t-k}, t) = (1-\gamma)\frac{\alpha(t)}{\alpha(t-k)}\boldsymbol{x}_{t-k} + \gamma\boldsymbol{x}_t, \tag{12}
$$

for a sampled pair $(\pmb{x}_{t-k}, t-k)$ and $(\pmb{x}_t, t)$.
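
Eq. (12) is a plain linear interpolation after the drift correction; a minimal sketch (with a hypothetical `alpha` schedule callable):

```python
def sl_point(x_t, x_tmk, gamma, t, k, alpha):
    # Eq. (12): point on the SL-ODE path; gamma = 0 gives the drift-corrected
    # x_{t-k}, and gamma = 1 recovers x_t exactly.
    shift = alpha(t) / alpha(t - k)
    return (1.0 - gamma) * shift * x_tmk + gamma * x_t
```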

Since $\pmb{x}_t$ and $\pmb{x}_{t-k}$ conform to distributions governed by the PF-ODE, our linear transformation effectively defines a linear ODE over $\gamma$, from the distribution $\frac{\alpha(t)}{\alpha(t-k)}\pmb{x}_{t-k} \sim p_{t-k,k}(\pmb{x}_{t-k})$ to $\pmb{x}_t \sim p_t(\pmb{x}_t)$, where $p_{t,k}(\pmb{x}_t)$ has the property $p_{t,k}(\pmb{x}_t|\pmb{x}_0) = \mathcal{N}(\alpha(t+k)\pmb{x}_0, \left[\frac{\alpha(t+k)\sigma(t)}{\alpha(t)}\right]^2 I)$:

$$
d\boldsymbol{x}_{\gamma,t} = dist_{\Delta}(\boldsymbol{x}_t, \boldsymbol{x}_{t-k}, t)\, d\gamma, \tag{13}
$$

which follows directly from differentiating Eq. (12) with respect to $\gamma$.

We denote this as the Sub-Path Linear (SL) ODE. To apply the approximation strategy to the SL-ODE, the denoiser and generation function, with $\pmb{x}_t$ replaced by $\pmb{x}_{\gamma,t}$, are given by:

$$
\boldsymbol{D}_{\boldsymbol{\theta}}\left(\boldsymbol{x}_{\gamma,t}, \gamma, t\right) = \boldsymbol{x}_{\gamma,t} - \sigma(\gamma, t)\,\epsilon_{\boldsymbol{\theta}}\left(\boldsymbol{x}_{\gamma,t}, \gamma, t\right), \qquad \boldsymbol{f}_{\boldsymbol{\theta}}\left(\boldsymbol{x}_{\gamma,t}, \gamma, t\right) = \frac{\boldsymbol{D}_{\boldsymbol{\theta}}\left(\boldsymbol{x}_{\gamma,t}, \gamma, t\right)}{\alpha(t)}. \tag{14}
$$

Incorporating these into Eq. (11), we derive the sub-path learning objective for our SPLAM model as:

$$
\mathcal{L}_{\text{SPLAM}}(\boldsymbol{\theta}, k) = \mathbb{E}\left[\left|\gamma \cdot dist_{\Delta}(\boldsymbol{x}_t, \boldsymbol{x}_{t-k}, t) + dist_{0,\boldsymbol{\theta}}(\boldsymbol{x}_{t-k}, t-k, t) - \sigma(\gamma, t)\boldsymbol{\epsilon}_{\boldsymbol{\theta}}(\boldsymbol{x}_{\gamma,t}, \gamma, t)\right|\right], \tag{15}
$$

which gives a progressive estimation of the otherwise intractable $dist_{\Delta}$ objective. The value of $\sigma(\gamma, t)$ can be precisely derived from the distributions $p_t(\boldsymbol{x}_t)$ and $p_{t-k}(\boldsymbol{x}_{t-k})$ but has a complex expression; empirically, we use the approximation $\sigma(\gamma, t) = (1-\gamma)\frac{\alpha(t)}{\alpha(t-k)}\sigma(t-k) + \gamma\sigma(t)$. Compared to consistency models, which adopt Eq. (10) or Eq. (11), our $\mathcal{L}_{\text{SPLAM}}$ maintains a progressive estimation of $dist_{\Delta}$ and a consistent estimation of $dist_{0,\theta}$, which enables learning with large skipping step sizes. The overall prediction error can still be assessed as the aggregate of approximated errors between sub-path endpoints, and the approximated error between these points is continuously optimized through the SL-ODEs. Consequently, the optimization of the approximated errors in our SPLAM is significantly improved. Our approach further benefits from an increased skipping step size, allowing it to generate images of higher quality with fewer sampling steps and more efficient training.
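
The empirical noise level on the SL-ODE is itself a linear blend of the (rescaled) endpoint noise levels; a one-function sketch with hypothetical `alpha`/`sigma` callables:

```python
def sigma_gamma(gamma, t, k, alpha, sigma):
    # Empirical sigma(gamma, t) used in Eq. (15): linear blend of the
    # drift-rescaled sigma(t-k) and sigma(t).
    return (1.0 - gamma) * (alpha(t) / alpha(t - k)) * sigma(t - k) + gamma * sigma(t)
```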

Sub-Path Linear Approximation Distillation. We adopt pre-trained Stable Diffusion (SD) models [31] to obtain the solution PF-ODE trajectories upon which we build our SL-ODEs, and we call this approach Sub-Path Linear Approximation Distillation (SPLAD). To achieve conditional generation with the conditional input $c$, the noise prediction model is parameterized as $\epsilon_{\theta}(z_t, c, t)$ [21, 43]. We also introduce $\gamma$ into the prediction models for solving our SL-ODEs, and leverage $\gamma$-conditioned training where $\gamma$ is converted to Fourier embeddings and fed into the models as an input. Specifically, to predict $z_0$ in the latent space, the generation function for SPLAM is defined as:

$$
\boldsymbol{F}_{\boldsymbol{\theta}}\left(\boldsymbol{z}_{\gamma,t}, c, \gamma, t\right) = c_{\text{skip}}(t)\,\boldsymbol{z}_{\gamma,t} + c_{\text{out}}(t)\,\boldsymbol{f}_{\boldsymbol{\theta}}\left(\boldsymbol{z}_{\gamma,t}, c, \gamma, t\right), \tag{16}
$$

where $f_{\theta}(z_{\gamma,t}, c, \gamma, t)$ mirrors Eq. (14) with $\epsilon_{\theta}(z_{\gamma,t}, \gamma, t)$ replaced by the conditional form $\epsilon_{\theta}(z_{\gamma,t}, c, \gamma, t)$. The functions $c_{\mathrm{skip}}$ and $c_{\mathrm{out}}$ ensure that $F_{\theta}(z_{1,0}, c, 1, 0) \equiv z_0$ (we regard $F_{\theta}$ as the same expression as $f_{\theta}$ since $c_{\mathrm{skip}}(t) \ll c_{\mathrm{out}}(t)$ for most time steps). Integrating this with Eq. (9), our SPLAD approach minimizes the following objective:

$$
\mathcal{L}_{\text{SPLAD}}\left(\boldsymbol{\theta}, \boldsymbol{\theta}^-; \phi\right) = \mathbb{E}_{\boldsymbol{z}_0 \sim p_{data},\, t \sim \mathcal{U}[k, T],\, \gamma \sim \mathcal{U}[0, 1]}\left[\left|\boldsymbol{F}_{\boldsymbol{\theta}}\left(\boldsymbol{z}_{\gamma,t}, c, \gamma, t\right) - \boldsymbol{F}_{\boldsymbol{\theta}^-}\left(\hat{\boldsymbol{z}}_{1,t-k}^{\Phi}, c, 1, t-k\right)\right|\right], \tag{17}
$$

where $\mathcal{U}$ denotes the uniform distribution and $k$ is a pre-determined skipping step size. The $\alpha(t)$ in Eq. (9) is omitted due to its negligible effect on optimization in practice. The term $\hat{z}_{1,t-k}^{\Phi} = \hat{z}_{t-k}^{\Phi}$ is estimated using ODE solvers $\Phi(\cdots; \phi)$ derived from teacher models; in this paper, DDIM [39] is employed as our choice among the advanced solvers for LDMs. Moreover, to improve the estimation of $\hat{z}_{t-k}^{\Phi}$, we apply Multiple Estimation, which executes the solver $\Phi(\cdots; \phi)$ multiple times with a reduced skipping step size $k_{\phi}$. Denoting $t_{\phi,i} = t - i \cdot k_{\phi}$ and initializing $\hat{z}_{t_{\phi,0}}^{\Phi} = z_t$, the multiple estimation is iteratively executed as:

$$
\hat{\boldsymbol{z}}_{t_{\phi,i+1}}^{\Phi} = \hat{\boldsymbol{z}}_{t_{\phi,i}}^{\Phi} + w\,\Phi(\hat{\boldsymbol{z}}_{t_{\phi,i}}^{\Phi}, t_{\phi,i}, t_{\phi,i+1}, c; \phi) + (1-w)\,\Phi(\hat{\boldsymbol{z}}_{t_{\phi,i}}^{\Phi}, t_{\phi,i}, t_{\phi,i+1}, \emptyset; \phi), \tag{18}
$$

for $i = 0, 1, 2, \ldots$ with $i \leq \frac{k}{k_{\phi}} - 1$, where $\emptyset$ denotes no conditional input and $w$ is a fixed guidance scale which controls the effect of conditional generation [10] from the conditional input $c$.
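
Eq. (18) amounts to running the teacher solver $k/k_{\phi}$ times with classifier-free-guidance mixing at each sub-step; a sketch with a hypothetical `solver(z, t_from, t_to, cond)` standing in for $\Phi$:

```python
def multiple_estimation(z_t, t, k, k_phi, c, w, solver):
    # Iteratively estimate z_{t-k} from z_t (Eq. (18)); passing cond=None
    # plays the role of the unconditional branch (the empty set in the text).
    z = z_t
    for i in range(k // k_phi):
        t_i, t_next = t - i * k_phi, t - (i + 1) * k_phi
        z = z + w * solver(z, t_i, t_next, c) + (1.0 - w) * solver(z, t_i, t_next, None)
    return z
```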

Algorithm 1 Sub-Path Linear Approximation Distillation (SPLAD)

Input: dataset $\mathcal{D}$, initial model parameter $\pmb{\theta}$, learning rate $\eta$, EMA decay rate $\mu$, ODE solver $\Phi(\cdot, \cdot; \phi)$, distance estimation $|\cdot|$, a fixed guidance scale $w$, step sizes $k$ and $k_{\phi}$, VAE encoder $\mathcal{E}(\cdot)$, noise schedule $\alpha(t), \sigma(t)$
$\pmb{\theta}^- \gets \pmb{\theta}$
repeat
  sample $(x, c) \sim \mathcal{D}$, $t \sim \mathcal{U}[k, T]$ and $\gamma \sim \mathcal{U}[0, 1]$
  convert $x$ into latent space: $z = \mathcal{E}(x)$
  sample $\pmb{z}_t \sim \mathcal{N}(\alpha(t)z, \sigma(t)^2 I)$
  $\hat{z}_{t_{\phi,0}}^{\Phi} \gets z_t$, $i \gets 0$
  repeat
    $\hat{z}_{t_{\phi,i+1}}^{\Phi} \gets \hat{z}_{t_{\phi,i}}^{\Phi} + w\Phi(\hat{z}_{t_{\phi,i}}^{\Phi}, t_{\phi,i}, t_{\phi,i+1}, c; \phi) + (1-w)\Phi(\hat{z}_{t_{\phi,i}}^{\Phi}, t_{\phi,i}, t_{\phi,i+1}, \emptyset; \phi)$
    $i \gets i + 1$
  until $k = i \cdot k_{\phi}$
  $\pmb{z}_{\gamma,t} \gets (1-\gamma) \cdot \frac{\alpha(t)}{\alpha(t-k)}\hat{\pmb{z}}_{t-k}^{\Phi} + \gamma \cdot \pmb{z}_t$  ▷ Sample a point on the SL-ODE.
  $\mathcal{L}(\pmb{\theta}, \pmb{\theta}^-; \phi) \gets |\pmb{F}_{\pmb{\theta}}(\pmb{z}_{\gamma,t}, c, \gamma, t) - \pmb{F}_{\pmb{\theta}^-}(\hat{\pmb{z}}_{1,t-k}^{\Phi}, c, 1, t-k)|$
  $\pmb{\theta} \gets \pmb{\theta} - \eta\nabla_{\pmb{\theta}}\mathcal{L}(\pmb{\theta}, \pmb{\theta}^-; \phi)$
  $\pmb{\theta}^- \gets \mathrm{stopgrad}(\mu\pmb{\theta}^- + (1-\mu)\pmb{\theta})$
until convergence
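
For concreteness, one iteration of Algorithm 1 might look as follows in PyTorch; every callable (`student`, `ema`, `encoder`, `solver`, `alpha`, `sigma`) is a hypothetical stand-in, and the Huber distance mirrors the choice reported for the SDv1.5 experiments in Sec. 5.1:

```python
import torch
import torch.nn.functional as F

def splad_step(student, ema, encoder, solver, batch, opt,
               alpha, sigma, k=20, k_phi=20, w=8.0, T=1000, mu=0.95):
    x, c = batch
    z = encoder(x)                                    # z = E(x)
    t = int(torch.randint(k, T + 1, (1,)))            # t ~ U[k, T]
    gamma = float(torch.rand(()))                     # gamma ~ U[0, 1]
    z_t = alpha(t) * z + sigma(t) * torch.randn_like(z)

    with torch.no_grad():                             # teacher side, no gradients
        z_tmk = z_t
        for i in range(k // k_phi):                   # multiple estimation, Eq. (18)
            ti, tn = t - i * k_phi, t - (i + 1) * k_phi
            z_tmk = z_tmk + w * solver(z_tmk, ti, tn, c) \
                          + (1 - w) * solver(z_tmk, ti, tn, None)
        target = ema(z_tmk, c, 1.0, t - k)            # F_{theta^-}(z_hat, c, 1, t-k)

    z_gamma = (1 - gamma) * (alpha(t) / alpha(t - k)) * z_tmk + gamma * z_t
    loss = F.huber_loss(student(z_gamma, c, gamma, t), target)

    opt.zero_grad(); loss.backward(); opt.step()
    with torch.no_grad():                             # EMA update of theta^-
        for p_ema, p in zip(ema.parameters(), student.parameters()):
            p_ema.mul_(mu).add_(p, alpha=1 - mu)
    return float(loss)
```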

The pseudo-code for SPLAD is presented in Algorithm 1. SPLAD shares a similar training pipeline with consistency models [24, 41] but is distinguished in that it optimizes the sub-path learning objectives based on the SL-ODEs and utilizes $\gamma$-conditioned training. For a pair of input noise and time step $(z_t, t)$, SPLAM gives the prediction of the denoised latent $\hat{z}_0$ as:

$$
\hat{z}_0 = \boldsymbol{F}_{\boldsymbol{\theta}^-}\left(\boldsymbol{z}_{1,t}, c, 1, t\right), \tag{19}
$$

for one-step generation, adhering strictly to the $\gamma = 1$ condition. We also use the same iterative sampling strategy as illustrated in [41], which improves the quality of the generated images. In practice, we set the $\gamma$-embedding to $\mathbf{0}$ for $\gamma = 1$, allowing the weights associated with the trained $\gamma$-embeddings to be discarded post-training. Thus, our Sub-Path Linear Approximation Model (SPLAM) necessitates no additional parameters beyond the training phase and can be used in the same way as the teacher models.
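
A few-step sampling loop following Eq. (19) and the iterative strategy of [41] can be sketched as below; `model`, `decoder`, and the schedule callables are hypothetical stand-ins, and the timestep list is an illustrative choice:

```python
import torch

@torch.no_grad()
def splam_sample(model, decoder, c, timesteps, alpha, sigma, shape=(1, 4, 64, 64)):
    # Each step re-noises the current z0 estimate to the next (lower) timestep,
    # then maps it back to z0 via F_{theta^-}(z_{1,t}, c, 1, t) (Eq. (19)).
    z0_hat = torch.zeros(shape)      # on the first step, z_t is pure scaled noise
    for t in timesteps:              # e.g. [999, 759, 519, 279]
        z_t = alpha(t) * z0_hat + sigma(t) * torch.randn(shape)
        z0_hat = model(z_t, c, 1.0, t)   # gamma fixed to 1 at inference
    return decoder(z0_hat)
```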

# 5 Experiments

In this section, we conduct experiments to examine the performance of our proposed Sub-Path Linear Approximation Model (SPLAM). First, we describe the experimental configuration and implementation details, and evaluate our models comprehensively on the text-to-image task (Sec. 5.1). Second, we verify the effectiveness of our algorithm design through detailed ablation studies (Sec. 5.2). Finally, we present the qualitative results of our SPLAM (Sec. 5.3).

Table 1: Quantitative results for SDv2.1-base with $w = 8$. The results of DDIM, DPM, DPM++ and LCM* on the LAION test set are derived from [24]. LCM (fix $w$) is our reproduction conducted as stated in the paper. The results on COCO-30k are evaluated by us.

<table><tr><td rowspan="2">Methods</td><td colspan="6">LAION-Aesthetics-6+</td><td colspan="6">COCO-30k</td></tr><tr><td colspan="3">FID(↓)</td><td colspan="3">CLIP-Score(↑)</td><td colspan="3">FID(↓)</td><td colspan="3">CLIP-Score(↑)</td></tr><tr><td></td><td>1 Step</td><td>2 Steps</td><td>4 Steps</td><td>1 Step</td><td>2 Steps</td><td>4 Steps</td><td>1 Step</td><td>2 Steps</td><td>4 Steps</td><td>1 Step</td><td>2 Steps</td><td>4 Steps</td></tr><tr><td>DDIM [39]</td><td>183.29</td><td>81.05</td><td>22.38</td><td>6.03</td><td>14.13</td><td>25.89</td><td>431.26</td><td>229.44</td><td>32.77</td><td>2.88</td><td>7.72</td><td>28.76</td></tr><tr><td>DPM Solver [21]</td><td>185.78</td><td>72.81</td><td>18.53</td><td>6.35</td><td>15.10</td><td>26.64</td><td>206.37</td><td>73.87</td><td>22.04</td><td>10.56</td><td>22.87</td><td>31.18</td></tr><tr><td>DPM Solver++ [22]</td><td>185.78</td><td>72.81</td><td>18.43</td><td>6.35</td><td>15.10</td><td>26.64</td><td>206.35</td><td>73.82</td><td>22.11</td><td>10.57</td><td>22.87</td><td>31.16</td></tr><tr><td>LCM* [24]</td><td>35.36</td><td>13.31</td><td>11.10</td><td>24.14</td><td>27.83</td><td>28.69</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LCM (fix w) [24]</td><td>32.41</td><td>12.17</td><td>10.43</td><td>26.99</td><td>30.13</td><td>30.76</td><td>43.87</td><td>15.71</td><td>14.88</td><td>27.66</td><td>31.07</td><td>31.52</td></tr><tr><td>SPLAM</td><td>32.64</td><td>12.06</td><td>10.09</td><td>27.13</td><td>30.18</td><td>30.76</td><td>40.52</td><td>14.59</td><td>13.81</td><td>27.83</td><td>31.00</td><td>31.45</td></tr></table>

# 5.1 Text-to-Image Generation

Experimental Configuration. On the text-to-image generation task, we train two models with pre-trained Stable Diffusion-V1.5 (SDv1.5) and Stable Diffusion-V2.1-base (SDv2.1-base) as teacher models, respectively. Following the setting of [24], the training dataset is a subset of LAION-5B [36], LAION-Aesthetics-6+. We choose the DDIM solver as the ODE solver $\Phi$ with skipping step $k_{\phi} = 20$.

For evaluation, we adopt the commonly used FID and CLIP Score metrics. Results are reported on both the SDv1.5 and SDv2.1-base backbones, verifying the generalizability of our method. For the experiment of distilling SDv2.1-base, we benchmark our model on two test sets: LAION-Aesthetics-6+ as used in LCM [24], and MSCOCO2014-30k for zero-shot generalization. We also reproduce an SDv2.1-base LCM according to the training configuration outlined in [24] while replacing the $w$-condition with a fixed guidance scale, which also improves its performance. We generally set the guidance scale for distilling SDv2.1-base to 8 and the skipping step size to 20, consistent with [24]. For the experiment of distilling SDv1.5, we compare our model with state-of-the-art generative models, including foundation diffusion models, GANs, and accelerated diffusion models. The guidance scale is set to 3 to obtain the optimal FID, and we adopt the Huber loss [40] as the distance metric in SPLAD. The skipping step size is set to 100 for SPLAM, which shows fast convergence. We examine our method on two commonly used benchmarks, MSCOCO2014-30k and MSCOCO2017-5k. More implementation details are provided in the supplementary materials.

Main Results. The results for SDv2.1-base are presented in Tab. 1, where we use DDIM [39], DPM [21], DPM++ [22] and LCM [24] as baselines. SPLAM surpasses the baseline methods nearly everywhere: across both test sets, at each step count, and on both the FID and CLIP Score metrics. We attribute the close results on LAION to overfitting, since the test set and training set are sourced from the same data collection. For SDv1.5 under the guidance scale $w = 3$, the quantitative results are shown in Tab. 2a and Tab. 2b. Our model with 4 steps achieves an FID-30k of 10.06 and an FID-5k of 20.77, outperforming all other accelerated diffusion models, including the flow-based method InstaFlow [20] and techniques that introduce GAN objectives such as UFOGen [46] and DMD [47]. Furthermore, SPLAM shows results commensurate with state-of-the-art foundation generative models such as DALL-E2 [29]. Even with two steps, SPLAM achieves a competitive FID-30k of 12.31 against parallel algorithms. In practical scenarios, a higher guidance scale $w$ is typically favored to enhance the resultant image quality. Accordingly, we train our SPLAM with $w$ set to 8 and benchmark it against a range of advanced diffusion methodologies, as delineated in Tab. 2c. In this regime, SPLAM also demonstrates significant advantages, achieving state-of-the-art performance with a four-step FID-30k of 13.39, which exceeds the other models by a large margin and is close to the teacher model. Notably, the FID-30k of our model with only two steps reaches 14.50, surpassing the four-step LCM and DMD, while DMD training consumes over one hundred A100 GPU days, more than 16 times our training duration.

Table 2: Quantitative results for SDv1.5. Baseline numbers are cited from [47] and [46]. All LCM results are our reproduction, whose performance is aligned with that stated in the paper. $^{\dagger}$ Results are evaluated by us using the released models.

(a) Results on MSCOCO2014-30k, $w = 3$.

<table><tr><td>Family</td><td>Methods</td><td>Latency(↓)</td><td>FID(↓)</td></tr><tr><td rowspan="11">Unaccelerated</td><td>DALL-E [30]</td><td>-</td><td>27.5</td></tr><tr><td>DALL-E2 [29]</td><td>-</td><td>10.39</td></tr><tr><td>Parti-750M [48]</td><td>-</td><td>10.71</td></tr><tr><td>Parti-3B [48]</td><td>6.4s</td><td>8.10</td></tr><tr><td>Parti-20B [48]</td><td>-</td><td>7.23</td></tr><tr><td>Make-A-Scene [5]</td><td>25.0s</td><td>11.84</td></tr><tr><td>Muse-3B [4]</td><td>1.3s</td><td>7.88</td></tr><tr><td>GLIDE [27]</td><td>15.0s</td><td>12.24</td></tr><tr><td>LDM [31]</td><td>3.7s</td><td>12.63</td></tr><tr><td>Imagen [32]</td><td>9.1s</td><td>7.27</td></tr><tr><td>eDiff-I [1]</td><td>32.0s</td><td>6.95</td></tr><tr><td rowspan="3">GANs</td><td>LAFITE [51]</td><td>0.02s</td><td>26.94</td></tr><tr><td>StyleGAN-T [35]</td><td>0.10s</td><td>13.90</td></tr><tr><td>GigaGAN [12]</td><td>0.13s</td><td>9.09</td></tr><tr><td rowspan="11">Accelerated Diffusion</td><td>DPM++ (4step) [22]</td><td>0.26s</td><td>22.36</td></tr><tr><td>UniPC (4step) [49]</td><td>0.26s</td><td>19.57</td></tr><tr><td>LCM-LoRA (4step) [25]</td><td>0.19s</td><td>23.62</td></tr><tr><td>InstaFlow-0.9B [20]</td><td>0.09s</td><td>13.10</td></tr><tr><td>InstaFlow-1.7B [20]</td><td>0.12s</td><td>11.83</td></tr><tr><td>UFOGen [46]</td><td>0.09s</td><td>12.78</td></tr><tr><td>DMD [47]</td><td>0.09s</td><td>11.49</td></tr><tr><td>LCM (2step) [24]</td><td>0.12s</td><td>14.29</td></tr><tr><td>SPLAM (2step)</td><td>0.12s</td><td>12.31</td></tr><tr><td>LCM (4step) [24]</td><td>0.19s</td><td>10.68</td></tr><tr><td>SPLAM (4step)</td><td>0.19s</td><td>10.06</td></tr><tr><td>Teacher</td><td>SDv1.5 [31]†</td><td>2.59s</td><td>8.03</td></tr></table>

(b) Results on MSCOCO2017-5k, $w = 3$.

<table><tr><td>Methods</td><td>#Step</td><td>Latency(↓)</td><td>FID(↓)</td></tr><tr><td rowspan="2">DPM Solver++ [22]†</td><td>4</td><td>0.21s</td><td>35.0</td></tr><tr><td>8</td><td>0.34s</td><td>21.0</td></tr><tr><td rowspan="3">Progressive Distillation [33]</td><td>1</td><td>0.09s</td><td>37.2</td></tr><tr><td>2</td><td>0.13s</td><td>26.0</td></tr><tr><td>4</td><td>0.21s</td><td>26.4</td></tr><tr><td>CFG-Aware Distillation [15]</td><td>8</td><td>0.34s</td><td>24.2</td></tr><tr><td>InstaFlow-0.9B [20]</td><td>1</td><td>0.09s</td><td>23.4</td></tr><tr><td>InstaFlow-1.7B [20]</td><td>1</td><td>0.12s</td><td>22.4</td></tr><tr><td>UFOGen [46]</td><td>1</td><td>0.09s</td><td>22.5</td></tr><tr><td rowspan="2">LCM [24]</td><td>2</td><td>0.12s</td><td>25.22</td></tr><tr><td>4</td><td>0.19s</td><td>21.41</td></tr><tr><td rowspan="2">SPLAM</td><td>2</td><td>0.12s</td><td>23.07</td></tr><tr><td>4</td><td>0.19s</td><td>20.77</td></tr></table>

(c) Results on MSCOCO2014-30k, $w = 8$.

<table><tr><td>Family</td><td>Methods</td><td>Latency(↓)</td><td>FID(↓)</td></tr><tr><td rowspan="8">Accelerated Diffusion</td><td>DPM++ (4step) [22]</td><td>0.26s</td><td>22.44</td></tr><tr><td>UniPC (4step) [49]</td><td>0.26s</td><td>23.30</td></tr><tr><td>LCM-LoRA (4step) [25]</td><td>0.19s</td><td>23.62</td></tr><tr><td>DMD [47]</td><td>0.09s</td><td>14.93</td></tr><tr><td>LCM (2step) [24]</td><td>0.12s</td><td>15.56</td></tr><tr><td>SPLAM (2step)</td><td>0.12s</td><td>14.50</td></tr><tr><td>LCM (4step) [24]</td><td>0.19s</td><td>14.53</td></tr><tr><td>SPLAM (4step)</td><td>0.19s</td><td>13.39</td></tr><tr><td>Teacher</td><td>SDv1.5 [31]†</td><td>2.59s</td><td>13.05</td></tr></table>

# 5.2 Ablation Study

Skipping Step Size & Training Cost. Fig. 2a ablates the skipping step size used during training, where we compare SPLAM, with and without the Multiple Estimation strategy (Sec. 4.2), against LCM. We observe that: 1) Without multiple estimation, as the skipping step size $k$ increases, LCM suffers a more drastic decline in performance due to the heightened optimization challenge of sub-path learning. By leveraging our proposed Sub-Path Linear ODE, SPLAM can progressively learn $dist_{\Delta}$ and effectively alleviate this collapse. 2) Equipped with the Multiple Estimation strategy, SPLAM stably maintains high image fidelity even at large step sizes. Moreover, we compare the convergence trends of our method and LCM during training, as depicted in Fig. 2b. When $k = 20$, although our metrics converge more slowly in the early stages, the performance of our method gradually surpasses LCM by a large margin. This indicates that our training strategy provides a more effective learning objective, enabling SPLAM to achieve a better result, while LCM quickly overfits. When $k$ is raised to 100, the larger skipping step size brings SPLAM faster convergence, needing just 2K to 6K iterations and only about 6 A100 GPU days of training, facilitating practical applications with fewer resources. Note that LCM needs 10K+ iterations for optimal performance, which costs about 16 A100 GPU days, and cannot be applied to larger skipping step sizes due to the serious performance gap.

Fig. 2: (a) Ablations on skipping step size and skipping mechanism; ME denotes our Multiple Estimation strategy. (b) Training curves comparing LCM and SPLAM; our SPLAM with step size 100 is conducted with ME, which brings faster convergence. (c) Estimation of the error $\delta$ between the consistency mapping values of two adjacent points along the PF-ODE; SPLAM consistently outperforms LCM in terms of this error.

Fig. 3: (a) Visualization of different guidance scales $w$ on SPLAM. (b) The trade-off curve of applying different guidance scales, with $w$ increasing over $\{3.0, 5.0, 8.0, 12.0\}$.

Approximated Error Estimation for SPLAM. To illustrate the efficacy of our approach, we directly estimate the denoising mapping error between two adjacent samples on the PF-ODE, $\delta(t, k) = \mathbb{E}[|\pmb{f}_{\pmb{\theta}}(\pmb{x}_{t_{n+k}}, t_{n+k}) - \pmb{f}_{\pmb{\theta}}(\pmb{x}_{t_n}, t_n)|]$, as first defined in Eq. (6). The results are shown in Fig. 2c. We randomly selected 1000 samples from the COCO dataset and simulated adjacent points on the ODE by adding the same noise at adjacent timesteps. We utilize $k = 20$ and the corresponding 50 timesteps of the DDIM scheduler, disregarding timesteps smaller than 100 due to their relatively larger simulation deviation. It can be seen that, especially at larger timesteps, the error $\delta$ of our SPLAM is further reduced (by about $10\%$ at $t = 800$). This observation substantiates that SPLAM indeed minimizes the approximated errors, boosting the model's capacity for high-quality image generation.

Fig. 4: Comparison of our SPLAM and LCM [24] in 1-, 2- and 4-step generation. The results of LCM are based on our reproduction as illustrated in Sec. 5.1. SPLAM generates consistently higher-quality images that are clearer and more detailed. Noteworthy is the remarkable performance of SPLAM in 2-step generation, which aligns closely with the 4-step generation results of LCM, highlighting the efficiency and effectiveness of our approach in producing high-fidelity images with fewer generation steps.
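
The $\delta(t, k)$ probe can be reproduced in a few lines; as in the text, the two adjacent points share the same noise realization (the model callable and schedule are stand-ins):

```python
import numpy as np

def estimate_delta(f_theta, x0, t, k, alpha, sigma, rng):
    # Perturb the same samples with the same noise at t and t+k, then compare
    # the consistency mapping outputs (the delta(t, k) defined via Eq. (6)).
    eps = rng.standard_normal(x0.shape)
    x_lo = alpha(t) * x0 + sigma(t) * eps
    x_hi = alpha(t + k) * x0 + sigma(t + k) * eps
    return np.mean(np.abs(f_theta(x_hi, t + k) - f_theta(x_lo, t)))
```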

The Effect of Guidance Scale $w$. The guidance scale $w$ is a critical hyperparameter in Stable Diffusion [10,31]; adjusting it allows users to alter the semantic alignment and the quality of the generated image. In this study, we also examine the impact of varying the guidance scale $w$ for our SPLAM based on SDv1.5, visualized in Fig. 3. As with vanilla Stable Diffusion, a higher $w$ value contributes to better sample quality as reflected by CLIP Scores, but it concurrently leads to a degradation in FID and to oversaturation.

# 5.3 Qualitative Results

To emphasize the boosted generation quality of our SPLAM, we display the 1-, 2- and 4-step generation results in comparison with LCM [24] in Fig. 4. Moreover, we compare our SPLAM distilled from SDv1.5 [31] with the most advanced accelerated diffusion models in Fig. 5, which demonstrates that our SPLAM achieves the best generation quality among the existing methods.

Fig. 5: Qualitative results. The text prompts are selected from DMD [47] in (a) and UFOGen [46] in (b), and the corresponding baseline results are also cited from the respective papers. SPLAM clearly demonstrates the best generation quality in 4-step generation apart from the SD models. When decreasing the sampling steps to 2, SPLAM still maintains comparable performance, generating even better results than 4-step LCM [24].

# 6 Conclusion

In this paper, we propose a novel approach, Sub-Path Linear Approximation Models (SPLAM), for accelerating diffusion models. SPLAM leverages the approximation strategy of consistency models and treats the PF-ODE trajectories as a series of interconnected sub-paths delineated by sampled points. Guided by the optimization direction charted by each sub-path, the Sub-Path Linear (SL) ODEs enable our approach to progressively and continuously optimize the approximated learning objectives and thus construct denoising mappings with smaller cumulative errors. We also develop an efficient distillation procedure for SPLAM to enable the incorporation of latent diffusion models. Extensive experiments on the LAION, MS COCO 2014 and MS COCO 2017 datasets consistently demonstrate the superiority of our method over existing accelerated diffusion approaches in few-step generation, with fast training convergence.

# Acknowledgments

This work is supported by the National Key R&D Program of China (No. 2022ZD0160900), the National Natural Science Foundation of China (No. 62076119, No. 61921006), the Fundamental Research Funds for the Central Universities (No. 020214380119), and the Collaborative Innovation Center of Novel Software Technology and Industrialization.

# References

1. Balaji, Y., Nah, S., Huang, X., Vahdat, A., Song, J., Kreis, K., Aittala, M., Aila, T., Laine, S., Catanzaro, B., et al.: eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324 (2022)
2. Bao, F., Li, C., Zhu, J., Zhang, B.: Analytic-DPM: An analytic estimate of the optimal reverse variance in diffusion probabilistic models. arXiv preprint arXiv:2201.06503 (2022)
3. Berthelot, D., Autef, A., Lin, J., Yap, D.A., Zhai, S., Hu, S., Zheng, D., Talbot, W., Gu, E.: TRACT: Denoising diffusion models with transitive closure time-distillation. arXiv preprint arXiv:2303.04248 (2023)
4. Chang, H., Zhang, H., Barber, J., Maschinot, A., Lezama, J., Jiang, L., Yang, M.H., Murphy, K., Freeman, W.T., Rubinstein, M., et al.: Muse: Text-to-image generation via masked generative transformers. In: ICML (2023)
5. Gafni, O., Polyak, A., Ashual, O., Sheynin, S., Parikh, D., Taigman, Y.: Make-a-scene: Scene-based text-to-image generation with human priors. In: ECCV (2022)
6. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: NIPS (2014)
7. Gu, J., Zhai, S., Zhang, Y., Liu, L., Susskind, J.M.: BOOT: Data-free distillation of denoising diffusion models with bootstrapping. In: ICML 2023 Workshop on Structured Probabilistic Inference & Generative Modeling (2023)
8. Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. In: NIPS 2014 Deep Learning Workshop (2015)
9. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: NeurIPS (2020)
10. Ho, J., Salimans, T.: Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598 (2022)
11. Jolicoeur-Martineau, A., Li, K., Piché-Taillefer, R., Kachman, T., Mitliagkas, I.: Gotta go fast when generating data with score-based models. arXiv preprint arXiv:2105.14080 (2021)
12. Kang, M., Zhu, J.Y., Zhang, R., Park, J., Shechtman, E., Paris, S., Park, T.: Scaling up GANs for text-to-image synthesis. In: CVPR (2023)
13. Karras, T., Aittala, M., Aila, T., Laine, S.: Elucidating the design space of diffusion-based generative models. In: NeurIPS (2022)
14. Kingma, D.P., Welling, M.: Auto-encoding variational Bayes. In: ICLR (2014)
15. Li, Y., Wang, H., Jin, Q., Hu, J., Chemerys, P., Fu, Y., Wang, Y., Tulyakov, S., Ren, J.: SnapFusion: Text-to-image diffusion model on mobile devices within two seconds. In: NeurIPS (2023)
16. Lin, S., Wang, A., Yang, X.: SDXL-Lightning: Progressive adversarial diffusion distillation. arXiv preprint arXiv:2402.13929 (2024)
17. Lipman, Y., Chen, R.T., Ben-Hamu, H., Nickel, M., Le, M.: Flow matching for generative modeling. arXiv preprint arXiv:2210.02747 (2022)
18. Liu, L., Ren, Y., Lin, Z., Zhao, Z.: Pseudo numerical methods for diffusion models on manifolds. In: ICLR (2022)
19. Liu, X., Gong, C., Liu, Q.: Flow straight and fast: Learning to generate and transfer data with rectified flow. In: ICLR (2023)
20. Liu, X., Zhang, X., Ma, J., Peng, J., Liu, Q.: InstaFlow: One step is enough for high-quality diffusion-based text-to-image generation. arXiv preprint arXiv:2309.06380 (2023)
21. Lu, C., Zhou, Y., Bao, F., Chen, J., Li, C., Zhu, J.: DPM-Solver: A fast ODE solver for diffusion probabilistic model sampling in around 10 steps. In: NeurIPS (2022)
22. Lu, C., Zhou, Y., Bao, F., Chen, J., Li, C., Zhu, J.: DPM-Solver++: Fast solver for guided sampling of diffusion probabilistic models. arXiv preprint arXiv:2211.01095 (2022)
23. Luhman, E., Luhman, T.: Knowledge distillation in iterative generative models for improved sampling speed. arXiv preprint arXiv:2101.02388 (2021)
24. Luo, S., Tan, Y., Huang, L., Li, J., Zhao, H.: Latent consistency models: Synthesizing high-resolution images with few-step inference. arXiv preprint arXiv:2310.04378 (2023)
25. Luo, S., Tan, Y., Patil, S., Gu, D., von Platen, P., Passos, A., Huang, L., Li, J., Zhao, H.: LCM-LoRA: A universal Stable-Diffusion acceleration module. arXiv preprint arXiv:2311.05556 (2023)
26. Meng, C., Rombach, R., Gao, R., Kingma, D., Ermon, S., Ho, J., Salimans, T.: On distillation of guided diffusion models. In: CVPR (2023)
27. Nichol, A., Dhariwal, P., Ramesh, A., Shyam, P., Mishkin, P., McGrew, B., Sutskever, I., Chen, M.: GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In: ICML (2022)
28. Nichol, A.Q., Dhariwal, P.: Improved denoising diffusion probabilistic models. In: ICML (2021)
29. Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with CLIP latents. arXiv preprint arXiv:2204.06125 (2022)
30. Ramesh, A., Pavlov, M., Goh, G., Gray, S., Voss, C., Radford, A., Chen, M., Sutskever, I.: Zero-shot text-to-image generation. In: ICML (2021)
31. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR (2022)
32. Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E.L., Ghasemipour, K., Gontijo Lopes, R., Karagol Ayan, B., Salimans, T., et al.: Photorealistic text-to-image diffusion models with deep language understanding. In: NeurIPS (2022)
33. Salimans, T., Ho, J.: Progressive distillation for fast sampling of diffusion models. In: ICLR (2022)
34. Sauer, A., Lorenz, D., Blattmann, A., Rombach, R.: Adversarial diffusion distillation. arXiv preprint arXiv:2311.17042 (2023)
35. Sauer, A., Schwarz, K., Geiger, A.: StyleGAN-XL: Scaling StyleGAN to large diverse datasets. In: SIGGRAPH (2022)
36. Schuhmann, C., Beaumont, R., Vencu, R., Gordon, C., Wightman, R., Cherti, M., Coombes, T., Katta, A., Mullis, C., Wortsman, M., et al.: LAION-5B: An open large-scale dataset for training next generation image-text models. In: NeurIPS (2022)
37. Sohl-Dickstein, J., Weiss, E., Maheswaranathan, N., Ganguli, S.: Deep unsupervised learning using nonequilibrium thermodynamics. In: ICML (2015)
38. Sohn, K., Lee, H., Yan, X.: Learning structured output representation using deep conditional generative models. In: NIPS (2015)
39. Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. In: ICLR (2021)
40. Song, Y., Dhariwal, P.: Improved techniques for training consistency models. arXiv preprint arXiv:2310.14189 (2023)
41. Song, Y., Dhariwal, P., Chen, M., Sutskever, I.: Consistency models. In: ICML (2023)
42. Song, Y., Ermon, S.: Generative modeling by estimating gradients of the data distribution. In: NeurIPS (2019)
43. Song, Y., Sohl-Dickstein, J., Kingma, D.P., Kumar, A., Ermon, S., Poole, B.: Score-based generative modeling through stochastic differential equations. In: ICLR (2021)
44. Tong, A., Malkin, N., Huguet, G., Zhang, Y., Rector-Brooks, J., Fatras, K., Wolf, G., Bengio, Y.: Improving and generalizing flow-based generative models with minibatch optimal transport. arXiv preprint arXiv:2302.00482 (2023)
45. Xiao, Z., Kreis, K., Vahdat, A.: Tackling the generative learning trilemma with denoising diffusion GANs. In: ICLR (2022)
46. Xu, Y., Zhao, Y., Xiao, Z., Hou, T.: UFOGen: You forward once large scale text-to-image generation via diffusion GANs. arXiv preprint arXiv:2311.09257 (2023)
47. Yin, T., Gharbi, M., Zhang, R., Shechtman, E., Durand, F., Freeman, W.T., Park, T.: One-step diffusion with distribution matching distillation. arXiv preprint arXiv:2311.18828 (2023)
48. Yu, J., Xu, Y., Koh, J.Y., Luong, T., Baid, G., Wang, Z., Vasudevan, V., Ku, A., Yang, Y., Ayan, B.K., et al.: Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789 (2022)
49. Zhao, W., Bai, L., Rao, Y., Zhou, J., Lu, J.: UniPC: A unified predictor-corrector framework for fast sampling of diffusion models. arXiv preprint arXiv:2302.04867 (2023)
50. Zheng, H., Nie, W., Vahdat, A., Azizzadenesheli, K., Anandkumar, A.: Fast sampling of diffusion models via operator learning. In: ICML (2023)
51. Zhou, Y., Zhang, R., Chen, C., Li, C., Tensmeyer, C., Yu, T., Gu, J., Xu, J., Sun, T.: Towards language-free training for text-to-image generation. In: CVPR (2022)
52. Zhou, Z., Chen, D., Wang, C., Chen, C.: Fast ODE-based sampling for diffusion models in around 5 steps. arXiv preprint arXiv:2312.00094 (2023)
acceleratingimagegenerationwithsubpathlinearapproximationmodel/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d07a2c9a504898578b0abb39f8e3a4fc097d89045690268a0fcdc00315963437
size 653203

acceleratingimagegenerationwithsubpathlinearapproximationmodel/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b05ab844307a9ab35be694e1c006884803ba0c5482bcc96a90f3b5c07c4e03f
size 530136

acceleratingimagesuperresolutionnetworkswithpixellevelclassification/03b7ebd8-cf31-4867-9bee-a34beac45e82_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:28657b7c89d40a598d4cf4d0624ff1bdfeecf5a70d23c0c3720ffd13008fc3b9
size 71951

acceleratingimagesuperresolutionnetworkswithpixellevelclassification/03b7ebd8-cf31-4867-9bee-a34beac45e82_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:308222bafc675112c47d64cdd365879c4e9ffb570a691b83cbab5675ebfd8164
size 84789

acceleratingimagesuperresolutionnetworkswithpixellevelclassification/03b7ebd8-cf31-4867-9bee-a34beac45e82_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20ff141b5d6e2eb0cb2beb797a38ee39b7067e59ae3162ba9307093e64ab2d40
size 3479801

acceleratingimagesuperresolutionnetworkswithpixellevelclassification/full.md
ADDED
@@ -0,0 +1,264 @@

# Accelerating Image Super-Resolution Networks with Pixel-Level Classification

Jinho Jeong$^{1}$, Jinwoo Kim$^{1}$, Younghyun Jo$^{2}$, and Seon Joo Kim$^{1}$

$^{1}$Yonsei University

$^{2}$Samsung Advanced Institute of Technology

Abstract. In recent times, the need for effective super-resolution (SR) techniques has surged, especially for large-scale images ranging from 2K to 8K resolutions. For DNN-based SISR, decomposing images into overlapping patches is typically necessary due to computational constraints. In such a patch-decomposing scheme, one can allocate computational resources differently based on each patch's difficulty to further improve efficiency while maintaining SR performance. However, this approach has a limitation: computational resources are uniformly allocated within a patch, leading to lower efficiency when the patch contains pixels with varying levels of restoration difficulty. To address this issue, we propose the Pixel-level Classifier for Single Image Super-Resolution (PCSR), a novel method designed to distribute computational resources adaptively at the pixel level. A PCSR model comprises a backbone, a pixel-level classifier, and a set of pixel-level upsamplers with varying capacities. The pixel-level classifier assigns each pixel to an appropriate upsampler based on its restoration difficulty, thereby optimizing computational resource usage. Our method allows the balance between performance and computational cost to be adjusted during inference without re-training. Our experiments demonstrate PCSR's advantage over existing patch-distributing methods in PSNR-FLOP trade-offs across different backbone models and benchmarks. The code will be available at https://github.com/3587jjh/PCSR.

# 1 Introduction

Single Image Super-Resolution (SISR) is a task focused on restoring a high-resolution (HR) image from its low-resolution (LR) counterpart. The task has wide real-life applications across diverse fields, including but not limited to digital photography, medical imaging, surveillance, and security. In line with these significant demands, SISR has advanced considerably over the last decades, especially with Deep Neural Networks (DNNs) [6, 12, 14, 16, 23, 24].

However, as new SISR models come out, both capacity and computational cost tend to go up, making it hard to apply the models in real-world applications or on devices with limited resources. This has led to a shift towards designing simpler, more efficient lightweight models [2, 7, 8, 15, 19, 25] that balance performance and computational cost.


|
| 18 |
+
Computation: Large Med. Small
|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
PSNR: 26.45 dB
|
| 22 |
+
FLOPs: 180.4G (100%)
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
PSNR: 26.31 dB
|
| 26 |
+
FLOPs: 134.3G (74%)
|
| 27 |
+
Fig. 1: The SR result on the image "1228" (Test2K), $\times 4$ . By adaptively distributing computational resources in a pixel-wise manner, our method can reduce the overall computational costs in terms of FLOPs compared to the patch-distributing method, while also achieving a better PSNR score.
|
| 28 |
+
|
| 29 |
+

|
| 30 |
+
PSNR:26.38dB FLOPs:115.0G $(64\%)$
|
| 31 |
+
|
| 32 |
+
In addition, extensive research [4, 10, 13, 17, 20, 21] has been conducted to reduce the parameter size and/or the number of floating-point operations (FLOPs) of existing models without compromising their performance.

In parallel, there has been a growing demand for efficient SR, particularly with the rise of platforms that provide large-scale images to users, such as advanced smartphones, high-definition televisions, and professional-grade monitors supporting resolutions from 2K to 8K. Nevertheless, SR on a large image is challenging; a large image cannot be processed in a single pass (i.e., per-image processing) due to limited computational resources. Instead, a common approach for large-image SR involves dividing a given LR image into overlapping patches, applying an SR model to each patch independently, and then merging the outputs to obtain a super-resolved image. Several studies [4, 13, 20] have explored this per-patch processing approach with the aim of enhancing the efficiency of existing models while preserving their performance. These studies share the observation that patches vary in restoration difficulty, and thus allocate different computational resources to each patch.

While adaptively distributing computational resources at the patch level achieves remarkable efficiency improvements, it has two limitations that may prevent it from fully realizing its potential: 1) Since SR is a low-level vision task, even a single patch can contain pixels with varying degrees of restoration difficulty. Allocating large computational resources to a patch that includes easy pixels wastes computational effort; conversely, if a patch with a small allocation contains hard pixels, performance suffers. 2) These so-called patch-distributing methods become less efficient with larger patch sizes, as larger patches are more likely to contain a balanced mix of easy and hard pixels. This introduces a dilemma: we may want to use larger patches, since they not only minimize redundant operations from overlapping but also enhance performance by leveraging more contextual information.


|
| 39 |
+
(a) FSRCNN
|
| 40 |
+
|
| 41 |
+

|
| 42 |
+
(b)CARN
|
| 43 |
+
|
| 44 |
+

|
| 45 |
+
(c) SRResNet
|
| 46 |
+
Fig. 2: Visual comparison of PSNR and FLOPs between ClassSR, ARM, and PCSR (ours) on Test2K at scale $\times 4$ .
|
| 47 |
+
|
| 48 |
+
In this paper, our primary goal is to enhance the efficiency of existing SISR models, especially for larger images. To overcome the aforementioned limitations of patch-distributing methods, we propose a novel approach named Pixel-level Classifier for Single Image Super-Resolution (PCSR), which is specifically designed to adaptively distribute computational resources at the pixel level. A model based on our method consists of three main parts: a backbone, a pixel-level classifier, and a set of pixel-level upsamplers with varying capacities. The model operates as follows: 1) The backbone takes an LR input and generates an LR feature map. 2) For each pixel in the HR space, the pixel-level classifier predicts the probability of assigning it to each specific upsampler, using the LR feature map and the relative position of that pixel. 3) Accordingly, each pixel is adaptively assigned to a properly sized pixel-level upsampler to predict its RGB value. 4) Finally, the super-resolved output is obtained by aggregating the RGB values of all pixels.

To the best of our knowledge, our method is the first to apply a pixel-wise distributing method in the context of efficient SR for large images. By cutting down redundant computations in a pixel-wise manner, we can further improve the efficiency of the patch-distributing approach, as illustrated in Fig. 1. During the inference phase, we offer users tunability to traverse the trade-off between performance and computational cost without the need for re-training. While our method enables users to manage the trade-off, we also provide an additional functionality that automatically assigns pixels based on the K-means clustering algorithm, which can simplify the user experience. Lastly, we introduce a post-processing technique that effectively eliminates artifacts which can arise from distributing computation on a pixel-wise basis. Experiments show that our method outperforms existing patch-distributing approaches [4, 13] in terms of the PSNR-FLOP trade-off across various SISR models [7, 14, 25] on several benchmarks, including Test2K/4K/8K [13] and Urban100 [11]. We also compare our method with the per-image processing-based method [10], which processes images in their entirety rather than decomposing them into patches.

# 2 Related Works

CNN-based SISR. The evolution of deep learning in SISR begins with SRCNN [6], which introduces convolutional neural networks. VDSR [12] deepens this approach with residual learning. SRResNet [14] further expands the architecture using residual blocks, while EDSR [16] streamlines it, removing batch normalization for improved performance. RCAN [23] and RDN [24] advance feature extraction through channel attention and dense connections, respectively. These developments have greatly improved image quality but have also raised capacity and computational costs, posing challenges for real-world applications.

Lightweight SISR. The evolution of lightweight SISR models emphasizes efficiency in enhancing image quality. FSRCNN [7] starts by working directly on LR images for speed. MemNet [19] builds upon this by introducing a memory mechanism for deeper detail restoration, while CARN [2] balances efficiency and accuracy using cascading designs. PAN [25] adds pixel attention for detail enhancement without heavy computational costs. LBNet [8] merges CNNs with transformers for high-quality SR on resource-constrained devices, and BSRN [15] progresses with a scalable approach using separable convolutions.

Region-aware SISR. Region-aware SISR leverages the insight that high-frequency regions in an image are more challenging to restore than low-frequency ones. This approach aims to enhance efficiency by reducing redundant computation in low-frequency regions. AdaDSR [17] tailors its processing depth to the image's complexity, optimizing efficiency. FAD [21] adjusts its focus based on the input's frequency characteristics, enhancing detail in critical regions while conserving effort on smoother parts. MGA [10] initially applies a global restoration to the entire image and then refines specific regions locally, guided by a predicted mask.

Alongside these, various studies have emerged focusing on efficiency in large-scale image SR. These studies decompose images into several patches and aim to enhance efficiency by dynamically allocating computational resources according to the restoration difficulty of each patch. ClassSR [13] is the first work in this area: it utilizes a classifier to categorize patches into simple, medium, or hard types, and assigns them to subnets with different capacities to reduce FLOPs. However, since ClassSR employs independent subnets, it incurs a significant increase in parameter count. ARM [4] resolves this limitation by decomposing the original network into subnets that share parameters, so no additional parameters are introduced. APE [20], on the other hand, uses a regressor that predicts the incremental capacity at each layer for each patch, reducing FLOPs through early patch exiting while forwarding through network layers. In this line of study, moving away from the existing patch-distributing methods, we aim to distribute computational resources on a pixel-wise basis, seeking additional efficiency improvements through finer granularity.


|
| 63 |
+
Fig. 3: The architecture of the proposed PCSR model when the number of classes $M$ is 2. We denote $q$ as a single query pixel in the HR space and $x_{q}$ for its coordinate. Pixel-level probabilities obtained from the classifier are used to allocate each query pixel to a suitably-sized upsampler for the prediction of its RGB value.
|
| 64 |
+
|
| 65 |
+
# 3 Method

# 3.1 Preliminary

Single Image Super-Resolution (SISR) is a task aimed at generating a high-resolution (HR) image from a single low-resolution (LR) input image. Within the framework of neural networks, an SISR model aims to discover a mapping function $F$ that converts a given LR image $I^{LR}$ into an HR image $I^{HR}$. This can be represented by the equation:

$$
I^{HR} = F(I^{LR}; \theta), \tag{1}
$$

where $\theta$ is a set of model parameters. Typical models [2,7,8,14-16,23-25] can be decomposed into two main components: 1) a backbone $B$ that extracts features from $I^{LR}$, and 2) an upsampler $U$ that utilizes the features to reconstruct $I^{HR}$. Thus, the process can further be represented as follows:

$$
Z = B(I^{LR}; \theta_{B}), \quad I^{HR} = U(Z; \theta_{U}), \tag{2}
$$

where $\theta_B$ and $\theta_U$ are the parameters of the backbone and the upsampler respectively, and $Z$ is the extracted feature. In a convolutional neural network-based (i.e., CNN-based) upsampler, diverse operations are employed along with convolution layers to increase the resolution of the image being processed. These range from simple interpolation to more complex methods like deconvolution or sub-pixel convolution [18]. Instead of using a CNN-based upsampler, one can employ a multilayer perceptron-based (i.e., MLP-based) upsampler that operates in a pixel-wise manner, which will be further described in the following section.

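To make this decomposition concrete, here is a minimal PyTorch sketch of a backbone $B$ and an MLP-based pixel-wise upsampler $U$. The module names, layer counts, and hidden sizes are illustrative assumptions, not the paper's exact configuration.

```python
import torch
import torch.nn as nn

class TinyBackbone(nn.Module):
    """Illustrative backbone B: maps an LR image to an LR feature map (Eq. 2)."""
    def __init__(self, dim=64):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(3, dim, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(dim, dim, 3, padding=1),
        )

    def forward(self, x):      # x: (B, 3, h, w) LR image
        return self.body(x)    # Z: (B, D, h, w) LR feature

class PixelUpsampler(nn.Module):
    """Illustrative MLP-based upsampler U: feature vector + 2-D relative coordinate -> RGB."""
    def __init__(self, dim=64, hidden=64):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(dim + 2, hidden), nn.ReLU(inplace=True),
            nn.Linear(hidden, 3),
        )

    def forward(self, feat_and_rel):   # (..., D + 2)
        return self.mlp(feat_and_rel)  # (..., 3) RGB
```
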
# 3.2 Network Architecture

The overview of PCSR is shown in Fig. 3. As discussed above, a model consists of a backbone and a set of upsamplers. In addition, we employ a classifier that measures the difficulty of restoring target pixels in the HR space (i.e., query pixels). The LR input image is fed forward through the backbone to generate the corresponding LR feature. Then, the classifier determines the restoration difficulty of each query pixel, and its output RGB value is computed through the corresponding upsampler.

Backbone. We propose a pixel-wise computation-distributing method for efficient large-image SR. Any existing deep SR network can be used as our backbone to fit a desired model size; for example, the small FSRCNN [7], the medium-sized CARN [2], the large SRResNet [14], and other models can all be adopted.

Classifier. We introduce a lightweight MLP-based classifier to obtain the probability of each pixel belonging to each upsampler (or class) in a pixel-wise manner. Given a query pixel coordinate $x_{q}$, our classifier assigns it to one of the corresponding upsamplers, depending on the classification probability, to predict its RGB value. By properly assigning easy pixels to a lighter upsampler instead of a heavier one, we can save computational resources with minimal performance drop.

Let an LR input be $X \in \mathbb{R}^{h \times w \times 3}$, and its corresponding HR image be $Y \in \mathbb{R}^{H \times W \times 3}$. Let $\{y_i\}_{i=1\dots HW}$ be the coordinates of the pixels within the HR image $Y$ and $\{Y(y_i)\}_{i=1\dots HW}$ be the corresponding RGB values. First, an LR feature $Z \in \mathbb{R}^{h \times w \times D}$ is computed from the LR input by the backbone. Then, given the number of classes $M$, the classification probability $p_i \in \mathbb{R}^M$ is obtained by the classifier $C$:

$$
p_{i} = \sigma\left(C\left(Z, y_{i}; \theta_{C}\right)\right), \tag{3}
$$

where $\sigma$ is the softmax function. The MLP-based classifier operates similarly to an upsampler, with the main difference being that its output dimension is $M$; see Eq. (4) for details.

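As an illustration, a pixel-level classifier implementing Eq. (3) might look like the following sketch. The class name, hidden width, and the assumption that it consumes the same per-pixel input as the upsamplers are ours, not taken from the paper's code.

```python
import torch.nn as nn
import torch.nn.functional as F

class PixelClassifier(nn.Module):
    """Lightweight MLP giving, per query pixel, a probability over M upsamplers (Eq. 3)."""
    def __init__(self, dim=64, hidden=32, num_classes=2):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(dim + 2, hidden), nn.ReLU(inplace=True),
            nn.Linear(hidden, num_classes),
        )

    def forward(self, feat_and_rel):                        # (HW, D + 2)
        return F.softmax(self.mlp(feat_and_rel), dim=-1)    # p_i in R^M per pixel
```
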
Upsampler. We employ LIIF [5] as our upsampler, which is suitable for pixel-level processing. We first normalize $y_{i}$, defined previously in the HR space, to map it to the coordinate $\hat{y}_i \in \mathbb{R}^2$ in the LR space. Given the LR feature $Z$, we denote by $z_i^* \in \mathbb{R}^D$ the feature nearest (in Euclidean distance) to $\hat{y}_i$, and by $v_{i}^{*} \in \mathbb{R}^{2}$ the corresponding coordinate of that feature. The upsampling process is then summarized as:

$$
I^{SR}\left(y_{i}\right) = U\left(Z, y_{i}; \theta_{U}\right) = U\left(\left[z_{i}^{*}, \hat{y}_{i} - v_{i}^{*}\right]; \theta_{U}\right), \tag{4}
$$

where $I^{SR}(y_i) \in \mathbb{R}^3$ is the RGB value at $y_i$ and $[\cdot]$ is a concatenation operation. We obtain the final output $I^{SR}$ by querying the RGB values for every $\{y_i\}_{i=1\dots HW}$ and combining them (please refer to [5] for more details of LIIF processing). In our proposed method, $M$ parallel upsamplers $\{U_0, U_1, \dots, U_{M-1}\}$ are exploited to handle a wide range of restoration difficulties (i.e., from heavy to light capacity).

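The coordinate bookkeeping of Eq. (4) can be sketched as follows. This is a simplified, assumption-laden rendering of LIIF-style nearest-feature querying: the function name `gather_queries` and the normalized $[-1, 1]$ coordinate convention are ours and differ from the official LIIF implementation in detail.

```python
import torch

def gather_queries(z, hr_coords):
    """Build the [z_i*, y_hat_i - v_i*] inputs of Eq. (4) for HR query coordinates.

    z:         (D, h, w) LR feature map.
    hr_coords: (Q, 2) query coordinates, normalized to [-1, 1] on both axes,
               so HR and LR share one coordinate system (as in LIIF).
    """
    D, h, w = z.shape
    # Centers of the LR feature cells in the same normalized [-1, 1] space.
    ys = (torch.arange(h) + 0.5) / h * 2 - 1
    xs = (torch.arange(w) + 0.5) / w * 2 - 1
    # Nearest LR cell per query (on a regular grid, per-axis nearest == Euclidean nearest).
    iy = ((hr_coords[:, 0] + 1) / 2 * h - 0.5).round().clamp(0, h - 1).long()
    ix = ((hr_coords[:, 1] + 1) / 2 * w - 0.5).round().clamp(0, w - 1).long()
    z_star = z[:, iy, ix].t()                       # (Q, D) nearest features
    v_star = torch.stack([ys[iy], xs[ix]], dim=-1)  # (Q, 2) their coordinates
    rel = hr_coords - v_star                        # relative offsets y_hat_i - v_i*
    return torch.cat([z_star, rel], dim=-1)         # (Q, D + 2)
```
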
# 3.3 Training

During the training phase, we feed each query pixel through all $M$ upsamplers and aggregate the outputs, so that the gradient back-propagates effectively:

$$
\hat{Y}\left(y_{i}\right) = \sum_{j=0}^{M-1} p_{i,j} \times U_{j}\left(Z, y_{i}; \theta_{U_{j}}\right), \tag{5}
$$

where $\hat{Y}(y_i) \in \mathbb{R}^3$ is the RGB output at $y_i$ and $p_{i,j}$ is the probability of that query pixel belonging to upsampler $U_j$.

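A minimal sketch of this training-time forward pass, reusing the hypothetical helpers from the sketches above, could be:

```python
import torch

def forward_train(backbone, classifier, upsamplers, lr_img, hr_coords):
    """Eq. (5): every query pixel goes through all M upsamplers, and outputs are
    blended by the classifier probabilities so gradients reach every branch."""
    z = backbone(lr_img.unsqueeze(0)).squeeze(0)          # (D, h, w) LR feature
    inp = gather_queries(z, hr_coords)                    # (Q, D + 2) per-pixel inputs
    p = classifier(inp)                                   # (Q, M), Eq. (3)
    outs = torch.stack([u(inp) for u in upsamplers], 1)   # (Q, M, 3)
    return (p.unsqueeze(-1) * outs).sum(dim=1), p         # (Q, 3) blended RGB, probs
```
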
We then leverage two loss functions: a reconstruction loss $L_{recon}$, and an average loss $L_{avg}$ similar to the one used in ClassSR [13]. The reconstruction loss is defined as the L1 loss between the RGB values of the predicted output and the target. Here, we take the target to be the difference between the ground-truth HR patch and the bilinearly upsampled LR input patch. The reason is that we want the classifier to perform the classification task well, even with a very small capacity, by emphasizing high-frequency features. The loss can therefore be written as:

$$
L_{recon} = \sum_{i=1}^{HW} \left| \left(Y(y_{i}) - upX(y_{i})\right) - \hat{Y}(y_{i}) \right|, \tag{6}
$$

where $upX(y_{i})$ is the RGB value of the bilinearly upsampled LR input patch at location $y_{i}$. For the average loss, we encourage a uniform assignment of pixels across the classes by defining the loss as:

$$
L_{avg} = \sum_{j=1}^{M} \left| \sum_{n=1}^{N} \sum_{i=1}^{HW} p_{n,i,j} - \frac{NHW}{M} \right|, \tag{7}
$$

where $p_{n,i,j}$ is the probability of the $i$-th pixel of the $n$-th HR image (i.e., batch dimension, with batch size $N$) belonging to the $j$-th class. Here, we treat the probability of belonging to each class as the effective number of pixel assignments to that class. We set the target to $\frac{NHW}{M}$ because we want to allocate the same number of pixels to each class (or upsampler) out of a total of $NHW$ pixels. Finally, the total loss $L$ is defined as:

$$
L = w_{recon} \times L_{recon} + w_{avg} \times L_{avg}. \tag{8}
$$

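Putting Eqs. (6)-(8) together, a compact loss sketch follows; the weight values are placeholders rather than the paper's settings, and the batch dimension is flattened into the pixel dimension (so Eq. (7)'s sum over $n$ is implicit).

```python
def pcsr_loss(pred, p, hr_rgb, up_lr_rgb, w_recon=1.0, w_avg=0.1):
    """Total loss of Eq. (8). `pred` and `p` come from forward_train(); `hr_rgb`
    are ground-truth HR values and `up_lr_rgb` the bilinearly upsampled LR values
    at the same coordinates. Batch pixels are assumed pre-flattened into dim 0."""
    target = hr_rgb - up_lr_rgb                       # high-frequency residual target
    l_recon = (target - pred).abs().sum()             # Eq. (6), L1
    n_pix, m = p.shape
    l_avg = (p.sum(dim=0) - n_pix / m).abs().sum()    # Eq. (7), uniform assignment
    return w_recon * l_recon + w_avg * l_avg          # Eq. (8)
```
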
Since jointly training all modules (i.e., backbone $B$, classifier $C$, and upsamplers $U_{j\in [0,M)}$) from scratch can lead to unstable training, we adopt a multi-stage training strategy. Assuming that the capacity of the upsamplers decreases from $U_0$ to $U_{M-1}$, the upper bound of the model's performance is determined by the backbone $B$ and the heaviest upsampler $U_0$. Thus, we initially train $\{B, U_0\}$ using only the reconstruction loss. Then, for $j = 1$ to $j = M-1$, the following process is repeated: first, freeze the already-trained $\{B, U_0, \dots, U_{j-1}\}$; second, attach $U_j$ to the backbone (and also newly attach $C$ when $j = 1$); finally, jointly train $\{U_j, C\}$ using the total loss. A condensed sketch of this schedule is given below.

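In the sketch, `train_stage` is a hypothetical stand-in for one full training run over the dataset with the given trainable modules and loss choice.

```python
def train_pcsr_stages(backbone, classifier, upsamplers, train_stage):
    """Multi-stage schedule described above (assumed helper `train_stage`)."""
    # Stage 0: backbone + heaviest upsampler, reconstruction loss only.
    train_stage([backbone, upsamplers[0]], use_total_loss=False)
    for j in range(1, len(upsamplers)):
        # Freeze everything trained so far.
        for module in [backbone, *upsamplers[:j]]:
            for prm in module.parameters():
                prm.requires_grad_(False)
        # Attach U_j (and the classifier C at j == 1) and train with the total loss.
        train_stage([upsamplers[j], classifier], use_total_loss=True)
```
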
# 3.4 Inference

In the inference phase of PCSR, the overall process is similar to training, but each query pixel is assigned to a unique upsampler branch based on the predicted classification probabilities. While one could simply allocate each pixel to the branch with the highest probability, we give users the ability to traverse the computation-performance trade-off without re-training. To this end, the FLOP count is considered in the decision-making process. We define and pre-calculate the impact of each upsampler $U_{j\in [0,M)}$ in terms of FLOPs as:

$$
cost(U_{j}) = \sigma\big(flops(B; (h_{0}, w_{0})) + flops(U_{j}; (h_{0}, w_{0}))\big), \tag{9}
$$

where $\sigma$ is the softmax function and $flops(\cdot)$ refers to the FLOPs of the module at a fixed resolution $(h_0, w_0)$. The branch allocation for the pixel at $y_i$ is then determined as follows:

$$
\operatorname{argmax}_{j} \frac{p_{i,j}}{\left[cost\left(U_{j}\right)\right]^{k}}, \tag{10}
$$

where $k$ is a hyperparameter and $p_{i,j}$ is the probability of that query pixel belonging to $U_j$, as defined previously. By this definition, a lower $k$ value results in more pixels being assigned to the heavier upsamplers, minimizing performance degradation at the cost of increased computational load. Conversely, a higher $k$ value assigns more pixels to the lighter upsamplers, accepting a reduction in performance in exchange for lower computational demand.

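A sketch of Eqs. (9) and (10), assuming the per-module FLOP counts have been measured offline and are passed in as constants:

```python
import torch

def upsampler_costs(flops_backbone, flops_upsamplers):
    """Eq. (9): softmax over (backbone + upsampler) FLOPs at a fixed resolution."""
    total = torch.tensor([flops_backbone + f for f in flops_upsamplers],
                         dtype=torch.float)
    return torch.softmax(total, dim=0)                # (M,) normalized costs

def allocate_branches(p, costs, k=1.0):
    """Eq. (10): pick, per pixel, the upsampler maximizing p / cost^k.
    p: (Q, M) classifier probabilities; larger k favors cheaper upsamplers."""
    scores = p / costs.pow(k)                         # broadcast over M
    return scores.argmax(dim=-1)                      # (Q,) chosen branch per pixel
```
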
Adaptive Decision Making (ADM). While our method allows users to manage the computation-performance trade-off manually, we also provide a functionality that automatically allocates pixels based on probability statistics computed over the entire image. It proceeds as follows: given $p_{i,j}$ for all pixels of a single input image, and treating $U_{j}$ with $0 \leq j < \lfloor (M+1)/2 \rfloor$ as the heavy upsamplers, we compute $\sum_{0\leq j < \lfloor (M+1)/2\rfloor} p_{i,j}$ to represent the restoration difficulty of each pixel, yielding one value per pixel. We then group these values into $M$ clusters using a clustering algorithm. Finally, by assigning each group to an upsampler, ranging from the heaviest $U_{0}$ to the lightest $U_{M-1}$ according to its centroid value, all pixels are allocated to appropriate upsamplers. We employ K-means clustering to minimize the computational load; since we initialize the centroid values uniformly, the process is deterministic. We demonstrate the efficacy of ADM in the appendix.

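Under these assumptions (uniform centroid initialization, clusters mapped to upsamplers by sorted centroid), a deterministic 1-D K-means sketch of ADM might read as follows; the iteration count and function name are illustrative.

```python
import torch

def adm_assign(p, m, iters=10):
    """ADM sketch: 1-D K-means over per-pixel difficulty scores.
    p: (Q, M) classifier probabilities; difficulty = total heavy-branch probability."""
    heavy = (m + 1) // 2
    difficulty = p[:, :heavy].sum(dim=-1)                 # (Q,) in [0, 1]
    centers = torch.linspace(0, 1, m)                     # deterministic uniform init
    for _ in range(iters):
        assign = (difficulty.unsqueeze(1) - centers).abs().argmin(dim=1)
        for c in range(m):
            sel = difficulty[assign == c]
            if sel.numel() > 0:
                centers[c] = sel.mean()
    # Highest-difficulty cluster -> heaviest upsampler U_0, lowest -> U_{M-1}.
    order = centers.argsort(descending=True)              # cluster ids by difficulty
    rank = torch.empty_like(order)
    rank[order] = torch.arange(m)
    return rank[assign]                                   # (Q,) upsampler index per pixel
```
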
Pixel-wise Refinement. Since the RGB value of each pixel is predicted by an independent upsampler, artifacts can arise when adjacent pixels are assigned to upsamplers with different capacities. To address this issue, we propose a simple solution: we again treat the lower half of the upsamplers by capacity as light upsamplers and the upper half as heavy upsamplers, and perform refinement when adjacent pixels are allocated to different types of upsamplers. Specifically, for a pixel assigned to $U_{j}$ with $\lfloor (M+1)/2 \rfloor \leq j < M$ (i.e., a light upsampler), if at least one neighboring pixel has been assigned to some $U_{j'}$ with $0 \leq j' < \lfloor (M+1)/2 \rfloor$ (i.e., a heavy upsampler), we replace its RGB value with the average of the neighboring pixels (including itself) in the SR output. Our pixel-wise refinement algorithm works without any extra forward passes, effectively reducing artifacts with only a small amount of extra computation and minimal effect on overall performance.

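This refinement rule admits a simple vectorized sketch. The 3x3 neighborhood and masking logic follow the description above, while the function name and tensor layout are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def refine(sr, branch, m):
    """Pixel-wise refinement sketch: a light-branch pixel adjacent to any heavy-branch
    pixel is replaced by the 3x3 average (including itself) of the SR output.
    sr: (3, H, W) SR image; branch: (H, W) per-pixel upsampler indices."""
    heavy = (branch < (m + 1) // 2).float()[None, None]       # (1, 1, H, W)
    kernel = torch.ones(1, 1, 3, 3)
    has_heavy_nb = F.conv2d(heavy, kernel, padding=1) > 0     # any heavy pixel in 3x3?
    light = (branch >= (m + 1) // 2)[None, None]
    mask = (light & has_heavy_nb).squeeze(0)                  # (1, H, W) pixels to fix
    # Average only over valid (in-image) neighbors at the borders.
    avg = F.avg_pool2d(sr.unsqueeze(0), 3, stride=1, padding=1,
                       count_include_pad=False).squeeze(0)
    return torch.where(mask, avg, sr)
```
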
# 4 Experiments

# 4.1 Settings

Training. To ensure a fair comparison, we align our overall training settings with those of ClassSR and ARM. We densely crop DIV2K [1] (indices 0001-0800) into 1.59 million $32\times32$ LR sub-images for the training dataset, and apply random rotation and flipping for data augmentation. We adopt the existing FSRCNN [7], CARN [2], and SRResNet [14] as backbones with their original parameter counts of 25K, 295K, and 1.5M, respectively. Throughout all training phases for both the original models and PCSR, the batch size is 16 and the initial learning rate is set to 0.001 for FSRCNN and 0.0002 for CARN and SRResNet, with cosine annealing scheduling. The Adam optimizer is used. Both the original models and the initial PCSR (which includes only the backbone and the heaviest upsampler) are trained for 2,000K iterations, while subsequent stages of PCSR training use 500K iterations. In the initial PCSR, we fine-tune the hidden dimension of the backbone and adjust the MLP size of the heaviest upsampler to maintain performance parity with the original models in terms of PSNR and FLOPs. In our implementation, we simply set $M = 2$ as it shows decent performance with its simplicity, which is verified in Sec. 4.3.

Evaluation. We mainly evaluate our method on Test2K/Test4K/Test8K [13], which are downsampled from DIV8K [9], and on Urban100 [11], which consists of much larger images than commonly used benchmarks such as Set5 [3] and Set14 [22]. For the evaluation metrics, we use PSNR (Peak Signal-to-Noise Ratio) to assess the quality of the SR images, and FLOPs (Floating Point Operations) to measure computational efficiency. PSNR is calculated in RGB space and FLOPs are measured on the full image. Unless specified otherwise, the original model and our PCSR are evaluated at full resolution, while ClassSR and ARM are evaluated on an overlapped-patch basis. Other evaluation protocols follow those of ClassSR and ARM. When comparing PCSR with the comparison groups, pixel-wise refinement is always employed, and the hyperparameter $k$ is adjusted to match their performance, or ADM is used.

Table 1: Comparison of the previous patch-level methods and our pixel-level method PCSR on the large-image SR benchmarks Test2K, Test4K, Test8K, and Urban100 with $\times 4$ SR. The lowest FLOPs values are highlighted in bold.

<table><tr><td>Models</td><td>Params.</td><td>Test2K(dB)</td><td>GFLOPs</td><td>Test4K(dB)</td><td>GFLOPs</td></tr><tr><td>FSRCNN</td><td>25K</td><td>25.69</td><td>45.3 (100%)</td><td>26.99</td><td>185.3 (100%)</td></tr><tr><td>FSRCNN-ClassSR</td><td>113K</td><td>25.61</td><td>38.4 (85%)</td><td>26.91</td><td>146.4 (79%)</td></tr><tr><td>FSRCNN-ARM</td><td>25K</td><td>25.61</td><td>35.6 (79%)</td><td>26.91</td><td>152.9 (83%)</td></tr><tr><td>FSRCNN-PCSR</td><td>25K</td><td>25.61</td><td>8.5 (19%)</td><td>26.91</td><td>32.6 (18%)</td></tr><tr><td>CARN</td><td>295K</td><td>26.03</td><td>112.0 (100%)</td><td>27.45</td><td>457.8 (100%)</td></tr><tr><td>CARN-ClassSR</td><td>645K</td><td>26.01</td><td>101.7 (91%)</td><td>27.42</td><td>384.1 (84%)</td></tr><tr><td>CARN-ARM</td><td>295K</td><td>26.01</td><td>99.8 (89%)</td><td>27.42</td><td>379.2 (83%)</td></tr><tr><td>CARN-PCSR</td><td>169K</td><td>26.01</td><td>64.0 (57%)</td><td>27.42</td><td>260.0 (58%)</td></tr><tr><td>SRResNet</td><td>1.5M</td><td>26.24</td><td>502.9 (100%)</td><td>27.71</td><td>2056.2 (100%)</td></tr><tr><td>SRResNet-ClassSR</td><td>3.1M</td><td>26.20</td><td>446.7 (89%)</td><td>27.66</td><td>1686.2 (82%)</td></tr><tr><td>SRResNet-ARM</td><td>1.5M</td><td>26.20</td><td>429.1 (85%)</td><td>27.66</td><td>1742.2 (85%)</td></tr><tr><td>SRResNet-PCSR</td><td>1.1M</td><td>26.20</td><td>245.6 (49%)</td><td>27.66</td><td>981.0 (48%)</td></tr><tr><td>Models</td><td>Params.</td><td>Test8K(dB)</td><td>GFLOPs</td><td>Urban100(dB)</td><td>GFLOPs</td></tr><tr><td>FSRCNN</td><td>25K</td><td>32.82</td><td>1067.8 (100%)</td><td>23.05</td><td>19.9 (100%)</td></tr><tr><td>FSRCNN-ClassSR</td><td>113K</td><td>32.73</td><td>709.2 (66%)</td><td>22.89</td><td>20.8 (105%)</td></tr><tr><td>FSRCNN-ARM</td><td>25K</td><td>32.73</td><td>746.7 (70%)</td><td>22.89</td><td>19.9 (100%)</td></tr><tr><td>FSRCNN-PCSR</td><td>25K</td><td>32.73</td><td>196.6 (18%)</td><td>22.89</td><td>3.4 (17%)</td></tr><tr><td>CARN</td><td>295K</td><td>33.29</td><td>2638.6 (100%)</td><td>24.03</td><td>49.3 (100%)</td></tr><tr><td>CARN-ClassSR</td><td>645K</td><td>33.25</td><td>1829.9 (69%)</td><td>24.00</td><td>51.7 (105%)</td></tr><tr><td>CARN-ARM</td><td>295K</td><td>33.26</td><td>1783.2 (68%)</td><td>23.99</td><td>50.8 (103%)</td></tr><tr><td>CARN-PCSR</td><td>169K</td><td>33.25</td><td>1355.1 (51%)</td><td>24.00</td><td>29.6 (60%)</td></tr><tr><td>SRResNet</td><td>1.5M</td><td>33.55</td><td>11850.7 (100%)</td><td>24.65</td><td>221.3 (100%)</td></tr><tr><td>SRResNet-ClassSR</td><td>3.1M</td><td>33.50</td><td>7996.0 (67%)</td><td>24.54</td><td>226.5 (102%)</td></tr><tr><td>SRResNet-ARM</td><td>1.5M</td><td>33.50</td><td>7865.3 (66%)</td><td>24.54</td><td>245.2 (111%)</td></tr><tr><td>SRResNet-PCSR</td><td>1.1M</td><td>33.52</td><td>5093.7 (43%)</td><td>24.54</td><td>124.9 (56%)</td></tr></table>
# 4.2 Main Results

As demonstrated in Tab. 1, our proposed method, PCSR, exhibits better computational efficiency than previous patch-based efficient SR models [4,13] on four benchmarks: Test2K/Test4K/Test8K and Urban100. We assess the computational costs (FLOPs) of the existing SR models [4,10,13] while ensuring that their PSNR performance remains comparable.

We also provide qualitative results, with the PSNR and FLOPs of each generated image, for better comparison in Fig. 4. Patch-level approaches such as ClassSR and ARM fail at fine-grained classification of restoration difficulty. In contrast, our method can process the input image more precisely thanks to pixel-level classification, resulting in efficient and effective SR outputs. In more detail, in Fig. 4a, ClassSR and ARM classify the shown patch area as easy due to the dominance of the flat region, so they fail to restore thin lines well. Our method, on the other hand, properly identifies those lines through pixel-level difficulty classification and recovers them well. In Fig. 4b, due to over-computation by the patch-based methods, our approach demonstrates much better computational savings.

Table 2: Comparison of MGA and our PCSR on Test2K, Test4K, and Urban100 with $\times 4$ SR. The lowest FLOPs values are highlighted in bold.

<table><tr><td>Models</td><td>Params.</td><td>Test2K(dB)</td><td>GFLOPs</td><td>Test4K(dB)</td><td>GFLOPs</td><td>Urban100(dB)</td><td>GFLOPs</td></tr><tr><td>FSRCNN</td><td>25K</td><td>25.68</td><td>45.3 (100%)</td><td>26.98</td><td>185.3 (100%)</td><td>23.02</td><td>19.9 (100%)</td></tr><tr><td>FSRCNN-MGA</td><td>43K</td><td>25.66</td><td>29.2 (64%)</td><td>26.94</td><td>101.7 (55%)</td><td>23.01</td><td>14.6 (73%)</td></tr><tr><td>FSRCNN-PCSR</td><td>25K</td><td>25.66</td><td>12.8 (28%)</td><td>26.94</td><td>37.8 (20%)</td><td>23.01</td><td>4.3 (22%)</td></tr><tr><td>SRResNet</td><td>1.5M</td><td>26.30</td><td>502.9 (100%)</td><td>27.79</td><td>2056.2 (100%)</td><td>24.87</td><td>221.3 (100%)</td></tr><tr><td>SRResNet-MGA</td><td>2.0M</td><td>26.20</td><td>249.2 (50%)</td><td>27.66</td><td>871.9 (42%)</td><td>24.55</td><td>124.0 (56%)</td></tr><tr><td>SRResNet-PCSR</td><td>0.9M</td><td>26.20</td><td>191.0 (38%)</td><td>27.66</td><td>755.3 (37%)</td><td>24.55</td><td>97.3 (44%)</td></tr></table>
Table 3: Comparison of our PCSR and ClassSR according to the patch size, on Test2K $(\times 4)$. To ensure a fair comparison, the original model (CARN) and our model (CARN-PCSR) are also evaluated on decomposed input patches. The LR input is cropped to multiples of 128 without overlap to maintain consistency across patch sizes.

<table><tr><td>Patch Size</td><td colspan="2">16</td><td colspan="2">32</td><td colspan="2">64</td><td colspan="2">128</td></tr><tr><td></td><td>PSNR(dB)</td><td>GFLOPs</td><td>PSNR(dB)</td><td>GFLOPs</td><td>PSNR(dB)</td><td>GFLOPs</td><td>PSNR(dB)</td><td>GFLOPs</td></tr><tr><td>CARN</td><td>26.04</td><td>98.6 (100%)</td><td>26.13</td><td>98.6 (100%)</td><td>26.18</td><td>98.6 (100%)</td><td>26.20</td><td>98.6 (100%)</td></tr><tr><td>CARN-ClassSR</td><td>26.03</td><td>66.7 (68%)</td><td>26.12</td><td>69.8 (71%)</td><td>26.16</td><td>72.5 (74%)</td><td>26.17</td><td>75.8 (77%)</td></tr><tr><td>CARN-PCSR</td><td>26.03</td><td>61.1 (62%)</td><td>26.12</td><td>60.3 (61%)</td><td>26.16</td><td>56.9 (58%)</td><td>26.17</td><td>54.5 (55%)</td></tr></table>

This is attributed to our method's efficient distribution of computational resources, allowing us to achieve comparable or better performance while minimizing computational overhead. In Fig. 4c, ClassSR wastes computational resources, while ARM reduces computation excessively, resulting in inferior output quality. In contrast, our pixel-level approach enables more effective utilization of resources, leading to improved performance.

In Tab. 2, we further compare our method with the per-image processing efficient SR method MGA [10]. For a fair comparison, we use the same training dataset and input patch size as MGA and retrain our model. Even against this per-image processing method, our model shows better efficiency with far fewer parameters, demonstrating its broad applicability and overall effectiveness.

# 4.3 Ablation Studies

Input Patch Size. As shown in Tab. 3, our experiments demonstrate that the efficiency of the patch-distributing method [13] decreases as the patch size increases. This decline occurs because larger patches are more likely to contain a mix of easy and hard regions at the pixel level, making precise prediction of patch difficulty more challenging. In contrast to the patch-level approach, our pixel-level method supports any patch size without a decline in computational efficiency. Our method is more efficient than the patch-level approach at all patch sizes, with the gap becoming more pronounced as the patch size increases.

Impact of the number of classes. In Tab. 4, we explore the impact of the number of classes on the efficiency of PCSR by comparing the cases $M = 2$ and $M = 3$.


|
| 198 |
+
Fig. 4: Qualitative results of previous methods [4,13] and our method with $\times 4$ SR.
|
| 199 |
+
|
| 200 |
+
Table 4: Comparison depending on the number of classes $M$ with $\times 4$ SR.

<table><tr><td>Models</td><td>Params.</td><td>Test2K(dB)</td><td>GFLOPs</td><td>Test4K(dB)</td><td>GFLOPs</td><td>Urban100(dB)</td><td>GFLOPs</td></tr><tr><td>CARN</td><td>295K</td><td>26.03</td><td>112.0 (100%)</td><td>27.45</td><td>457.8 (100%)</td><td>24.03</td><td>49.3 (100%)</td></tr><tr><td>CARN-PCSR-2class</td><td>169K</td><td>26.01</td><td>64.0 (57%)</td><td>27.42</td><td>260.0 (58%)</td><td>24.00</td><td>29.6 (60%)</td></tr><tr><td>CARN-PCSR-3class</td><td>181K</td><td>26.01</td><td>62.4 (56%)</td><td>27.42</td><td>245.1 (54%)</td><td>24.00</td><td>28.6 (58%)</td></tr></table>
Table 5: Comparison of multi-scale PCSR and ARM on Test2K. Our model (CARN-PCSR) is retrained in a multi-scale training setting with a scale range of [2,4].
<table><tr><td></td><td></td><td colspan="3">x2</td><td colspan="3">x4</td><td colspan="3">x8</td></tr><tr><td>Models</td><td>Total Params.</td><td>Params.</td><td>PSNR</td><td>FLOPs</td><td>Params.</td><td>PSNR</td><td>FLOPs</td><td>Params.</td><td>PSNR</td><td>FLOPs</td></tr><tr><td>CARN-original</td><td>885K</td><td>258K</td><td>30.79dB</td><td>335G</td><td>295K</td><td>26.03dB</td><td>112G</td><td>332K</td><td>23.51dB</td><td>57G</td></tr><tr><td>CARN-ARM</td><td>885K</td><td>258K</td><td>30.57dB</td><td>181G</td><td>295K</td><td>25.85dB</td><td>60G</td><td>332K</td><td>23.17dB</td><td>31G</td></tr><tr><td>CARN-PCSR</td><td>169K</td><td>169K</td><td>30.57dB</td><td>233G</td><td>169K</td><td>25.85dB</td><td>56G</td><td>169K</td><td>23.48dB</td><td>31G</td></tr></table>
While both $M = 2$ and $M = 3$ exhibit high efficiency compared to the original model, the case with fewer classes has minimal impact on efficiency while using fewer parameters. Therefore, for simplicity, we choose $M = 2$.

Multi-scale SR. By leveraging LIIF [5] as our upsampler, our model inherently benefits from LIIF's key capability of multi-scale SR. This lets a single model accommodate diverse scale factors, unlike other methods that require an individual model per scale factor. We demonstrate this advantage of LIIF-based upsampling in Tab. 5. Furthermore, our model extends to arbitrary-scale SR, including non-integer scales, a capability not achievable with conventional patch-based approaches.

Pixel-wise Refinement. In a patch-level approach, using individual models based on patch-wise difficulty can produce artifacts when adjacent areas are assigned to different models. This issue can be mitigated by patch overlapping, where overlapped areas are averaged over multiple patch-level SR outputs; however, this harms computational efficiency by increasing the number of patches per image. Similarly, using upsamplers based on pixel-wise difficulty can cause artifacts when neighboring pixels are assigned to different upsamplers. Our pixel-wise refinement algorithm does not require any additional forward processing, allowing artifacts to be effectively mitigated with minor additional computation and minimal impact on performance. Fig. 5 illustrates the efficacy of our simple yet effective pixel-wise refinement algorithm.

# 5 Limitation and Future Works

Our PCSR dynamically allocates resources based on the restoration difficulty of each pixel, pursuing further efficiency improvements through finer granularity. Nevertheless, a limitation exists: since our classifier operates on LR features from the backbone, the lower bound of PCSR's FLOPs is determined by the size of the backbone.



Fig. 5: Visualization of the artifact reduction by the pixel-wise refinement.

This can lead to unnecessary computation for images with predominantly flat regions. To mitigate this, we plan to have the classifier operate on the backbone's earlier layers, or to use a lookup table so that easy pixels are processed via bilinear interpolation from the LR input, significantly reducing computational costs compared to neural network processing. Additionally, as future work, applying PCSR to generative models to enhance their efficiency, as well as integrating it with techniques such as model compression, pruning, and quantization, presents promising opportunities.

# 6 Conclusion

This paper introduces the Pixel-level Classifier for Single Image Super-Resolution (PCSR), a novel approach to efficient SR for large images. Unlike existing patch-distributing methods, PCSR allocates computational resources at the pixel level, addressing varying restoration difficulties and reducing redundant computation with finer granularity. It also offers tunability during inference, balancing performance and computational cost without re-training. Additionally, automatic pixel assignment using K-means clustering and a post-processing technique to remove artifacts are provided. Experiments show that PCSR outperforms existing methods in the PSNR-FLOP trade-off across various SISR models and benchmarks. We believe our proposed method improves the practicality and accessibility of large-image SR for real-world applications.

# Acknowledgement

This research was supported and funded by the Artificial Intelligence Graduate School Program under Grant (2020-0-01361), the National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (NRF-2022R1A2C2004509), and Samsung Electronics Co., Ltd. (Mobile eXperience Business).

# References

1. Agustsson, E., Timofte, R.: Ntire 2017 challenge on single image super-resolution: Dataset and study. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 126-135 (2017)
2. Ahn, N., Kang, B., Sohn, K.A.: Fast, accurate, and lightweight super-resolution with cascading residual network. In: Proceedings of the European conference on computer vision (ECCV). pp. 252-268 (2018)
3. Bevilacqua, M., Roumy, A., Guillemot, C., Alberi-Morel, M.L.: Low-complexity single-image super-resolution based on nonnegative neighbor embedding (2012)
4. Chen, B., Lin, M., Sheng, K., Zhang, M., Chen, P., Li, K., Cao, L., Ji, R.: Arm: Any-time super-resolution method. In: European Conference on Computer Vision. pp. 254-270. Springer (2022)
5. Chen, Y., Liu, S., Wang, X.: Learning continuous image representation with local implicit image function. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8628-8638 (2021)
6. Dong, C., Loy, C.C., He, K., Tang, X.: Image super-resolution using deep convolutional networks. IEEE transactions on pattern analysis and machine intelligence 38(2), 295-307 (2015)
7. Dong, C., Loy, C.C., Tang, X.: Accelerating the super-resolution convolutional neural network. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14. pp. 391-407. Springer (2016)
8. Gao, G., Wang, Z., Li, J., Li, W., Yu, Y., Zeng, T.: Lightweight bimodal network for single-image super-resolution via symmetric cnn and recursive transformer. arXiv preprint arXiv:2204.13286 (2022)
9. Gu, S., Lugmayr, A., Danelljan, M., Fritsche, M., Lamour, J., Timofte, R.: Div8k: Diverse 8k resolution image dataset. In: 2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW). pp. 3512-3516. IEEE (2019)
10. Hu, X., Xu, J., Gu, S., Cheng, M.M., Liu, L.: Restore globally, refine locally: A mask-guided scheme to accelerate super-resolution networks. In: European Conference on Computer Vision. pp. 74-91. Springer (2022)
11. Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5197-5206 (2015)
12. Kim, J., Lee, J.K., Lee, K.M.: Accurate image super-resolution using very deep convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1646-1654 (2016)
13. Kong, X., Zhao, H., Qiao, Y., Dong, C.: ClassSR: A general framework to accelerate super-resolution networks by data characteristic. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 12016-12025 (2021)
14. Ledig, C., Theis, L., Huszár, F., Caballero, J., Cunningham, A., Acosta, A., Aitken, A., Tejani, A., Totz, J., Wang, Z., et al.: Photo-realistic single image super-resolution using a generative adversarial network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4681-4690 (2017)
15. Li, Z., Liu, Y., Chen, X., Cai, H., Gu, J., Qiao, Y., Dong, C.: Blueprint separable residual network for efficient image super-resolution. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 833-843 (2022)
16. Lim, B., Son, S., Kim, H., Nah, S., Mu Lee, K.: Enhanced deep residual networks for single image super-resolution. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 136-144 (2017)
17. Liu, M., Zhang, Z., Hou, L., Zuo, W., Zhang, L.: Deep adaptive inference networks for single image super-resolution. In: Computer Vision-ECCV 2020 Workshops: Glasgow, UK, August 23-28, 2020, Proceedings, Part IV 16. pp. 131-148. Springer (2020)
18. Shi, W., Caballero, J., Huszár, F., Totz, J., Aitken, A.P., Bishop, R., Rueckert, D., Wang, Z.: Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1874-1883 (2016)
19. Tai, Y., Yang, J., Liu, X., Xu, C.: Memnet: A persistent memory network for image restoration. In: Proceedings of the IEEE international conference on computer vision. pp. 4539-4547 (2017)
20. Wang, S., Liu, J., Chen, K., Li, X., Lu, M., Guo, Y.: Adaptive patch exiting for scalable single image super-resolution. In: European Conference on Computer Vision. pp. 292-307. Springer (2022)
21. Xie, W., Song, D., Xu, C., Xu, C., Zhang, H., Wang, Y.: Learning frequency-aware dynamic network for efficient super-resolution. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4308-4317 (2021)
22. Yang, J., Wright, J., Huang, T.S., Ma, Y.: Image super-resolution via sparse representation. IEEE transactions on image processing 19(11), 2861-2873 (2010)
23. Zhang, Y., Li, K., Li, K., Wang, L., Zhong, B., Fu, Y.: Image super-resolution using very deep residual channel attention networks. In: Proceedings of the European conference on computer vision (ECCV). pp. 286-301 (2018)
24. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image super-resolution. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2472-2481 (2018)
25. Zhao, H., Kong, X., He, J., Qiao, Y., Dong, C.: Efficient image super-resolution using pixel attention. In: Computer Vision-ECCV 2020 Workshops: Glasgow, UK, August 23-28, 2020, Proceedings, Part III 16. pp. 56-72. Springer (2020)

acceleratingimagesuperresolutionnetworkswithpixellevelclassification/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e56763a6afda537960d609619f18845b9d859ab7baddae90d0a3bc443241fb11
size 672446

acceleratingimagesuperresolutionnetworkswithpixellevelclassification/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b57fb5ae7dcb9ec18c93f0a9162a62b9158eeac0b10bc95dd5f1c8132aa4ca6a
size 362356

acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/ced3e8e2-1e3c-4b26-b017-e6d2c3938fe5_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:003b7003efc7e91b88bca93a324c7dadcd3828eae1192aeddc66cf2b91f21c88
size 77555

acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/ced3e8e2-1e3c-4b26-b017-e6d2c3938fe5_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3e4f5bd883f9ee7101601570b5f60035e1b1d0ee5c44197947f03ed8be6f4ba4
size 91914

acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/ced3e8e2-1e3c-4b26-b017-e6d2c3938fe5_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d7cdfa35c80edc22cdb0c9f61208b07748794dea83177ad8e0bdcc3c4f58c0dc
size 2444982

acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/full.md
ADDED
@@ -0,0 +1,262 @@

# Accelerating Online Mapping and Behavior Prediction via Direct BEV Feature Attention
|
| 2 |
+
|
| 3 |
+
Xunjiang Gu $^{1}$ , Guanyu Song $^{1}$ , Igor Gilitschenski $^{1,2}$ , Marco Pavone $^{3,4}$ , and Boris Ivanovic $^{3}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup>University of Toronto <sup>2</sup>Vector Institute <sup>3</sup>NVIDIA Research <sup>4</sup>Stanford University {alfred.gu, guanyu.song}@mail.utoronto.ca, gilitschenski@cs.toronto.edu, {mpavone, bivanovic}@nvidia.com, pavone@stanford.edu
|
| 6 |
+
|
| 7 |
+
Abstract. Understanding road geometry is a critical component of the autonomous vehicle (AV) stack. While high-definition (HD) maps can readily provide such information, they suffer from high labeling and maintenance costs. Accordingly, many recent works have proposed methods for estimating HD maps online from sensor data. The vast majority of recent approaches encode multi-camera observations into an intermediate representation, e.g., a bird's eye view (BEV) grid, and produce vector map elements via a decoder. While this architecture is performant, it decimates much of the information encoded in the intermediate representation, preventing downstream tasks (e.g., behavior prediction) from leveraging them. In this work, we propose exposing the rich internal features of online map estimation methods and show how they enable more tightly integrating online mapping with trajectory forecasting<sup>1</sup>. In doing so, we find that directly accessing internal BEV features yields up to $73\%$ faster inference speeds and up to $29\%$ more accurate predictions on the real-world nuScenes dataset.
|
| 8 |
+
|
| 9 |
+
Keywords: Autonomous Driving $\cdot$ Online HD Map Estimation $\cdot$ Behavior Prediction
# 1 Introduction

Perceiving the static environment surrounding an autonomous vehicle (AV) is a critical task for autonomous driving, providing geometric information (e.g., road layout) to downstream behavior prediction and motion planning modules. Traditionally, high-definition (HD) maps have served as the backbone for this understanding, offering centimeter-level geometries for road boundaries, lane dividers, lane centerlines, pedestrian crosswalks, traffic signs, road markings, and more. They have proven to be an indispensable part of enhancing AV situational awareness and navigational judgment in downstream prediction tasks. However, despite their undeniable utility, collecting and maintaining HD maps is labor-intensive and costly, which limits their scalability.

Fig. 1: Online map estimation approaches predominantly encode multi-camera observations into a canonical BEV feature grid prior to decoding vectorized map elements. In this work, we propose deeply integrating online mapping with downstream tasks through direct access to the rich BEV features of online map estimation methods.

In recent years, online HD map estimation methods have emerged as an alternative, aiming to predict HD map information directly from sensor observations. Starting from (multi-)camera images and optionally LiDAR point clouds, state-of-the-art HD map estimation methods broadly employ an encoder-decoder neural network architecture (Fig. 1). An encoder first converts the sensor observations to a bird's eye view (BEV) grid of features. A decoder then predicts the location and semantic type of map elements from the BEV features. The resulting road geometries are commonly structured as combinations of polylines and polygons per map element type (e.g., road boundaries, lane dividers, pedestrian crosswalks). Such online-estimated maps serve as a practical substitute for offline HD mapping, providing necessary scene context for downstream tasks such as behavior prediction and motion planning. As an example, recent work [13] has shown success in coupling various map estimation methods with existing prediction frameworks, highlighting their potential to expedite the development of end-to-end AV stacks.

While such encoder-decoder approaches produce accurate HD maps, as we will show in Sec. 4, the attention mechanisms employed in decoding are computationally expensive (occupying the majority of model runtime) and do not produce outputs with associated uncertainty, which limits the ability of downstream modules to account for uncertainty. Moreover, such an architecture prevents downstream tasks from leveraging the rich intermediate features generated in the encoder's perspective-view-to-bird's-eye-view (PV2BEV) transformation, decimating information that cannot be described by point sets.

Contributions. Towards this end, we introduce three novel scene encoding strategies that leverage internal BEV features to improve the performance and accelerate the runtime of combined online mapping and behavior prediction systems. By directly leveraging BEV features, our proposed methods provide tighter integrations between map estimation and behavior prediction frameworks, achieving up to $73\%$ faster system inference speeds and an up to $29\%$ increase in downstream prediction accuracy on the real-world nuScenes dataset.
# 2 Related Work

# 2.1 Online Map Estimation

Online map estimation methods focus on generating a representation of the static environment surrounding an autonomous vehicle from its sensor data. Initial approaches used 2D BEV rasterized semantic segmentations as world representations. These maps were produced by either transforming observations to 3D and collapsing along the $Z$-axis [26, 28] or by utilizing cross-attention in geometry-aware Transformer [34] models [2, 20].

Recently, there has been a growth in vectorized map estimation methods that extend traditional BEV rasterization approaches. These methods employ an encoder-decoder architecture which regresses and classifies map elements in the form of polylines, polygons, and other curve representations [29]. Initial methods such as SuperFusion [5] and HDMapNet [19] combined both LiDAR point clouds and RGB images into a common BEV feature frame, with a subsequent handcrafted post-processing stage to generate polyline map elements. To eliminate this post-processing step, VectorMapNet [24] and InstaGraM [32] propose end-to-end models for vectorized HD map estimation.

In parallel, HD map estimation has also been formulated as a point set prediction task in the MapTR line of work [22, 23] and its extensions [36], yielding significant advancements in map estimation performance. To enable online inference from streaming observations, StreamMapNet [37] introduces a memory buffer that incorporates temporal data from prior timesteps. As many of these methods are commonly employed today, in this work we show how BEV features from multiple diverse mapping approaches can be leveraged to improve integrated system performance.

# 2.2 Map-Informed Trajectory Prediction

Learning-based trajectory prediction approaches initially leveraged rasterized maps for semantic scene context [30]. The rasterized map is treated as a top-down image and encoded via a convolutional neural network (CNN), concatenated with other scene context information (e.g., agent state history), and passed through the rest of the model [9, 16, 27, 31, 38]. Recently, state-of-the-art trajectory prediction methods have increasingly shifted to directly encoding raw polyline information from vectorized HD maps, demonstrating significant improvements in prediction accuracy. Initial approaches [8, 10, 11, 21, 39] utilized graph neural networks (GNNs) to encode lane polylines as nodes and their interactions with agent trajectories as edges. Extending this insight, Transformer [34] architectures with attention over map and agent embeddings have been widely adopted by current state-of-the-art methods [4, 12, 25, 40].

One recent related work investigates the integration between different combinations of map estimation and trajectory prediction models [13]. In it, they propose exposing uncertainties from map element regression and classification to downstream behavior prediction. In contrast to [13], our work focuses on exposing information from an earlier stage of online mapping (immediately following observation encoding). As we will show in Sec. 4, our approach not only outperforms [13] but is also much more computationally efficient.
# 2.3 End-to-End Driving Architectures

End-to-end architectures present a promising approach for creating integrated stacks that can internally leverage more information, e.g., uncertainty, from upstream components. Recent works such as UniAD [15], VAD [18], and OccNet [33] demonstrate the feasibility and performance of incorporating both rasterized and vectorized HD map estimation within end-to-end driving. For example, UniAD [15] and OccNet [33] formulate online mapping as a dense prediction task, aiming to generate the semantics of map elements at a per-pixel or per-voxel granularity, whereas VAD focuses on producing vectorized HD map representations. In these architectures, the utility of mapping is twofold: it is both an auxiliary training task and an internal static world representation that aids downstream tasks. While these approaches lead to highly-integrated autonomy stacks, the use of rasterized or vectorized representations (rather than BEV features) leads to information loss and extra computational burden. Accordingly, our work is complementary in that our proposed strategies can be incorporated within end-to-end stacks to improve inference speeds as well as downstream prediction and planning accuracy.

# 3 Leveraging Online Mapping Features in Trajectory Prediction

As mentioned in Sec. 2, the majority of state-of-the-art online vectorized map estimation models adopt a BEV grid internally to featurize the surrounding environment in a geometry-preserving fashion. Our method focuses on leveraging these internal BEV representations by directly accessing them in trajectory prediction. In doing so, we improve the flow of information from mapping to prediction and can even accelerate the combined system's runtime by skipping map decoding altogether (depending on the predictor's need for lane information).

Encoding Observations: Feature extractors in map estimation models aim to transform inputs from various vehicle-mounted sensors (e.g., cameras and LiDAR) into a unified feature space. Note that our work focuses on multi-camera observations, in line with the majority of state-of-the-art map estimation approaches. Formally, given a set of multi-view images $I_{t} = \{I_{1},\dots,I_{K}\}$ at time $t$, map estimation models encode them using a standard backbone (e.g., ResNet-50 [14]) to generate corresponding multi-view feature maps $F_{t} = \{F_{1},\dots,F_{K}\}$. The 2D image features $F_{t}$ are then converted into BEV features $B_{t}$ using a PV2BEV transformation. The two most common PV2BEV approaches are based on BEVFormer [20] and Lift-Splat-Shoot (LSS) [28].
BEVFormer [20] is a Transformer-based architecture that converts multi-camera image features into BEV features. It employs a standard Transformer encoder with specific enhancements: BEV queries $Q \in \mathbb{R}^{H \times W \times D}$, spatial cross-attention, and temporal self-attention. First, temporal information is queried from historical BEV features $B_{t-1}$ through temporal self-attention,

$$
\mathrm{TSA}\left(Q_{p}, \{Q, B_{t-1}\}\right) = \sum_{V \in \{Q, B_{t-1}\}} \mathrm{DeformAttn}\left(Q_{p}, p, V\right), \tag{1}
$$

where $Q_{p} \in \mathbb{R}^{D}$ is the query for a single BEV grid point $p = (h, w)$. The queries $Q$ are then employed to gather spatial information from the multi-camera features $F_{t}$ via a spatial cross-attention mechanism,

$$
\mathrm{SCA}\left(Q_{p}, F_{t}\right) = \frac{1}{\left|\mathcal{V}_{\mathrm{hit}}\right|} \sum_{i \in \mathcal{V}_{\mathrm{hit}}} \sum_{j=1}^{N_{\mathrm{ref}}} \mathrm{DeformAttn}\left(Q_{p}, \mathcal{P}(p, i, j), F_{i,t}\right), \tag{2}
$$

where $\mathcal{V}_{\mathrm{hit}}$ denotes the set of camera views that contain $p$, and $\mathcal{P}$ is the camera projection function that maps the 3D world point derived from $(h, w)$ and discrete height index $j$ to the 2D image plane of the $i^{\mathrm{th}}$ camera. This combined approach enables BEVFormer to efficiently understand temporal and spatial context, producing enhanced BEV features. As we will show in Sec. 4.2, incorporating temporal information in BEV features is quite beneficial for trajectory prediction.
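To ground Eq. (2), the following is a minimal sketch of the core geometric idea: project each BEV query point into every camera and average the image features of the views that see it. It substitutes plain bilinear sampling for deformable attention and assumes hypothetical intrinsics/extrinsics tensors and a single fixed query height, so it illustrates the geometry rather than reproducing BEVFormer's implementation.

```python
import torch
import torch.nn.functional as F

def sample_bev_features(bev_points, feats, intrinsics, extrinsics, img_hw):
    """Average image features over the camera views that see each BEV point.

    bev_points: (N, 3) 3D query points, one per BEV cell at a fixed height.
    feats:      (K, C, Hf, Wf) per-camera feature maps F_t.
    intrinsics: (K, 3, 3); extrinsics: (K, 4, 4) world-to-camera transforms.
    img_hw:     (H, W) input image size in pixels.
    """
    K, C = feats.shape[0], feats.shape[1]
    N = bev_points.shape[0]
    ones = torch.ones(N, 1, device=bev_points.device)
    pts_h = torch.cat([bev_points, ones], dim=-1)                  # (N, 4)
    out = torch.zeros(N, C, device=feats.device)
    hits = torch.zeros(N, 1, device=feats.device)
    for i in range(K):
        cam = (extrinsics[i] @ pts_h.T).T[:, :3]                   # camera frame
        in_front = cam[:, 2] > 0.1                                 # drop points behind camera
        uv = (intrinsics[i] @ cam.T).T
        uv = uv[:, :2] / uv[:, 2:3].clamp(min=1e-5)                # pixel coords
        grid = torch.stack([uv[:, 0] / img_hw[1] * 2 - 1,          # normalize to
                            uv[:, 1] / img_hw[0] * 2 - 1], dim=-1) # [-1, 1]
        visible = in_front & (grid.abs() <= 1).all(dim=-1)         # the "hit" set
        sampled = F.grid_sample(feats[i:i + 1], grid.view(1, 1, N, 2),
                                align_corners=False)[0, :, 0].T    # (N, C)
        out = out + sampled * visible.unsqueeze(-1)
        hits = hits + visible.unsqueeze(-1).float()
    return out / hits.clamp(min=1)          # mean over the views in V_hit
```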
Another common PV2BEV method is LSS [28]. Its first stage (Lift) factorizes individual images and converts them into a shared 3D frame via "unprojection", assigning multiple discrete depth points $(h, w, d) \in \mathbb{R}^3$ to each pixel in an image based on camera extrinsics and intrinsics. This forms a large point cloud with a 3D point at each depth per ray ($HWD$ points in total). The second stage (Splat) aggregates these points into a common BEV feature grid using an efficient pillar pooling method.
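A similarly minimal sketch of the Lift-Splat idea, under assumed shapes: weight image features by a per-pixel depth distribution ("lift"), then sum-pool the resulting 3D points into BEV pillars ("splat"). Real implementations precompute the frustum and use an optimized cumulative-sum pooling trick; this version favors readability over speed.

```python
import torch

def lift_splat(feats, depth_probs, frustum_xyz, bev_hw, bev_range):
    """Minimal Lift-Splat for a single camera.

    feats:       (C, H, W) image features.
    depth_probs: (D, H, W) softmaxed depth distribution per pixel.
    frustum_xyz: (D, H, W, 3) precomputed 3D location of each (depth, pixel)
                 bin in the ego frame (from intrinsics/extrinsics).
    bev_hw:      (bev_h, bev_w) output grid size.
    bev_range:   ((x_min, x_max), (y_min, y_max)) metric extent of the grid.
    Returns a (C, bev_h, bev_w) BEV feature grid.
    """
    C = feats.shape[0]
    bev_h, bev_w = bev_hw
    (x_min, x_max), (y_min, y_max) = bev_range
    # 'Lift': outer product of features and depth distribution -> (C, D, H, W)
    lifted = feats.unsqueeze(1) * depth_probs.unsqueeze(0)
    pts = frustum_xyz.reshape(-1, 3)                    # (D*H*W, 3)
    vals = lifted.reshape(C, -1)                        # (C, D*H*W)
    # Bin each 3D point into a BEV cell
    ix = ((pts[:, 0] - x_min) / (x_max - x_min) * bev_w).long()
    iy = ((pts[:, 1] - y_min) / (y_max - y_min) * bev_h).long()
    keep = (ix >= 0) & (ix < bev_w) & (iy >= 0) & (iy < bev_h)
    flat = iy[keep] * bev_w + ix[keep]
    bev = torch.zeros(C, bev_h * bev_w)
    # 'Splat': sum-pool all points that fall into the same pillar
    bev.index_add_(1, flat, vals[:, keep])
    return bev.reshape(C, bev_h, bev_w)
```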
Decoding Map Elements: To produce vectorized map elements, most map prediction models employ a Transformer-based decoder. They broadly consist of a hierarchical query embedding mechanism alongside multi-head self-attention and deformable attention to accurately predict complex, irregular map elements from BEV features. Instance- and point-level queries are combined for dynamic feature interaction, followed by classification and regression heads that predict the type and location of map element vertices, respectively. While such decoding architectures produce accurate maps, they are computationally expensive, and decoding occupies much of the overall model runtime. MapTRv2 [23] attempts to address this by introducing more streamlined attention mechanisms in its decoder. In StreamMapNet [37], a Multi-Point Attention mechanism is utilized alongside a streaming approach that preserves previous queries and BEV features, aiming to improve map estimation performance by incorporating temporal information.

Behavior Prediction Models: Most state-of-the-art trajectory prediction models also leverage an encoder-decoder framework [30]. The encoder is responsible for capturing the scene's context, such as vectorized map elements (e.g., road edges and centerlines) as well as agent trajectories. The decoder then utilizes these encoded representations to forecast the future motion of agents in the scene. In the encoder, vectorized map elements are commonly encoded as either nodes in a Graph Neural Network (GNN) or as tokens in a Transformer [34]. Two representative instantiations are DenseTNT [12] and HiVT [40], respectively.

At a high level, DenseTNT [12] leverages the VectorNet [8] hierarchical GNN context encoder to extract features from vectorized map elements. Agent trajectories and map element segments are first modeled as polyline subgraphs. Then, each resulting subgraph is further encoded as a node in a global GNN to capture their interactions. On the other hand, the Transformer-based HiVT [40] treats vectorized elements as sequences of tokens. Its hierarchical encoder consists of two stages: information within a local spatial window is encoded for each agent, followed by a global interaction encoder to model long-range interactions between agents.

Recent work [13] has explored strategies for coupling online-estimated vectorized maps and the above prediction models. However, as we will show in Sec. 4, their prediction performance and computational runtime can be further improved by harnessing the BEV features present within online map estimation models. Directly using BEV features provides prediction models access to richer information than the original decoded sets of polylines and polygons.

In the remainder of this section, we outline three different strategies for incorporating BEV features in downstream behavior prediction.
# 3.1 Modeling Agent-Lane Interactions via BEV Feature Attention

Inspired by the approach taken in Vision Transformer (ViT) [6], we treat map BEV features as an image, albeit with a channel dimension equal to the embedding dimension. We process the BEV tensor by first dividing it into a sequence of flattened BEV patches, i.e., an $N \times (P^2 D)$ tensor, where $D$ denotes the embedding dimension, $(P, P)$ represents the resolution of each patch, and $N = HW / P^2$ is the total number of BEV patches. The flattened BEV features are then passed through a trainable linear projection to obtain an $N \times D$ patch embedding for each scene. This serves as the equivalent of an $N$-length input sequence for a Transformer, allowing attention mechanisms to be applied.
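A minimal sketch of this tokenization step is below, assuming the BEV channel dimension already equals the embedding dimension $D$; the layer sizes are illustrative.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BEVPatchEmbed(nn.Module):
    """ViT-style patchification of a (B, D, H, W) BEV feature grid."""
    def __init__(self, embed_dim=256, patch=20):
        super().__init__()
        self.patch = patch
        self.proj = nn.Linear(patch * patch * embed_dim, embed_dim)

    def forward(self, bev):                        # bev: (B, D, H, W)
        P = self.patch
        # Extract non-overlapping P x P patches: (B, D*P*P, N) columns
        cols = F.unfold(bev, kernel_size=P, stride=P)
        tokens = cols.transpose(1, 2)              # (B, N, P*P*D) flattened patches
        return self.proj(tokens)                   # (B, N, D) patch embeddings
```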
In this first strategy for incorporating BEV features in behavior prediction, visualized in Fig. 2, we alter the modeling of agent-lane interactions. We select the BEV grid patches that correspond to agent positions and attend to every other patch in the scene, providing an understanding of the environment surrounding each vehicle. Formally,

$$
\mathbf{e}_{A} = \mathrm{MHA}\left(Q_{A}, K_{M}, V_{M}\right), \tag{3}
$$

where MHA denotes multi-headed attention, $Q_{A} \in \mathbb{R}^{M \times D}$ denotes agent patches (queries), $K_{M} = V_{M} \in \mathbb{R}^{N \times D}$ denote all map patches (keys and values), and $\mathbf{e}_{A}$ are the resulting agent-BEV embeddings. As an additional benefit, by computing attention with agent patches as queries, the computational complexity is only $M \times N$ operations, where $M$ denotes the number of agents (most commonly, $M \ll N$).
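Eq. (3) maps directly onto standard multi-head attention; the snippet below is a self-contained sketch with illustrative dimensions.

```python
import torch
import torch.nn as nn

# A sketch of Eq. (3): agent patches query all BEV patches.
# D, M, N are illustrative; in HiVT-style models the output e_A is later
# fused with agent-agent interaction features.
D, M, N = 256, 8, 200                     # embed dim, agents, BEV patches
mha = nn.MultiheadAttention(embed_dim=D, num_heads=8, batch_first=True)

agent_tokens = torch.randn(1, M, D)       # Q_A: patches at agent positions
map_tokens = torch.randn(1, N, D)         # K_M = V_M: all BEV patches
e_A, _ = mha(agent_tokens, map_tokens, map_tokens)   # (1, M, D)
```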
To verify our approach, we modify the state-of-the-art Transformer-based prediction model HiVT [40]. It employs a hierarchical Transformer structure with a low-level Transformer that encodes agent-lane interactions within a local neighborhood, followed by a high-level Transformer that models global interactions across the entire scene. Note that, by replacing the local agent-lane interaction encoder with the agent-BEV attention in Eq. (3), we completely remove the use of vectorized lane information in HiVT. The agent-BEV features are then concatenated with agent-agent interactions (modeled via self-attention in HiVT) followed by a linear projection for the global interaction module to process. As we will show in Sec. 4, the prediction performance of HiVT and the overall system runtime are greatly enhanced by directly encoding BEV features.

Fig. 2: Three different strategies for incorporating BEV features in behavior prediction. Left: local region attention to encode agent-map interaction; Middle: augmenting lane vertices with BEV features; Right: replacing agent trajectories with temporal BEV features.
# 3.2 Augmenting Estimated Lanes with BEV Features

While Sec. 3.1 completely substitutes vectorized map data, another strategy is to augment existing lane information with BEV features (e.g., via concatenation). We first refine the BEV features to match the dimensionality of the latent space associated with raw lane information using a one-dimensional CNN. We then determine the BEV grid positions corresponding to the locations of each map vertex and concatenate the original vertex features (i.e., their positions) with their corresponding BEV features. In doing so, we aim to provide a more comprehensive summary of lane information to downstream modules.

In our work, we instantiate this strategy with a combination of the MapTR line of work [22, 23] and DenseTNT [12]. DenseTNT, in particular, is emblematic of prediction models that are heavily map-dependent. It requires lane information at virtually every stage of its pipeline, from initial sparse context encoding to end-point sampling and scoring to guiding predictions during decoding. With such a heavy reliance on vector maps, we cannot eliminate the use of lanes completely as in Sec. 3.1. Instead, we focus on enriching the estimated map vertices by incorporating their corresponding BEV features in DenseTNT's input layer. Specifically, the enriched map elements are encoded using a VectorNet [8] backbone which we augment with a larger layer size to accommodate the increase in feature dimensionality (full details can be found in the appendix). As we will show in Sec. 4, incorporating BEV features in this manner significantly improves the performance of the associated behavior predictor.
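A minimal sketch of the vertex-augmentation step, with assumed names and grid conventions (we take $x$ to index the grid's $W$ axis and $y$ its $H$ axis); the refinement CNN is omitted for brevity.

```python
import torch
import torch.nn.functional as F

def augment_vertices(vertices, bev, bev_range):
    """Concatenate each map vertex's position with the BEV feature at its
    location (bilinearly sampled).

    vertices:  (V, 2) vertex (x, y) positions in the ego frame.
    bev:       (D, H, W) BEV features.
    bev_range: ((x_min, x_max), (y_min, y_max)) metric extent of the grid.
    Returns (V, 2 + D) augmented vertex features.
    """
    (x_min, x_max), (y_min, y_max) = bev_range
    # Normalize vertex positions to grid_sample's [-1, 1] convention
    gx = (vertices[:, 0] - x_min) / (x_max - x_min) * 2 - 1
    gy = (vertices[:, 1] - y_min) / (y_max - y_min) * 2 - 1
    grid = torch.stack([gx, gy], dim=-1).view(1, 1, -1, 2)
    feats = F.grid_sample(bev.unsqueeze(0), grid,
                          align_corners=False)[0, :, 0].T   # (V, D)
    return torch.cat([vertices, feats], dim=-1)
```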
# 3.3 Replacing Agent Information with Temporal BEV Features

Operating on streaming inputs is a common requirement of online mapping methods deployed on embedded devices. Methods like StreamMapNet [37] introduce a memory buffer to preserve query data and BEV features from previous frames, combining them with the BEV features acquired in the current frame. This introduction of temporal information into the BEV representation enables our third strategy for incorporating BEV features in behavior prediction: replacing agent information with their corresponding BEV features.

In prediction models, agent trajectories are commonly the only source of temporal information during scene context encoding. Vectorized HD maps provide a static understanding of the scene, with fixed road geometry and semantics. While this static-dynamic separation is explicitly handled by prediction architectures, StreamMapNet's approach captures temporal information with a one-step historical BEV feature fusion, enabling it to also capture information about dynamic agents. To leverage StreamMapNet's encoding of both static and dynamic information, we additionally modify DenseTNT [12] to replace the agent subgraphs encoded in VectorNet [8] with the agent-BEV features obtained using the attention mechanism in Eq. (3). In doing so, agent trajectory information is completely discarded in DenseTNT. Even so, as we will show in Sec. 4, DenseTNT is able to leverage the implicit trajectory information encoded in the dynamic BEV features and predicts significantly more accurate trajectories.
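For intuition only, a rough sketch of one-step temporal fusion is shown below; it gates a previous (ego-motion-aligned) BEV frame into the current one with a learned convolutional gate. StreamMapNet's actual streaming fusion differs in its details; this only illustrates how history can flow into $B_t$.

```python
import torch
import torch.nn as nn

class BEVFusion(nn.Module):
    """Gate the previous frame's (already ego-motion-aligned) BEV features
    into the current frame's features. Illustrative, not StreamMapNet's
    exact mechanism."""
    def __init__(self, dim=256):
        super().__init__()
        self.gate = nn.Conv2d(2 * dim, dim, kernel_size=1)

    def forward(self, bev_t, bev_prev):            # both (B, D, H, W)
        z = torch.sigmoid(self.gate(torch.cat([bev_t, bev_prev], dim=1)))
        return z * bev_prev + (1 - z) * bev_t      # fused B_t carries history
```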
# 4 Experiments

Dataset. We evaluate our method on the large-scale nuScenes dataset [1], which includes ground truth (GT) HD maps, multi-sensor data, and tracked agent trajectories. It consists of 1000 driving scenarios with sensor data recorded at $10\mathrm{Hz}$ and annotated at $2\mathrm{Hz}$ (i.e., every 5th frame is a keyframe), and is divided into train, validation, and test sets with 700, 150, and 150 scenarios, respectively.
Our work leverages the unified trajdata [17] interface to standardize the data representation between vectorized map estimation models and downstream prediction models. To ensure compatibility across the various prediction and mapping models, we upsample the nuScenes trajectory data frequency to $10\mathrm{Hz}$ (matching the sensor frequency) using trajdata's temporal interpolation utilities. Each prediction model is then tasked with forecasting vehicle motion 3 seconds into the future using observations from the preceding 2 seconds.
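trajdata provides these interpolation utilities itself; as a standalone illustration of the step, linearly upsampling a single 2 Hz track to 10 Hz might look as follows (names and shapes assumed).

```python
import numpy as np

def upsample_track(ts, xy, out_hz=10.0):
    """Linearly interpolate a low-rate track to a higher rate.

    ts: (T,) increasing timestamps in seconds; xy: (T, 2) positions.
    Returns the resampled timestamps and positions.
    """
    t_out = np.arange(ts[0], ts[-1] + 1e-9, 1.0 / out_hz)
    x = np.interp(t_out, ts, xy[:, 0])
    y = np.interp(t_out, ts, xy[:, 1])
    return t_out, np.stack([x, y], axis=-1)
```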
Metrics. To evaluate trajectory prediction performance, we adopt standard evaluation metrics used in many recent prediction challenges [1, 3, 7, 35]: minimum Average Displacement Error (minADE), minimum Final Displacement Error (minFDE), and Miss Rate (MR). Specifically, minADE evaluates the average Euclidean ($\ell_2$) distance between the most-accurately predicted trajectory (of 6 predicted trajectories) and the GT trajectory across the prediction horizon. minFDE measures the $\ell_2$ distance between the endpoints of these two trajectories. MR refers to the proportion of predicted trajectories that have an FDE of more than 2 meters from the GT endpoint.
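For reference, a minimal sketch of computing these three metrics for a single agent (tensor shapes assumed):

```python
import torch

def prediction_metrics(pred, gt, miss_thresh=2.0):
    """minADE / minFDE / miss indicator for one agent.

    pred: (K, T, 2) K predicted trajectories; gt: (T, 2) ground truth.
    """
    dist = torch.linalg.norm(pred - gt.unsqueeze(0), dim=-1)   # (K, T)
    ade = dist.mean(dim=-1)                                    # per-trajectory ADE
    fde = dist[:, -1]                                          # per-trajectory FDE
    min_ade = ade.min().item()
    min_fde = fde.min().item()
    missed = bool(fde.min() > miss_thresh)   # counts toward Miss Rate
    return min_ade, min_fde, missed
```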
<table><tr><td>Prediction Method</td><td colspan="3">HiVT [40]</td><td colspan="3">DenseTNT [12]</td></tr><tr><td>Online HD Map Method</td><td>minADE ↓</td><td>minFDE ↓</td><td>MR ↓</td><td>minADE ↓</td><td>minFDE ↓</td><td>MR ↓</td></tr><tr><td>MapTR [22]</td><td>0.4234</td><td>0.8900</td><td>0.0955</td><td>1.0462</td><td>2.0661</td><td>0.3494</td></tr><tr><td>MapTR [22] + Unc [13]</td><td>0.4036</td><td>0.8372</td><td>0.0822</td><td>1.1190</td><td>2.1502</td><td>0.3669</td></tr><tr><td>MapTR [22] + Ours</td><td>0.3617 (-15%)</td><td>0.7401 (-17%)</td><td>0.0720 (-25%)</td><td>0.7608 (-27%)</td><td>1.4700 (-29%)</td><td>0.2593 (-26%)</td></tr><tr><td>MapTRv2 [23]</td><td>0.3950</td><td>0.8310</td><td>0.0894</td><td>1.2648</td><td>2.3481</td><td>0.4043</td></tr><tr><td>MapTRv2 [23] + Unc [13]</td><td>0.3896</td><td>0.8085</td><td>0.0859</td><td>1.3228</td><td>2.4821</td><td>0.4406</td></tr><tr><td>MapTRv2 [23] + Ours</td><td>0.3844 (-3%)</td><td>0.7848 (-6%)</td><td>0.0741 (-17%)</td><td>1.1232 (-11%)</td><td>2.3000 (-2%)</td><td>0.4025 (0%)</td></tr><tr><td>MapTRv2-CL [23]</td><td>0.3657</td><td>0.7473</td><td>0.0710</td><td>0.7664</td><td>1.3174</td><td>0.1547</td></tr><tr><td>MapTRv2-CL [23] + Unc [13]</td><td>0.3588</td><td>0.7232</td><td>0.0660</td><td>0.8123</td><td>1.3426</td><td>0.1567</td></tr><tr><td>MapTRv2-CL [23] + Ours</td><td>0.3652 (0%)</td><td>0.7323 (-2%)</td><td>0.0710 (0%)</td><td>0.7630 (0%)</td><td>1.3609 (+3%)</td><td>0.1576 (+2%)</td></tr><tr><td>StreamMapNet [37]</td><td>0.4035</td><td>0.8569</td><td>0.0996</td><td>0.8864</td><td>1.7050</td><td>0.2467</td></tr><tr><td>StreamMapNet [37] + Unc [13]</td><td>0.3907</td><td>0.8034</td><td>0.0812</td><td>0.9220</td><td>1.6851</td><td>0.2310</td></tr><tr><td>StreamMapNet [37] + Ours</td><td>0.3800 (-6%)</td><td>0.7709 (-10%)</td><td>0.0746 (-25%)</td><td>0.7377 (-17%)</td><td>1.3661 (-20%)</td><td>0.1987 (-19%)</td></tr></table>

Table 1: Virtually every combination of mapping and prediction methods benefits from directly leveraging upstream BEV features on the nuScenes [1] dataset, with certain combinations achieving performance improvements of $25\%$ or more. Percent values denote the relative improvement in prediction performance achieved by our approach.
Models and Training. To evaluate the effect of incorporating BEV features in downstream prediction models, we train DenseTNT [12] and HiVT [40] on the outputs of four online mapping models (MapTR [22], MapTRv2 [23], MapTRv2 with Centerlines [23], and StreamMapNet [37]) in one of three setups: Baseline (using vectorized inputs), Uncertainty-enhanced (where each map element vertex contains spatial uncertainty information) [13], and Ours (one of the BEV feature attention strategies detailed in Sec. 3), yielding a total of 24 model combinations.

In particular, we first train the four online map estimation models to convergence following the models' original training recipes in the baseline setup or the training recipes of [13] in the uncertainty-enhanced setup. We then extract BEV features from each model and scene, and train behavior prediction models according to the above three setups: using only vectorized map information as a baseline, leveraging uncertainty as in [13], and our approach of leveraging BEV features as in Sec. 3. All models are trained on a single RTX 4090 GPU. Full model and training details can be found in the appendix.
# 4.1 Leveraging BEV Features in Behavior Prediction

Prediction Accuracy Improvements. As shown in Tab. 1, for virtually all mapping/prediction model combinations, incorporating BEV features leads to significantly better prediction accuracy, not only compared to the baseline models but also to the uncertainty-enhanced approach. The largest improvements ($25\%$ or more) are in MR and minFDE, suggesting that latent BEV features can especially help with long-horizon prediction performance. Endpoint prediction accuracy is especially important for trajectory prediction as it directly impacts later planning accuracy.

Note that, while MapTR [22] does not perform as well as its successor (MapTRv2 [23]) or the temporally-enhanced StreamMapNet [37], its BEV features provide the largest improvement to downstream performance, yielding prediction accuracy that outperforms combinations with MapTRv2 [23] (without centerlines). This result suggests that MapTR's decoder may be introducing unwanted noise, which impedes its ability to generate precise map elements. Accordingly, by leveraging information from earlier stages of MapTR [22], both HiVT [40] and DenseTNT [12] obtain significant improvements in prediction performance, emphasizing the benefits of deeper integrations between mapping and prediction through an intermediate BEV representation.

The additional production of centerlines in MapTRv2-CL [23] yields the most accurate predictions overall. Accordingly, the benefits of incorporating BEV features are the least pronounced. This result reaffirms the utility of centerlines for trajectory prediction and provides guidance for future map estimation research regarding which types of map elements are most useful.

Finally, although StreamMapNet's BEV features are leveraged in two completely different ways (as a substitute for lane information in HiVT [40] and as a substitute for agent information in DenseTNT [12]), they provide significant improvements to the baseline prediction models in both cases. This indicates that temporal information not only helps in decoding more accurate maps, it also provides a temporal understanding of agent behavior, which is particularly valuable for trajectory prediction.

Fig. 3: Our integrated BEV-prediction approach runs faster than decoupled baselines across all scenario sizes (number of agents and map elements) and mapping models.

Inference Speedup. In Fig. 3, we compare the GPU inference speedup achieved by our integrated approach (described in Sec. 3.1) relative to decoupled baselines for HiVT [40]. For both approaches, runtime is measured starting from the processing of the input RGB images and ending at the output of the final trajectories. As can be seen, our approach results in significant inference time improvements due to its elimination of the time-consuming map decoding stage. Specifically, our integrated method is $42-73\%$ faster than MapTR [22] and HiVT [40] alone, $35-62\%$ faster than MapTRv2 [23] and HiVT [40] alone, and $8-15\%$ faster than StreamMapNet [37] and HiVT [40] alone. Identical improvements are obtained when compared to the uncertainty-integrated method [13], reaffirming the strength of our approach.
Further, the overall inference time of the baseline mapping and prediction models scales with both the number of map elements and the number of agents present in a scene. In contrast, our integrated approach is much less sensitive to the number of map elements, and this is reflected in Fig. 3, where our approach's runtime advantage grows as the number of map elements increases. This occurs because each agent's patch attends to every other BEV patch in Sec. 3.1. Thus, even if the number of map elements is high, the number of BEV patches is fixed, significantly reducing inference time by eliminating the need to process map elements through HiVT's encoder.

In scenarios where the number of map elements is low but the number of agents is high (top left of Fig. 3), the reduction in processing time is less pronounced. Given the smaller quantity of map elements, HiVT [40] naturally requires less time to encode them. Further, with an increase in the number of agents, the approach in Sec. 3.1 must perform additional attention operations, as each new agent introduces an extra $N$ attention operations. This increase in computation partially offsets the savings achieved by not processing vectorized map elements, leading to smaller improvements in runtime. Nevertheless, our approach still yields substantial reductions in inference time.
# 4.2 Ablation Studies

Patch Size. Tab. 2 illustrates the effect of the BEV feature patch size $P$ on prediction performance. A patch size that is too small results in insufficient information capture. For instance, MapTRv2 operates with a $60m \times 30m$ perception range and has a BEV dimension $(H \times W)$ of $200 \times 100$, meaning each BEV grid cell represents a $0.3m \times 0.3m$ square in the real world. This is relatively small, particularly as we wish to capture global information about the scene. As shown in Tab. 2, a patch size of $10 \times 5$ (covering $3m \times 1.5m$) underperforms compared to a more moderately-sized $20 \times 20$ patch ($6m \times 6m$). Larger patch sizes do not yield performance improvements, however, likely due to the loss of granular information when converting patches into smaller vector embeddings (projecting from $P^2 D$ to $D$ dimensions), which also inhibits prediction accuracy as reflected by worsening minFDE and MR values. Overall, we find that a patch size of $20 \times 20$ yields the best prediction performance.
<table><tr><td>Models</td><td colspan="3">MapTRv2-Centerline [23] and HiVT [40]</td></tr><tr><td>Patch Sizes</td><td>minADE ↓</td><td>minFDE ↓</td><td>MR ↓</td></tr><tr><td>(10, 5)</td><td>0.3845</td><td>0.8003</td><td>0.0853</td></tr><tr><td>(20, 10)</td><td>0.3729</td><td>0.7616</td><td>0.0749</td></tr><tr><td>(20, 20)</td><td>0.3728</td><td>0.7518</td><td>0.0737</td></tr><tr><td>(20, 25)</td><td>0.3709</td><td>0.7649</td><td>0.0761</td></tr><tr><td>(40, 20)</td><td>0.3737</td><td>0.7583</td><td>0.0770</td></tr></table>

Table 2: An exploration of BEV patch sizes reveals that there are detriments to having patches that are too small (insufficient information capture) or too large (granular information loss), with the best performance achieved by a patch size of $20 \times 20$ (corresponding to $6m \times 6m$ in the real world).

BEV Encoder Selection. As mentioned in Sec. 2, map estimation models typically employ one of two distinct PV2BEV encoders: BEVFormer [20], which is used in MapTR [22] and StreamMapNet [37], and LSS [28], used in MapTRv2 [23] and MapTRv2-Centerline [23]. As described in Sec. 3, BEVFormer [20] enhances its BEV features by integrating historical BEV features ($B_{t-1}$) through temporal self-attention. In contrast, LSS [28] processes only data from the current frame. This divergence in backbone mechanisms leads to notable performance disparities between MapTR [22] and its subsequent derivatives MapTRv2 [23] and MapTRv2-Centerline [23]. Despite the use of a similar decoding mechanism (a hierarchical query embedding scheme followed by hierarchical bipartite matching), the utilization of an LSS-based BEV extractor results in a lack of temporal information in MapTRv2's resulting BEV features. This results in relative inefficiency within our methodology, showcasing only modest enhancements when compared against integrations with MapTR [22] and StreamMapNet [37].

This divergence is empirically observed in Tab. 1, where MapTRv2 and MapTRv2-Centerline's BEV features (which only encode static information) yield mere relative improvements of $4\%$, $2\%$, and $4\%$ in minADE, minFDE, and MR, respectively. In comparison, the use of temporally-informed BEV features in MapTR [22] and StreamMapNet [37] yields relative performance enhancements of $16\%$, $19\%$, and $24\%$ in minADE, minFDE, and MR for both mapping/prediction model combinations. These improvements underscore the utility of integrating temporal dynamics into BEV features for improved trajectory forecasting.

Fig. 4: StreamMapNet [37] and HiVT [40] combined using the strategy in Sec. 3.1. By replacing lane information with temporal BEV features, HiVT is able to keep its predicted trajectories in the current lane, closely aligning with the GT trajectory.

Fig. 5: MapTR [22] and DenseTNT [12] combined via the strategy in Sec. 3.2. Our augmentation of map vertices with BEV features enables DenseTNT to produce very accurate trajectories, preventing the road boundary incursions seen in the Baseline and Uncertainty-enhanced [13] setups.
# 4.3 Qualitative Comparisons

BEV Feature Visualization. Aside from estimated maps and their uncertainties, we also visualize the corresponding BEV features for each test scene in Figs. 4 to 6. These visualizations are obtained by first reducing the dimensionality of each BEV grid cell to a single value using principal component analysis (PCA), followed by normalization to $[0, 255]$, creating a grayscale image. The resulting BEV feature images form the background of our method plots.
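A minimal sketch of this visualization procedure (projection onto the first principal component, then normalization to a grayscale image):

```python
import numpy as np

def bev_to_grayscale(bev):
    """Project a (D, H, W) BEV feature grid to one grayscale image via the
    first principal component of the per-cell feature vectors."""
    D, H, W = bev.shape
    x = bev.reshape(D, -1).T                    # (H*W, D): one sample per cell
    x = x - x.mean(axis=0, keepdims=True)
    # First right-singular vector = first principal direction
    _, _, vt = np.linalg.svd(x, full_matrices=False)
    proj = x @ vt[0]                            # (H*W,) PC1 scores
    proj = (proj - proj.min()) / (np.ptp(proj) + 1e-8) * 255.0
    return proj.reshape(H, W).astype(np.uint8)
```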
In instances where StreamMapNet [37] serves as the mapping model (Figs. 4 and 6), a distinct separation is observed between drivable areas (gray) and the non-drivable areas (white) beyond the designated boundaries. This distinction highlights the comprehensive geometric information captured within the temporal BEV features of StreamMapNet [37], enabling the implicit modeling of map characteristics from BEV features alone. This interpretation is inverted in MapTR's BEV features (Fig. 5), where white indicates drivable areas and gray signifies non-drivable regions. In both cases, BEV features play a critical role in informing future predictions.

Fig. 4 visualizes a parking lot next to a building. Utilizing the temporal BEV features from StreamMapNet [37], HiVT [40] keeps all six predicted trajectories tightly clustered around the GT. This precision allows HiVT [40] to maintain lane discipline, avoiding encroachments into opposing lanes (seen in the Baseline and Uncertainty setups).

In Fig. 5, MapTR's estimated map vertices are augmented with their corresponding BEV features. Specifically, the gray area depicted ahead of the center vehicle covers part of the road boundary, which provides an additional non-drivable signal and reinforces the presence of a road border. By leveraging this extra information, DenseTNT [12] much more effectively restricts the vehicle's trajectories from crossing into the non-drivable area. In contrast, both the Baseline and Uncertainty-enhanced approaches produce trajectories that intersect with the road boundary and lane divider. By incorporating BEV features, DenseTNT [12] is able to confine its future predictions to the designated white, drivable area, showing the utility of BEV features in improving map adherence.

Fig. 6: StreamMapNet [37] and DenseTNT [12] combined using the strategy in Sec. 3.3. By replacing agent trajectory information with BEV features, DenseTNT is able to predict trajectories that stop before the crosswalk, compared to the undershooting and overshooting of the Baseline and Uncertainty-enhanced [13] approaches.

In Fig. 6, the Baseline predicted trajectories undershoot the GT, whereas the Uncertainty-enhanced approach overshoots the pedestrian crosswalk entirely. By encoding the BEV features directly as agent information, our approach strikes a balance and produces trajectories that align well with the GT trajectory.
# 5 Conclusion

In this work, we propose three different strategies to leverage the intermediate BEV features within online map estimation models in downstream tasks such as behavior prediction. We systematically evaluate the benefits of different BEV encoding strategies and demonstrate how incorporating BEV features in downstream behavior prediction results in significant performance and runtime improvements. In particular, combinations of various online mapping and prediction methods achieve up to $73\%$ faster inference times when operating directly from intermediate BEV features and produce predictions that are up to $29\%$ more accurate across a variety of evaluation metrics.

Our work's limitations and potential negative impacts relate to its use of black-box features in lieu of vectorized map estimation. While this yields performance and runtime improvements, it may complicate introspection into why a behavior prediction algorithm made certain predictions (compared to when explicit map elements are encoded). Towards this end, exciting future directions include further explorations of mapping models' BEV feature spaces, strategies to interpret BEV features at runtime (alternatives to costly decoding processes), and co-training strategies to inform upstream map estimation models of the task of behavior prediction (ideally yielding improvements in both mapping and prediction performance, towards the development of end-to-end AV stacks).
# References

1. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuScenes: A multimodal dataset for autonomous driving. In: IEEE Conf. on Computer Vision and Pattern Recognition (2020)
2. Can, Y.B., Liniger, A., Paudel, D.P., Gool, L.V.: Structured bird's-eye-view traffic scene understanding from onboard images. In: IEEE Int. Conf. on Computer Vision (2021)
3. Chang, M.F., Lambert, J., Sangkloy, P., Singh, J., Bak, S., Hartnett, A., Wang, D., Carr, P., Lucey, S., Ramanan, D., Hays, J.: Argoverse: 3D tracking and forecasting with rich maps. In: IEEE Conf. on Computer Vision and Pattern Recognition (2019)
4. Deo, N., Wolff, E.M., Beijbom, O.: Multimodal trajectory prediction conditioned on lane-graph traversals. In: Conf. on Robot Learning (2021)
5. Dong, H., Zhang, X., Xu, J., Ai, R., Gu, W., Lu, H., Kannala, J., Chen, X.: SuperFusion: Multilevel LiDAR-camera fusion for long-range HD map generation. arXiv preprint arXiv:2211.15656 (2022)
6. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., Houlsby, N.: An image is worth 16x16 words: Transformers for image recognition at scale. In: Int. Conf. on Learning Representations (2021)
7. Ettinger, S., Cheng, S., Caine, B., Liu, C., Zhao, H., Pradhan, S., Chai, Y., Sapp, B., Qi, C., Zhou, Y., Yang, Z., Chouard, A., Sun, P., Ngiam, J., Vasudevan, V., McCauley, A., Shlens, J., Anguelov, D.: Large scale interactive motion forecasting for autonomous driving: The Waymo Open Motion Dataset. In: IEEE Int. Conf. on Computer Vision (2021)
8. Gao, J., Sun, C., Zhao, H., Shen, Y., Anguelov, D., Li, C., Schmid, C.: VectorNet: Encoding HD maps and agent dynamics from vectorized representation. In: IEEE Conf. on Computer Vision and Pattern Recognition (2020)
9. Gilles, T., Sabatini, S., Tsishkou, D., Stanciulescu, B., Moutarde, F.: HOME: Heatmap output for future motion estimation. In: Proc. IEEE Int. Conf. on Intelligent Transportation Systems (2021)
10. Gilles, T., Sabatini, S., Tsishkou, D., Stanciulescu, B., Moutarde, F.: GOHOME: Graph-oriented heatmap output for future motion estimation. In: Proc. IEEE Conf. on Robotics and Automation (2022)
11. Gilles, T., Sabatini, S., Tsishkou, D., Stanciulescu, B., Moutarde, F.: THOMAS: Trajectory heatmap output with learned multi-agent sampling. In: Int. Conf. on Learning Representations (2022)
12. Gu, J., Sun, C., Zhao, H.: DenseTNT: End-to-end trajectory prediction from dense goal sets. In: IEEE Int. Conf. on Computer Vision (2021)
13. Gu, X., Song, G., Gilitschenski, I., Pavone, M., Ivanovic, B.: Producing and leveraging online map uncertainty in trajectory prediction. In: IEEE Conf. on Computer Vision and Pattern Recognition (2024)
14. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: IEEE Conf. on Computer Vision and Pattern Recognition (2016)
15. Hu, Y., Yang, J., Chen, L., Li, K., Sima, C., Zhu, X., Chai, S., Du, S., Lin, T., Wang, W., Lu, L., Jia, X., Liu, Q., Dai, J., Qiao, Y., Li, H.: Planning-oriented autonomous driving. In: IEEE Conf. on Computer Vision and Pattern Recognition (2023)
16. Ivanovic, B., Harrison, J., Pavone, M.: Expanding the deployment envelope of behavior prediction via adaptive meta-learning. In: Proc. IEEE Conf. on Robotics and Automation (2023), https://arxiv.org/abs/2209.11820
17. Ivanovic, B., Song, G., Gilitschenski, I., Pavone, M.: trajdata: A unified interface to multiple human trajectory datasets. In: Conf. on Neural Information Processing Systems Datasets and Benchmarks Track. New Orleans, USA (Dec 2023), https://arxiv.org/abs/2307.13924
18. Jiang, B., Chen, S., Xu, Q., Liao, B., Chen, J., Zhou, H., Zhang, Q., Liu, W., Huang, C., Wang, X.: VAD: Vectorized scene representation for efficient autonomous driving. In: IEEE Int. Conf. on Computer Vision (2023)
19. Li, Q., Wang, Y., Wang, Y., Zhao, H.: HDMapNet: An online HD map construction and evaluation framework. In: Proc. IEEE Conf. on Robotics and Automation (2022)
20. Li, Z., Wang, W., Li, H., Xie, E., Sima, C., Lu, T., Qiao, Y., Dai, J.: BEVFormer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In: European Conf. on Computer Vision (2022)
21. Liang, M., Yang, B., Hu, R., Chen, Y., Liao, R., Feng, S., Urtasun, R.: Learning lane graph representations for motion forecasting. In: European Conf. on Computer Vision (2020)
22. Liao, B., Chen, S., Wang, X., Cheng, T., Zhang, Q., Liu, W., Huang, C.: MapTR: Structured modeling and learning for online vectorized HD map construction. In: Int. Conf. on Learning Representations (2023)
23. Liao, B., Chen, S., Zhang, Y., Jiang, B., Zhang, Q., Liu, W., Huang, C., Wang, X.: MapTRv2: An end-to-end framework for online vectorized HD map construction. arXiv preprint arXiv:2308.05736 (2023)
24. Liu, Y., Yuan, T., Wang, Y., Wang, Y., Zhao, H.: VectorMapNet: End-to-end vectorized HD map learning. In: Int. Conf. on Machine Learning. PMLR (2023)
25. Liu, Y., Zhang, J., Fang, L., Jiang, Q., Zhou, B.: Multimodal motion prediction with stacked transformers. In: IEEE Conf. on Computer Vision and Pattern Recognition (2021)
26. Liu, Z., Tang, H., Amini, A., Yang, X., Mao, H., Rus, D., Han, S.: BEVFusion: Multi-task multi-sensor fusion with unified bird's-eye view representation. In: Proc. IEEE Conf. on Robotics and Automation (2023)
27. Phan-Minh, T., Grigore, E.C., Boulton, F.A., Beijbom, O., Wolff, E.M.: CoverNet: Multimodal behavior prediction using trajectory sets. In: IEEE Conf. on Computer Vision and Pattern Recognition (2020)
28. Philion, J., Fidler, S.: Lift, Splat, Shoot: Encoding images from arbitrary camera rigs by implicitly unprojecting to 3D. In: European Conf. on Computer Vision (2020)
29. Qiao, L., Ding, W., Qiu, X., Zhang, C.: End-to-end vectorized HD-map construction with piecewise Bézier curve. In: IEEE Conf. on Computer Vision and Pattern Recognition (2023)
30. Rudenko, A., Palmieri, L., Herman, M., Kitani, K.M., Gavrila, D.M., Arras, K.O.: Human motion trajectory prediction: A survey. Int. Journal of Robotics Research 39(8), 895-935 (2020)
31. Salzmann, T., Ivanovic, B., Chakravarty, P., Pavone, M.: Trajectron++: Dynamically-feasible trajectory forecasting with heterogeneous data. In: European Conf. on Computer Vision (2020), https://arxiv.org/abs/2001.03093
32. Shin, J., Rameau, F., Jeong, H., Kum, D.: InstaGraM: Instance-level graph modeling for vectorized HD map learning. arXiv preprint arXiv:2301.04470 (2023)
33. Tong, W., Sima, C., Wang, T., Chen, L., Wu, S., Deng, H., Gu, Y., Lu, L., Luo, P., Lin, D., Li, H.: Scene as occupancy. In: IEEE Int. Conf. on Computer Vision (2023)
34. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Conf. on Neural Information Processing Systems (2017)
35. Wilson, B., Qi, W., Agarwal, T., Lambert, J., Singh, J., Khandelwal, S., Pan, B., Kumar, R., Hartnett, A., Pontes, J.K., Ramanan, D., Carr, P., Hays, J.: Argoverse 2: Next generation datasets for self-driving perception and forecasting. In: Conf. on Neural Information Processing Systems Datasets and Benchmarks Track (2021)
36. Xu, Z., Wong, K.K., Zhao, H.: InsightMapper: A closer look at inner-instance information for vectorized high-definition mapping. arXiv preprint arXiv:2308.08543 (2023)
37. Yuan, T., Liu, Y., Wang, Y., Wang, Y., Zhao, H.: StreamMapNet: Streaming mapping network for vectorized online HD map construction. In: IEEE Winter Conf. on Applications of Computer Vision (2024)
38. Yuan, Y., Weng, X., Ou, Y., Kitani, K.M.: AgentFormer: Agent-aware transformers for socio-temporal multi-agent forecasting. In: IEEE Int. Conf. on Computer Vision. pp. 9813-9823 (2021)
39. Zhao, H., Gao, J., Lan, T., Sun, C., Sapp, B., Varadarajan, B., Shen, Y., Shen, Y., Chai, Y., Schmid, C., Li, C., Anguelov, D.: TNT: Target-driveN Trajectory prediction. In: Conf. on Robot Learning (2020)
40. Zhou, Z., Ye, L., Wang, J., Wu, K., Lu, K.: HiVT: Hierarchical vector transformer for multi-agent motion prediction. In: IEEE Conf. on Computer Vision and Pattern Recognition (2022)
acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85754dfe2e605dbeb5f15c4a27de5a2b0a89aa4d84ec1e31e3b1f3444596de87
+size 437087
acceleratingonlinemappingandbehaviorpredictionviadirectbevfeatureattention/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e65eb580e5c34750c3ec07cf8282a7a10a0fb2baa667dca29dca75f56367d5e2
+size 353090
acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/3a7ac274-49d3-416c-93e4-fbc4558fc467_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40764419c90ccbc4385e7f182e0d63331b0d6d8b9de133b726620675ef932f5a
+size 95275
acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/3a7ac274-49d3-416c-93e4-fbc4558fc467_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87b4c81b00833dac4d2a5731bb8fbef66656e2ffa3e489eb36148fae118c24c6
+size 123740
acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/3a7ac274-49d3-416c-93e4-fbc4558fc467_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63af7ed786eb7eecd01c175484635658e72d32692a7e573201717e4095320f7d
+size 2494178
acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/full.md
ADDED
@@ -0,0 +1,356 @@

# A Comprehensive Study of Multimodal Large Language Models for Image Quality Assessment

Tianhe Wu$^{1,2}$, Kede Ma$^{2(\boxtimes)}$, Jie Liang$^{3}$, Yujiu Yang$^{1(\boxtimes)}$, and Lei Zhang$^{3,4}$

$^{1}$Tsinghua University
$^{2}$Department of Computer Science, City University of Hong Kong
$^{3}$OPPO Research Institute
$^{4}$Department of Computing, The Hong Kong Polytechnic University
wth22@mails.tsinghua.edu.cn, kede.ma@cityu.edu.hk, liang27jie@gmail.com, yang.yujiu@sz.tsinghua.edu.cn, cslzhang@comp.polyu.edu.hk
https://github.com/TianheWu/MLMs-for-IQA

Fig. 1: Illustration of visual attributes of image quality in our experiments.
Abstract. While Multimodal Large Language Models (MLLMs) have experienced significant advancement in visual understanding and reasoning, their potential to serve as powerful, flexible, interpretable, and text-driven models for Image Quality Assessment (IQA) remains largely unexplored. In this paper, we conduct a comprehensive and systematic study of prompting MLLMs for IQA. We first investigate nine prompting systems for MLLMs as the combinations of three standardized testing procedures in psychophysics (i.e., the single-stimulus, double-stimulus, and multiple-stimulus methods) and three popular prompting strategies in natural language processing (i.e., the standard, in-context, and chain-of-thought prompting). We then present a difficult sample selection procedure, taking into account sample diversity and uncertainty, to further challenge MLLMs equipped with the respective optimal prompting systems. We assess three open-source and one closed-source MLLMs on several visual attributes of image quality (e.g., structural and textural distortions, geometric transformations, and color differences) in both full-reference and no-reference scenarios. Experimental results show that only the closed-source GPT-4V provides a reasonable account for human perception of image quality, but is weak at discriminating fine-grained quality variations (e.g., color differences) and at comparing visual quality of multiple images, tasks humans can perform effortlessly.

Keywords: Image quality assessment $\cdot$ Multimodal large language models $\cdot$ Model comparison
# 1 Introduction
|
| 20 |
+
|
| 21 |
+
The evolution of Large Language Models (LLMs) has marked a significant milestone in the field of Artificial Intelligence (AI) [2,6,47,51]. The underlying idea of scaling the model size and training data [19] has rendered LLMs the abilities to perform various Natural Language Processing (NLP) tasks with unprecedented levels of accuracy.
|
| 22 |
+
|
| 23 |
+
In the midst of these developments, a particularly promising offshoot has emerged in the form of Multimodal LLMs (MLLMs). These advanced models have taken the capabilities of LLMs a step further by incorporating visual data alongside text [4, 13, 29, 43, 59, 62]. MLLMs typically integrate visual data via Vision Transformers (ViTs) [14] for feature extraction, attention mechanisms [48] for visual-textual relationship modeling, and connector modules [3, 16, 24, 45] to merge the two modalities. These techniques enable MLLMs to process both text and image data holistically, extending the application scenario of LLMs.

Apart from their high-level visual understanding and reasoning capabilities [29, 59], MLLMs also open up substantial opportunities for Image Quality Assessment (IQA) [50]. As a fundamental vision task, IQA aims to devise computational models to predict image quality as perceived by the Human Visual System (HVS). Ideally, MLLMs should benefit IQA in the following aspects.

- Improved accuracy. MLLMs are commonly built upon strong visual encoders [30, 39], which are exposed to massive numbers of images from various vision tasks closely related to (and including) IQA. This allows MLLMs to cross-validate visual information from different vision tasks for improved IQA, in a similar spirit to multitask learning for IQA [32, 70].
- Improved robustness. Being sequential in nature, MLLMs do not rely on the alignment of images for quality assessment, making them naturally robust to perturbations that are imperceptible to the human eye, such as mild geometric transformations [31] and texture resampling [11].
- Flexibility. With text as one input modality, MLLMs enable a wide range of text-conditioned IQA, for example, IQA for semantically meaningful local regions [64], fine-grained visual attributes (e.g., color appearance and well-exposedness) [9], and various viewing conditions and display systems [10].
- Interpretability. MLLMs generate descriptive text rather than merely providing a numerical score [66]. This text output allows for more detailed, contextually rich, and human-like quality assessment, making MLLMs valuable for IQA model-in-the-loop image processing [27].

Previous work [53,54,66] focuses primarily on establishing IQA datasets with human quality descriptions to benchmark MLLMs in terms of quality question answering, rating, and reasoning. Although appealing at first glance, these studies suffer from several limitations. First, properly instructing human subjects to supply detailed and balanced descriptions of image quality and other relevant visual information is highly nontrivial. Second, image quality is a perceptual quantity with subjective biases; how to screen outlier descriptions and how to aggregate valid but relatively inconsistent descriptions are largely overlooked. Third, comparing MLLM outputs to reference descriptions is complex, and remains an intriguing and challenging problem in language modeling [1,26]. After all, the field of IQA has a rich history, and numerous established human-rated image quality datasets are readily accessible for evaluating this perceptual aspect of MLLMs.
In this work, we conduct a comprehensive and systematic study of prompting MLLMs for IQA. We first explore nine prompting systems for MLLMs, combining standardized testing procedures in psychophysics (i.e., the single-stimulus, double-stimulus, and multiple-stimulus methods) with popular prompting strategies in NLP (i.e., the standard, in-context, and chain-of-thought prompting). To further challenge MLLMs, we propose a computational procedure to select difficult samples using top-performing IQA expert models as proxies [8, 11, 55, 58, 68, 70], while taking into account sample diversity and uncertainty. Under both Full-Reference (FR) and No-Reference (NR) settings, we experiment with three open-source and one closed-source MLLMs across several visual attributes of image quality, including structural and textural distortions, geometric transformations, and color differences (see Fig. 1).

Our experimental results highlight three key takeaways. First, different MLLMs require different prompting systems to perform optimally. This implies the need for a re-examination of the recent progress in MLLMs for IQA, particularly in comparison to GPT-4V [56, 65, 66]. Second, aided by the proposed difficult sample selection method, we demonstrate that there is still ample room for improving MLLMs (including GPT-4V) for IQA, especially in fine-grained quality discrimination and multiple-image quality analysis. Third, we argue that directly fine-tuning open-source MLLMs on datasets with image quality descriptions may not be effective due to the risk of catastrophic forgetting of the models' general abilities in visual reasoning and understanding. The encouraging results from chain-of-thought prompting suggest that IQA should be integrated into a broader and higher-level task, making use of additional physical, geometrical, and semantic information to infer image quality.
# 2 Related Work
In this section, we present a summary of expert models and MLLMs for IQA.
# 2.1 Expert Models for IQA

IQA can be divided into two categories: FR- and NR-IQA. FR-IQA models are preferred in situations where the undistorted reference image is available. Representative design philosophies for FR-IQA range from measuring error visibility (e.g., the mean squared error (MSE)), structural similarity (e.g., the SSIM index [50]), and mutual information (e.g., the VIF measure [40]) to (deep) learning-based methods (e.g., the LPIPS [68] and DISTS [11] metrics) and fusion-based approaches (e.g., VMAF [46]). Almost all FR-IQA models depend on the proper alignment of the reference and test images to execute co-located comparisons at the pixel, patch, or feature level. Consequently, these models may struggle to capture the robustness of the HVS to mild geometric transformations and texture resampling. Additionally, these models often give a superficial treatment of color information, leading to a poor account of perceptual color differences [9, 49].
NR-IQA models [20, 34, 57, 58, 61, 69, 70] are more challenging yet widely applicable as they evaluate image quality without any reference. Whether they are knowledge-driven [34] or data-driven [69, 70], NR-IQA models rely heavily on human-annotated training data. This reliance leads to weak generalization when applied to a wider range of images and distortions.
Apart from accuracy and generalization, expert IQA models also fall short in terms of flexibility and interpretability. This is primarily because they summarize image quality using a numerical score, missing the opportunity to leverage other input formats and to provide more detailed and perceptually relevant outputs.
# 2.2 MLLMs for IQA

Previous work has been centered on benchmarking and fine-tuning MLLMs for IQA. The pioneering datasets, DepictQA [66] and Q-Bench [53], have initiated the assessment of MLLMs in quality rating and reasoning under the FR and NR conditions, respectively. DepictQA employs a double-stimulus method, prompting MLLMs to compare a pair of images against the reference. Q-Bench takes a single-stimulus approach, computing a numerical score based on the classification of image quality as "poor" or "good". These investigations reveal that, except for the proprietary GPT-4V [59], open-source MLLMs like LLaVA [29], Kosmos-2 [38], and MiniGPT-4 [71] show limited success in replicating human perception of image quality. Efforts to enhance open-source MLLMs through instruction tuning have been made [54]. Yet, such models often revert to generating template-like quality descriptions, lacking the level of flexibility we are looking for. In this work, we take a step back and conduct a comprehensive and systematic exploration of prompting techniques for MLLMs on existing human-rated image quality datasets.
# 3 Prompting MLLMs for IQA
In this section, we introduce several IQA prompting systems for MLLMs, followed by the description of the difficult sample selection procedure.
# 3.1 Prompting Strategies from Psychophysics
Although MLLMs and the HVS (or more generally the human brain) operate in different domains, they have fascinating similarities, especially in representational hierarchy [5], visual-textual integration, contextual reasoning [21], and adaptation [12]. Thus, it is natural to employ standardized psychophysical testing procedures as prompting strategies for IQA.

Single-stimulus Method. As depicted in Fig. 2 (a), given a test image $x \in \mathcal{X}$ , the FR single-stimulus method<sup>5</sup> instructs MLLMs to output a quality score, $q(x) \in [0,100]$ , based on its perceptual distance to the corresponding reference image $y \in \mathcal{Y}$ . Here, a larger $q$ indicates higher quality. The NR single-stimulus method derives the quality score solely from the test image $x$ . Generally, the single-stimulus method is scalable and straightforward to implement for MLLMs, but it may not capture relative quality accurately.

Double-stimulus Method. Also known as two-alternative forced choice (2AFC) or paired comparison, the double-stimulus method first samples a pair of images, either uniformly or actively [60], from a set of images $\mathcal{X} = \{x_i\}_{i=1}^M$ , where $M$ is the total number of images. In our implementation, MLLMs are not forced to select the alternative with higher quality; instead, they have a third option to indicate that the quality of the two images is comparable. Fig. 2 (b) illustrates the FR and NR double-stimulus methods. In the FR scenario, the pair of images is constrained to share the same underlying visual content but differ in distortion types and intensities, and the reference image is supplied to facilitate quality comparison. In the NR scenario, the pair of images can be of different content, without any reference image. Upon constructing the pairwise preference matrix $C \in \mathbb{R}^{M \times M}$ , where $C_{ij}$ records the number of times $x_i$ is preferred over $x_j$ for $i \neq j$ , we adopt maximum a posteriori estimation to aggregate pairwise rankings under Thurstone's Case V model [44]. Arguably, the double-stimulus method is considered more reliable than the single-stimulus one when gathering human opinions of image quality, despite its $O(M^2)$ sample complexity. It remains to be seen whether such reliability extends to prompting MLLMs.

Fig. 2: Three standardized psychophysical testing procedures for IQA. (a) Single-stimulus method. (b) Double-stimulus method. (c) Multiple-stimulus method.

Multiple-stimulus Method. As shown in Fig. 2 (c), the multiple-stimulus method offers a more efficient way to gather partial rankings by simultaneously presenting a set of $L$ images to MLLMs. As with the double-stimulus method, in the FR scenario we limit the list of $L$ images to identical visual content and supply the corresponding reference image; neither the restriction nor the reference image is assumed in the NR scenario. The listwise ranking of $L$ images yields $\binom{L}{2}$ pairwise rankings, which are used to form the pairwise preference matrix $C$ , followed by ranking aggregation. Fig. 3 gives the detailed text prompts implementing the single-stimulus, double-stimulus, and multiple-stimulus methods for GPT-4V in the NR-IQA scenario.
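
A minimal sketch of the aggregation step shared by the double- and multiple-stimulus methods is given below: a listwise ranking is first expanded into the pairwise counts it implies, and latent quality scores are then recovered by MAP estimation under Thurstone's Case V model. The zero-mean Gaussian prior (and its variance) is our choice to resolve the translation ambiguity; the paper does not detail its prior.

```python
import numpy as np
from scipy.optimize import minimize
from scipy.stats import norm

def ranks_to_pairwise(C, indices, ranks):
    """Expand one listwise ranking (higher rank value = better) into the
    binom(L, 2) pairwise counts it implies; ties add no preference."""
    for a in range(len(indices)):
        for b in range(a + 1, len(indices)):
            if ranks[a] > ranks[b]:
                C[indices[a], indices[b]] += 1
            elif ranks[b] > ranks[a]:
                C[indices[b], indices[a]] += 1
    return C

def thurstone_case_v(C, prior_var=4.0):
    """MAP estimates of quality scores q from the preference matrix C,
    assuming P(i preferred over j) = Phi(q_i - q_j) (Case V)."""
    M = C.shape[0]

    def neg_log_posterior(q):
        diff = q[:, None] - q[None, :]           # q_i - q_j for all pairs
        loglik = np.sum(C * norm.logcdf(diff))   # diagonal of C is zero
        return -loglik + np.sum(q ** 2) / (2.0 * prior_var)

    return minimize(neg_log_posterior, np.zeros(M), method="L-BFGS-B").x
```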
# 3.2 Prompting Strategies from NLP

Standard Prompting. As shown in Fig. 3 (a), the standard (and most basic) prompting method directly queries the quality score, comparison, or ranking result, without any exemplars [41] or descriptions to elicit intermediate reasoning steps. This type of prompting is characterized by its brevity, and has been used in [72] to implement the double-stimulus method.

Chain-of-thought Prompting. As a simple yet powerful approach to encouraging step-by-step reasoning, chain-of-thought prompting [52] has proven beneficial for a range of NLP tasks. In the same vein, we request MLLMs to detail the perceptual quality of the test image(s) by examining various visual attributes (and comparing them to the reference image(s) when available), before integrating the analysis results into the overall quality estimate(s).

# (a) Standard Prompting



Single-stimulus Method

Please assign a perceptual quality score in terms of [...] The score must range from 0 to 100, with a higher score denoting better image quality. [...]



Double-stimulus Method

Please assign a perceptual quality comparison result between the two images in terms of [...]. If you judge that the first image has better quality than the second image, output 1; if you judge that the second image has better quality than the first image, output 0; if you judge that the two images have the same quality, output 2. [...]



Multiple-stimulus Method

Please assign a perceptual quality ranking result among four images in terms of [...]. The image with the lowest perceptual quality is ranked 0, and the image with the highest perceptual quality is ranked 3. If you judge that some distorted images have the same perceptual quality, their ranking can be the same. [...]

# (b) Chain-of-thought Prompting



Single-stimulus Method

Please first detail its perceptual quality in terms of [...]. Then, based on the perceptual analysis of the given image, assign a quality score to the given image. The score must range from 0 to 100, with a higher score denoting better image quality. [...]



Double-stimulus Method

Please first detail their perceptual quality comparison in terms of [...]. Then, based on the quality comparison analysis between them, assign a perceptual quality comparison result between the two images. If you judge that the first image has better quality than the second image, output 1; if you judge that the second image has better quality than the first image, output 0; if you judge that the two images have the same quality, output 2. [...]



Multiple-stimulus Method

Please first detail their perceptual quality comparison in terms of [...]. Then, based on the quality comparison analysis among them, please assign a perceptual quality ranking result among four images. The image with the lowest perceptual quality is ranked 0, and the image with the highest perceptual quality is ranked 3. If you judge that some distorted images have the same perceptual quality, their ranking can be the same. [...]

# (c) In-context Prompting



Single-stimulus Method

For the shown two images, the human perceptual quality score of the first image is 50. Now, based on the above example, please assign a perceptual quality score to the second image in terms of [...]. The score must range from 0 to 100, with a higher score denoting better image quality. [...]



Double-stimulus Method

For the first two images (the first and the second images), the human perceptual quality comparison result is that the first image is of better quality than the second image. Now, based on the above example, please assign a perceptual quality comparison result between the second two images (the third and the fourth images) in terms of [...]. If you judge that the third image has better quality than the fourth image, output 1; if you judge that the fourth image has better quality than the third image, output 0; if you judge that the two images have the same quality, output 2. [...]



Multiple-stimulus Method

For the shown eight images, the human perceptual quality ranking result for the first four images (from the first to the fourth) is [first: 0, second: 1, third: 2, fourth: 3]. Now, based on the above example, please assign a perceptual quality ranking result among the second four images (from the fifth to the eighth images) in terms of [...]. The image with the lowest perceptual quality is ranked 0, and the image with the highest perceptual quality is ranked 3. If you judge that some distorted images have the same perceptual quality, their ranking can be the same. [...]

Fig. 3: Instantiations of systematic prompting strategies for GPT-4V in the NR scenario. (a) Standard prompting. (b) Chain-of-thought prompting. (c) In-context prompting. See complete FR and NR text prompts in the supplementary material.

In-context Prompting. One of the key breakthroughs of LLMs is their ability to perform in-context (i.e., few-shot) prompting [63]. By incorporating just a handful of exemplars during inference, LLMs are adept at making accurate predictions, even on tasks they have not encountered before. Such ability to adapt without additional training is particularly valuable and worth investigating for MLLMs. Thus, as depicted in Fig. 3 (c), we apply the in-context prompting method by showing MLLMs example image(s) paired with human quality score(s). Subsequently, we ask the MLLMs to assess the quality of the test image(s).
By integrating prompting strategies derived from psychophysics and NLP, we end up with nine candidate prompting systems. Given the variations in training data, model architectures, optimization pipelines, and alignment strategies [35, 36, 73], it is anticipated that different MLLMs may function optimally with different prompting systems.
# 3.3 Computational Procedure for Difficult Sample Selection
Inference with MLLMs tends to be slow and costly, making it impractical to evaluate MLLMs on the full IQA datasets, each paired with the nine candidate prompting systems. We describe a computational procedure to pinpoint a smaller set of the most informative testing samples with three desired properties.

First, they should be difficult, with a high likelihood of causing MLLMs to err. The Group MAXimum Differentiation (gMAD) competition [33] has previously been used for difficult sample selection, but it is not applicable here because it assumes affordable inference, which MLLMs violate. We thus utilize efficient expert IQA models as proxies, and identify their failures by maximizing the MSE between model predictions and human quality scores, as a form of "black-box" attack [37] on MLLMs. Second, the selected samples should be diverse, to highlight various aspects of MLLMs' potential weaknesses in IQA. Third, they should be consistent with the majority of human judgments, with small variations. Putting these together, we formulate the following optimization problem in the FR scenario:

$$
y_{n}^{\star} = \underset{y \in \mathcal{Y} \backslash \mathcal{Y}_{n-1}^{\star}}{\operatorname{argmax}} \frac{1}{|\mathcal{X}_{y}|} \sum_{x \in \mathcal{X}_{y}} \frac{\left(d_{w}(x, y) - q(x)\right)^{2}}{(\sigma(x))^{2} + \epsilon} + \lambda \, \mathrm{Div}(y, \mathcal{Y}_{n-1}^{\star}), \tag{1}
$$
where $\mathcal{Y}_{n-1}^{\star} = \{y_{n'}^{\star}\}_{n'=1}^{n-1}$ contains the selected reference images in the previous $n-1$ iterations from $\mathcal{Y}$ , the set of all reference images. $\mathcal{X}_y$ includes all distorted images that originate from $y$ . $d_w(\cdot, \cdot)$ denotes a generic expert FR-IQA model, parameterized by $w$ . $q(\cdot)$ and $\sigma(\cdot)$ are respectively the human quality score and standard deviation. $\mathrm{Div}(\cdot)$ is a point-to-set distance measure, quantifying the added diversity of $y$ to $\mathcal{Y}_{n-1}^{\star}$ . $\lambda$ is a parameter, trading off the variance-normalized squared error and the diversity measure. $\epsilon$ is a small positive constant to avoid any potential division by zero.
For each of the $N$ identified reference images, we sample top- $K$ difficult distorted images by solving

$$
x_{k}^{\star} = \underset{x \in \mathcal{X}_{y} \backslash \mathcal{X}_{k-1}^{\star}}{\operatorname{argmax}} \frac{\left(d_{w}(x, y) - q(x)\right)^{2}}{(\sigma(x))^{2} + \epsilon}, \quad y \in \mathcal{Y}_{N}^{\star}, \tag{2}
$$
where $\mathcal{X}_{k - 1}^{\star} = \{x_{k'}^{\star}\}_{k' = 1}^{k - 1}$ . Similarly, in the NR scenario, we sample top- $N$ difficult images by slightly modifying Eq. (1):

$$
x_{n}^{\star} = \underset{x \in \mathcal{X} \backslash \mathcal{X}_{n-1}^{\star}}{\operatorname{argmax}} \frac{\left(q_{w}(x) - q(x)\right)^{2}}{(\sigma(x))^{2} + \epsilon} + \lambda \, \mathrm{Div}(x, \mathcal{X}_{n-1}^{\star}), \tag{3}
$$

where $q_{w}(\cdot)$ denotes a generic expert NR-IQA model.


(a) FR-KADID


(b) NR-KADID


(c) SPAQ

Fig. 4: Comparison between difficult sample selection with and without variance normalization under the same level of sample diversity.

Fig. 4 shows the comparison between difficult sample selection with and without variance normalization (as a measure of sample uncertainty) under the same level of sample diversity. We find that variance-normalized sampling induces a noticeable shift toward zero in the empirical distribution of sample standard deviations, suggesting an enhanced level of human consistency in assessing image quality.
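
A greedy implementation of Eq. (3) for the NR scenario might look as follows (the FR case of Eqs. (1) and (2) is analogous). The paper states only that $\mathrm{Div}(\cdot)$ is an MSE in CLIP feature space, so reducing the point-to-set distance by the minimum over the selected set is our assumption.

```python
import numpy as np

def select_difficult_nr(feats, q_pred, q_mos, sigma, N=150, lam=0.01, eps=1.0):
    """Greedy difficult sample selection per Eq. (3).

    feats:  CLIP visual features of all M candidates, shape (M, D)
    q_pred: proxy NR-IQA model predictions q_w(x), shape (M,)
    q_mos:  mean human opinion scores q(x), shape (M,)
    sigma:  standard deviations of human scores, shape (M,)
    """
    # variance-normalized squared prediction error (sample difficulty)
    err = (q_pred - q_mos) ** 2 / (sigma ** 2 + eps)
    selected, remaining = [], list(range(len(err)))
    for _ in range(N):
        best, best_val = None, -np.inf
        for i in remaining:
            if selected:
                # Div(x, X*) as the minimum MSE to the selected set (assumption)
                div = ((feats[selected] - feats[i]) ** 2).mean(axis=1).min()
            else:
                div = 0.0
            val = err[i] + lam * div
            if val > best_val:
                best, best_val = i, val
        selected.append(best)
        remaining.remove(best)
    return selected
```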
# 4 Experiments
In this section, we first present the experimental setups, and then carry out a comprehensive evaluation of the nine IQA prompting systems for MLLMs.
# 4.1 Experimental Setups

Datasets. We examine four visual attributes in the FR scenario, including synthetic structural and textural distortions, geometric transformations, texture similarity, and color differences. The selected datasets are FR-KADID [28], Aug-KADID $^{6}$ [28], TQD [11], and SPCD [49], respectively. In the NR scenario, we examine synthetic and authentic structural and textural distortions and algorithm-based artifacts, using the NR-KADID [28], SPAQ [15], and AGIQA-3K [23] datasets, respectively.

Sample Selection Details. In the FR scenario, the numbers of sampled reference images $N$ and corresponding distorted images $K$ are set to 15 and 10, respectively. In the NR scenario, the number of selected images $N$ is set to 150. The trade-off parameter $\lambda$ and the stability constant $\epsilon$ in Eqs. (1) and (3) are set to 0.01 and 1, respectively. The point-to-set distance measure $\mathrm{Div}(\cdot)$ is implemented by the MSE in the feature space of the CLIP visual encoder [39]. The expert FR-IQA model $d_w(\cdot,\cdot)$ and NR-IQA model $q_w(\cdot)$ are each an equally weighted linear fusion of two top-performing models: one excelling in within-dataset correlation, and the other demonstrating robust cross-dataset generalization. Specifically, for within-dataset performance, we select TOPIQ [8] on FR-KADID [28], DISTS [11] on TQD [11] and Aug-KADID [28], CDNet [49] on SPCD [49], LIQE [70] on SPAQ [15] and NR-KADID [28], and Q-Align [55] on AGIQA-3K [23]. For cross-dataset performance, we choose LPIPS [68] and MANIQA [58] in the FR and NR scenarios, respectively.
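
For concreteness, the CLIP features behind $\mathrm{Div}(\cdot)$ could be computed as sketched below, using the Hugging Face transformers implementation of the CLIP visual encoder; the specific checkpoint is our choice, as the paper does not name one.

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").eval()
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

@torch.no_grad()
def clip_image_features(paths):
    """Embed a batch of images with the CLIP visual encoder."""
    images = [Image.open(p).convert("RGB") for p in paths]
    inputs = processor(images=images, return_tensors="pt")
    return model.get_image_features(**inputs)  # shape (B, 512)

def div_mse(feat, selected_feats):
    """Point-to-set Div(.) as the minimum MSE to the selected set (assumption)."""
    return ((selected_feats - feat) ** 2).mean(dim=1).min().item()
```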
Table 1: SRCC results of MLLMs paired with different prompting systems on uniformly sampled images. Model-S, Model-C, and Model-I denote the standard prompting, chain-of-thought prompting, and in-context prompting, respectively. The best result in each section is highlighted in bold.
<table><tr><td rowspan="2">Method</td><td colspan="4">FR IQA</td><td colspan="3">NR IQA</td></tr><tr><td>FR-KADID</td><td>Aug-KADID</td><td>TQD</td><td>SPCD</td><td>NR-KADID</td><td>SPAQ</td><td>AGIQA-3K</td></tr><tr><td colspan="8">Single-stimulus Method</td></tr><tr><td>LLaVA-v1.6-S</td><td>0.227</td><td>0.013</td><td>0.180</td><td>0.001</td><td>0.262</td><td>0.544</td><td>0.614</td></tr><tr><td>mPLUG-Owl2-S</td><td>0.285</td><td>0.218</td><td>0.228</td><td>0.081</td><td>0.126</td><td>0.467</td><td>0.279</td></tr><tr><td>InternLM-XC2-VL-S</td><td>0.274</td><td>0.272</td><td>0.299</td><td>0.009</td><td>0.252</td><td>0.794</td><td>0.512</td></tr><tr><td>GPT-4V-S</td><td>0.745</td><td>0.786</td><td>0.773</td><td>0.098</td><td>0.467</td><td>0.860</td><td>0.420</td></tr><tr><td>LLaVA-v1.6-C</td><td>0.164</td><td>0.300</td><td>0.226</td><td>0.174</td><td>0.151</td><td>0.550</td><td>0.580</td></tr><tr><td>mPLUG-Owl2-C</td><td>0.387</td><td>0.361</td><td>0.278</td><td>0.122</td><td>0.179</td><td>0.455</td><td>0.409</td></tr><tr><td>InternLM-XC2-VL-C</td><td>0.237</td><td>0.306</td><td>0.167</td><td>0.063</td><td>0.306</td><td>0.649</td><td>0.507</td></tr><tr><td>GPT-4V-C</td><td>0.809</td><td>0.782</td><td>0.809</td><td>0.121</td><td>0.517</td><td>0.869</td><td>0.677</td></tr><tr><td>LLaVA-v1.6-I</td><td>0.249</td><td>0.194</td><td>0.222</td><td>0.147</td><td>0.116</td><td>0.019</td><td>0.061</td></tr><tr><td>mPLUG-Owl2-I</td><td>0.373</td><td>0.373</td><td>0.246</td><td>0.047</td><td>0.017</td><td>0.083</td><td>0.409</td></tr><tr><td>InternLM-XC2-VL-I</td><td>0.380</td><td>0.241</td><td>0.204</td><td>0.087</td><td>0.188</td><td>0.342</td><td>0.461</td></tr><tr><td>GPT-4V-I</td><td>0.771</td><td>0.753</td><td>0.738</td><td>0.028</td><td>0.590</td><td>0.845</td><td>0.650</td></tr><tr><td colspan="8">Double-stimulus Method</td></tr><tr><td>LLaVA-v1.6-S</td><td>0.387</td><td>0.396</td><td>0.390</td><td>0.113</td><td>0.270</td><td>0.430</td><td>0.234</td></tr><tr><td>mPLUG-Owl2-S</td><td>0.435</td><td>0.307</td><td>0.350</td><td>0.117</td><td>0.126</td><td>0.157</td><td>0.020</td></tr><tr><td>InternLM-XC2-VL-S</td><td>0.309</td><td>0.408</td><td>0.440</td><td>0.042</td><td>0.267</td><td>0.690</td><td>0.555</td></tr><tr><td>GPT-4V-S</td><td>0.679</td><td>0.743</td><td>0.655</td><td>0.031</td><td>0.552</td><td>0.834</td><td>0.599</td></tr><tr><td>LLaVA-v1.6-C</td><td>0.332</td><td>0.355</td><td>0.257</td><td>0.109</td><td>0.124</td><td>0.065</td><td>0.174</td></tr><tr><td>mPLUG-Owl2-C</td><td>0.409</td><td>0.334</td><td>0.318</td><td>0.013</td><td>0.199</td><td>0.122</td><td>0.130</td></tr><tr><td>InternLM-XC2-VL-C</td><td>0.332</td><td>0.411</td><td>0.267</td><td>0.131</td><td>0.165</td><td>0.556</td><td>0.546</td></tr><tr><td>GPT-4V-C</td><td>0.818</td><td>0.830</td><td>0.786</td><td>0.124</td><td>0.639</td><td>0.881</td><td>0.771</td></tr><tr><td>LLaVA-v1.6-I</td><td>0.379</td><td>0.396</td><td>0.324</td><td>0.032</td><td>0.169</td><td>0.128</td><td>0.156</td></tr><tr><td>mPLUG-Owl2-I</td><td>0.257</td><td>0.257</td><td>0.169</td><td>0.083</td><td>0.078</td><td>0.164</td><td>0.120</td></tr><tr><td>InternLM-XC2-VL-I</td><td>0.348</td><td>0.376</td><td>0.379</td><td>0.144</td><td>0.034</td><td>0.108</td><td>0.123</td></tr><tr><td>GPT-4V-I</td><td>0.470</td><td>0.244</td><td>0.340</td><td>0.122</td><td>0.531</td><td>0.761</td><td>0.714</td></tr><tr><td colspan="8">Multiple-stimulus 
Method</td></tr><tr><td>LLaVA-v1.6-S</td><td>0.349</td><td>0.351</td><td>0.315</td><td>0.241</td><td>0.169</td><td>0.221</td><td>0.210</td></tr><tr><td>mPLUG-Owl2-S</td><td>0.385</td><td>0.428</td><td>0.297</td><td>0.104</td><td>0.124</td><td>0.061</td><td>0.228</td></tr><tr><td>InternLM-XC2-VL-S</td><td>0.484</td><td>0.420</td><td>0.241</td><td>0.015</td><td>0.047</td><td>0.044</td><td>0.154</td></tr><tr><td>GPT-4V-S</td><td>0.824</td><td>0.844</td><td>0.747</td><td>0.037</td><td>0.397</td><td>0.715</td><td>0.461</td></tr><tr><td>LLaVA-v1.6-C</td><td>0.292</td><td>0.424</td><td>0.288</td><td>0.043</td><td>0.227</td><td>0.111</td><td>0.122</td></tr><tr><td>mPLUG-Owl2-C</td><td>0.377</td><td>0.406</td><td>0.376</td><td>0.126</td><td>0.214</td><td>0.166</td><td>0.084</td></tr><tr><td>InternLM-XC2-VL-C</td><td>0.500</td><td>0.466</td><td>0.273</td><td>0.038</td><td>0.031</td><td>0.037</td><td>0.148</td></tr><tr><td>GPT-4V-C</td><td>0.761</td><td>0.806</td><td>0.754</td><td>0.036</td><td>0.537</td><td>0.817</td><td>0.679</td></tr><tr><td>LLaVA-v1.6-I</td><td>0.337</td><td>0.380</td><td>0.356</td><td>0.203</td><td>0.152</td><td>0.033</td><td>0.241</td></tr><tr><td>mPLUG-Owl2-I</td><td>0.268</td><td>0.268</td><td>0.377</td><td>0.067</td><td>0.196</td><td>0.142</td><td>0.121</td></tr><tr><td>InternLM-XC2-VL-I</td><td>0.489</td><td>0.235</td><td>0.212</td><td>0.046</td><td>0.038</td><td>0.102</td><td>0.114</td></tr><tr><td>GPT-4V-I</td><td>0.585</td><td>0.496</td><td>0.389</td><td>0.023</td><td>0.168</td><td>0.416</td><td>0.201</td></tr></table>
# 4.2 Comparison of Nine Prompting Systems

We initially combine the nine prompting systems with three open-source MLLMs, LLaVA-v1.6 (Mistral-7B) [29], InternLM-XComposer2-VL (InternLM2-7B) [13], and mPLUG-Owl2 (LLaMA2-7B) [62], and one closed-source MLLM, GPT-4V [59], and compare them on 150 uniformly sampled images, with the goal of identifying the best prompting system for each MLLM. Table 1 shows the Spearman's rank correlation coefficient (SRCC) results.
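
For reference, SRCC (and the PLCC reported in Sec. 4.3) can be computed directly from predicted and human scores; a minimal sketch:

```python
import numpy as np
from scipy.stats import pearsonr, spearmanr

def correlations(pred, mos):
    """SRCC measures monotonicity of predictions against human scores;
    PLCC measures their linear agreement."""
    pred, mos = np.asarray(pred, float), np.asarray(mos, float)
    srcc, _ = spearmanr(pred, mos)
    plcc, _ = pearsonr(pred, mos)
    return srcc, plcc

# e.g., correlations([72, 15, 43], [80.1, 20.5, 55.3]) gives SRCC = 1.0,
# since the two score lists induce identical rankings.
```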

# Input Prompt



The first image is <ImageHere>. The second image is <ImageHere>. Please tell me the main object of the two images, respectively.

# Answers

# LLaVA-v1.6 (Mistral-7B)

The main object in the first image is a cat. The main object in the second image is a cat.

# mPLUG-Owl2 (LLaMA2-7B)

In the first image, the main object is a cat with blue eyes sitting in a field of flowers. In the second image, the main object is also a cat with blue eyes, but it has a collar and is surrounded by lavender plants.

# InternLM-XComposer2-VL (InternLM2-7B)

In the first image, the main object is a white cat. In the second image, the main object is a red car.

# GPT-4V

The main object in the first image is a cat with blue eyes sitting in a garden setting. The second image's main object is a classic red convertible car, which appears to be parked on a road with the sunset in the background.

Fig. 5: Behaviors of different MLLMs in recognizing objects from multiple images.

Analysis of Two IQA Scenarios. From the table, it is evident that no open-source MLLM achieves satisfactory IQA performance in the FR scenario, regardless of the adopted prompting system. These models may generate irrelevant text outputs or fail completely when the input text prompts become more detailed and elaborate. We hypothesize that these models are predominantly trained or aligned on single-image vision tasks, making it challenging for them to analyze multiple images, especially of the same underlying content [29]. Fig. 5 provides an example of how different MLLMs behave when recognizing objects from multiple images, a high-level vision task at which they should excel. However, it appears that LLaVA-v1.6 [29] and mPLUG-Owl2 [62] completely disregard the second image, despite it being explicitly mentioned and separated from the first image in the input prompt. In the NR scenario with single-stimulus standard prompting (i.e., the single-image analysis setting), LLaVA-v1.6 [29] and mPLUG-Owl2 [62] deliver improved quality prediction accuracy, while InternLM-XComposer2-VL [13] and GPT-4V perform remarkably well in handling realistic camera distortions on SPAQ [15].
Analysis of Psychophysical Prompting Methods. The results in the table reveal that for the three open-source MLLMs, the single-stimulus method is the optimal choice due to their limited ability to analyze multiple images. In stark contrast, GPT-4V [59] benefits from multiple-image analysis, and performs optimally under double-stimulus chain-of-thought prompting. These findings raise questions about recent claims [56, 65, 66] that open-source MLLMs have surpassed GPT-4V-level IQA performance by instruction-tuning on existing (weakly-)annotated benchmarks, given that the optimal prompting system has not been paired with GPT-4V.

Analysis of NLP Prompting Methods. It is expected that in-context prompting is unlikely to bring performance gains to the three open-source MLLMs, due to the added complexity of processing additional image(s) in the context. Interestingly, we find that in-context prompting does not aid GPT-4V with single-stimulus prompting either. When combined with multiple-stimulus prompting, GPT-4V performs poorly across various visual attributes and in both the FR and NR scenarios. Taking a closer look, we compare single-stimulus in-context prompting with double-stimulus standard prompting for GPT-4V. The main difference is that the former offers a human quality score for the contextual image, aiming to aid visual quality comparison. The comparable performance achieved by both prompting systems suggests that the inclusion of a human quality score has minimal impact on GPT-4V inference. In cases where the input prompt contains multiple images (e.g., eight images for multiple-stimulus in-context prompting), even GPT-4V has difficulty processing such a substantial amount of visual information, resulting in a sharp performance drop.



MOS: 30.2



MOS: 24.7

# GPT-4V (standard prompting)

Score 1: The first image has better visual quality than the second image. Score 0: The second image has better visual quality than the first image.

Score: 0

# GPT-4V (chain-of-thought prompting)

Description: Both images exhibit motion blur; however, the first image preserves more details, and some parts of the structure, such as buildings, are recognizable despite the blur. The color reproduction appears to be natural, with ambient lighting reflecting true to the time of day. The second image has significant blur affecting the entire frame, making it difficult to discern any specific details or structure. The colors and textures are largely indistinguishable due to this heavy blur.

Score: 1

Fig. 6: Comparison of double-stimulus standard and chain-of-thought promptings for GPT-4V in the NR scenario.

Chain-of-thought prompting, on the other hand, paints a different picture: it consistently enhances the performance of GPT-4V [59] under all three psychophysical testing protocols and across nearly all visual attributes. The remarkable improvements may arise because chain-of-thought prompting elicits the breakdown of the intricate IQA task into simpler sub-tasks, allowing for a more meticulous inspection of visually critical factors such as structure and texture preservation, as well as color and luminance reproduction. Moreover, as shown in Fig. 6, chain-of-thought prompting also encourages integrating IQA into a broader and higher-level task, orchestrating additional physical, geometrical, and semantic information about the natural scene to infer image quality. This reminds the authors of a famous slogan in computer vision by Song-Chun Zhu: "If you cannot solve a simple problem in vision, you may have to solve a complex one."
# 4.3 Further Testing on Difficult Data

In this subsection, we compare the previous four MLLMs, each with its optimally suited prompting system, together with a quality-instruction-tuned MLLM, Q-Instruct [54], against representative expert IQA systems on the set of difficult data described in Sec. 3.3. The expert systems include PSNR, SSIM [50], FSIM [67], LPIPS [68], AHIQ [22], and DISTS [11] as FR models, and NIQE [34], MUSIQ [20], MANIQA [58], and LIQE [70] as NR models.
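
Many of these expert baselines are packaged in the open-source IQA-PyTorch toolbox; a minimal sketch, assuming the pyiqa package and random stand-in tensors in place of real images:

```python
import torch
import pyiqa  # IQA-PyTorch; assumed installed via `pip install pyiqa`

device = "cuda" if torch.cuda.is_available() else "cpu"

# FR metrics take (test, reference); NR metrics take the test image only.
lpips = pyiqa.create_metric("lpips", device=device)
niqe = pyiqa.create_metric("niqe", device=device)

x = torch.rand(1, 3, 256, 256, device=device)  # stand-in test image in [0, 1]
y = torch.rand(1, 3, 256, 256, device=device)  # stand-in reference image

print(lpips(x, y).item())  # lower is better for LPIPS
print(niqe(x).item())      # lower is better for NIQE
```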

Results in the FR Scenario. Table 2 shows the SRCC and Pearson's linear correlation coefficient (PLCC) results in the FR scenario. The primary observation is that the difficult images selected by "attacking" expert IQA models using Eqs. (1) and (3) pose a challenge to GPT-4V, with noticeably reduced IQA performance. Nevertheless, on FR-KADID with synthetic structural and textural distortions, GPT-4V [59] is on par with the leading expert IQA model, FSIM [67], which assesses structural similarity in the feature space. On Aug-KADID and TQD, with geometric transformations and texture similarity, respectively, GPT-4V exhibits a commendable level of resilience, although not as good as DISTS [11]. It is important to note that the ways of attaining such perceptual robustness differ between the two methods: DISTS relies on comparing spatial averages of VGG [42] feature maps, whereas GPT-4V benefits from its intrinsic ability to compile and process data sequentially. On SPCD with color differences, all MLLMs, including GPT-4V, encounter challenges in emulating human color perception, and struggle to differentiate images even with clearly noticeable variations in color appearance.
Table 2: Comparison of MLLMs with optimally suited prompting systems against expert IQA systems in the FR scenario. * indicates that the model has been trained on the dataset.
<table><tr><td rowspan="2">Method</td><td colspan="2">FR-KADID</td><td colspan="2">Aug-KADID</td><td colspan="2">TQD</td><td colspan="2">SPCD</td></tr><tr><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td></tr><tr><td>PSNR</td><td>0.479</td><td>0.675</td><td>0.381</td><td>0.644</td><td>0.345</td><td>0.522</td><td>0.576</td><td>0.570</td></tr><tr><td>SSIM [50]</td><td>0.553</td><td>0.694</td><td>0.405</td><td>0.633</td><td>0.510</td><td>0.618</td><td>0.229</td><td>0.246</td></tr><tr><td>FSIM [67]</td><td>0.704</td><td>0.762</td><td>0.400</td><td>0.560</td><td>0.332</td><td>0.408</td><td>0.205</td><td>0.206</td></tr><tr><td>LPIPS [68]</td><td>0.477</td><td>0.654</td><td>0.547</td><td>0.654</td><td>0.469</td><td>0.511</td><td>0.280</td><td>0.252</td></tr><tr><td>AHIQ [22]</td><td>0.512</td><td>0.583</td><td>0.512</td><td>0.688</td><td>0.467</td><td>0.608</td><td>0.240</td><td>0.269</td></tr><tr><td>DISTS [11]</td><td>0.647*</td><td>0.740*</td><td>0.701</td><td>0.696</td><td>0.911</td><td>0.901</td><td>0.454</td><td>0.422</td></tr><tr><td>LLaVA-v1.6 [29]</td><td>0.112</td><td>0.218</td><td>0.198</td><td>0.213</td><td>0.180</td><td>0.226</td><td>0.037</td><td>0.008</td></tr><tr><td>mPLUG-Owl2 [62]</td><td>0.248</td><td>0.435</td><td>0.358</td><td>0.484</td><td>0.228</td><td>0.335</td><td>0.102</td><td>0.108</td></tr><tr><td>InternLM-XC2-VL [13]</td><td>0.246</td><td>0.336</td><td>0.235</td><td>0.404</td><td>0.299</td><td>0.421</td><td>0.171</td><td>0.143</td></tr><tr><td>GPT-4V [59]</td><td>0.669</td><td>0.795</td><td>0.708</td><td>0.800</td><td>0.786</td><td>0.857</td><td>0.122</td><td>0.234</td></tr></table>

Table 3: Comparison of MLLMs with optimally suited prompting systems against expert IQA systems in the NR scenario. * indicates that the model has been trained on the dataset.
<table><tr><td rowspan="2">Method</td><td colspan="2">SPAQ</td><td colspan="2">NR-KADID</td><td colspan="2">AGIQA-3K</td></tr><tr><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td></tr><tr><td>NIQE [34]</td><td>0.551</td><td>0.616</td><td>0.385</td><td>0.555</td><td>0.610</td><td>0.651</td></tr><tr><td>MUSIQ [20]</td><td>0.769</td><td>0.817</td><td>0.567</td><td>0.653</td><td>0.686</td><td>0.588</td></tr><tr><td>MANIQA [58]</td><td>0.546</td><td>0.564</td><td>0.428</td><td>0.387</td><td>0.521</td><td>0.599</td></tr><tr><td>LIQE [70]</td><td>0.781*</td><td>0.752*</td><td>0.866*</td><td>0.930*</td><td>0.703</td><td>0.693</td></tr><tr><td>LLaVA-v1.6 [29]</td><td>0.317</td><td>0.305</td><td>0.428</td><td>0.370</td><td>0.503</td><td>0.573</td></tr><tr><td>mPLUG-Owl2 [62]</td><td>0.270</td><td>0.198</td><td>0.128</td><td>0.187</td><td>0.168</td><td>0.201</td></tr><tr><td>InternLM-XC2-VL [13]</td><td>0.580</td><td>0.540</td><td>0.454</td><td>0.361</td><td>0.608</td><td>0.590</td></tr><tr><td>Q-Instruct [54]</td><td>0.799*</td><td>0.783*</td><td>0.635</td><td>0.613</td><td>0.853*</td><td>0.821*</td></tr><tr><td>GPT-4V [59]</td><td>0.845</td><td>0.843</td><td>0.513</td><td>0.453</td><td>0.783</td><td>0.746</td></tr></table>

Results in the NR Scenario. Table 3 shows the SRCC and PLCC results in the NR scenario. It is clear that GPT-4V [59] is outstanding in capturing the authentic structural and textural distortions on SPAQ, surpassing the two expert IQA systems MUSIQ [20] and LIQE [70]. Furthermore, GPT-4V showcases remarkable generalizability to AI-generated images, indicating its great potential to guide the optimization of generative image models. Q-Instruct [54], fine-tuned from a variant of LLaVA, exhibits enhanced IQA skills compared to LLaVA-v1.6, highlighting the effectiveness of visual instruction tuning.
# 4.4 Discussion and Limitation

We briefly discuss some limitations of this work and opportunities for future research. First, the current prompting systems have room for improvement, as the input prompts are not optimized; this presents an opportunity for automatic prompt optimization [17,25] within our prompting systems. Second, our sampler relies on human quality scores, and is thus only applicable to existing human-rated IQA datasets. Extending it to sample from large-scale unlabeled image sets would require either 1) accelerating the inference of MLLMs so as to leverage model falsification methodologies (e.g., the gMAD competition [33]), or 2) training additional failure-prediction modules [7] in a parameter-efficient way [18]. Third, the textual responses produced by MLLMs have not been quantitatively assessed. Prior research has applied GPT-4 [2] to assess quality responses in terms of correctness, consistency, relevance, informativeness, coherence, and naturalness. Fourth, this work does not touch on instruction tuning of MLLMs to enhance IQA performance. Preliminary efforts have been made by Wu et al. [54] and You et al. [66] to directly fine-tune open-source MLLMs on datasets with image quality descriptions. Our results suggest a more encouraging approach: (active) continual learning and/or parameter-efficient tuning of MLLMs to strike a good balance between the specificity (to IQA) and the generality of open-source MLLMs.
# 5 Conclusion
We have presented a comprehensive study of MLLMs for IQA, with emphasis on systematic prompting strategies. Our study arises as a natural combination of methods from two separate lines of research: psychophysics and NLP. The first involves collecting reliable measurements of perceptual quantities in response to physical stimuli. The second endeavors to design input textual descriptions to an LLM to elicit a specific response. Our experiments have shown that different MLLMs admit different prompting systems to perform optimally. This suggests the need for a re-evaluation of the recent progress in MLLMs for IQA. Moreover, there is still ample room for improving the IQA capabilities of MLLMs (including GPT-4V), especially in terms of fine-grained quality discrimination and multiple-image quality analysis.

Meanwhile, we have emphasized the importance of sample selection when evaluating MLLMs for IQA, owing to the high cost associated with inference. In response, we have proposed a computational procedure for difficult sample selection, taking into account both sample diversity and uncertainty. Our sampler relies on inference-efficient expert IQA models, and can be seen as a form of black-box attack on MLLMs, assuming no knowledge of their internal mechanisms or even their external input-output behaviors.
# Acknowledgements
The authors would like to thank the generous support from OPPO. This work was supported in part by the National Natural Science Foundation of China (62071407 and 61991451), the Hong Kong ITC Innovation and Technology Fund (9440379 and 9440390), and the Shenzhen Science and Technology Program (JCYJ20220818101001004).
# References

1. Achananuparp, P., Hu, X., Shen, X.: The evaluation of sentence similarity measures. In: Data Warehousing and Knowledge Discovery. pp. 305-316 (2008)
2. Achiam, J., Adler, S., Agarwal, S., Ahmad, L., Akkaya, I., Aleman, F.L., Almeida, D., Altenschmidt, J., Altman, S., Anadkat, S., et al.: GPT-4 technical report. arXiv preprint arXiv:2303.08774 (2023)
3. Alayrac, J.B., Donahue, J., Luc, P., Miech, A., Barr, I., Hasson, Y., Lenc, K., Mensch, A., Millican, K., Reynolds, M., et al.: Flamingo: A visual language model for few-shot learning. In: Advances in Neural Information Processing Systems. vol. 35, pp. 23716-23736 (2022)
4. Bai, J., Bai, S., Yang, S., Wang, S., Tan, S., Wang, P., Lin, J., Zhou, C., Zhou, J.: Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966 (2023)
5. Bracci, S., Mraz, J., Zeman, A., Leys, G., Op de Beeck, H.: The representational hierarchy in human and artificial visual systems in the presence of object-scene regularities. PLOS Computational Biology 19(4), 1-5 (2023)
6. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. In: Advances in Neural Information Processing Systems. vol. 33, pp. 1877-1901 (2020)
7. Cao, P., Li, D., Ma, K.: Image quality assessment: Integrating model-centric and data-centric approaches. In: Conference on Parsimony and Learning. pp. 529-541 (2024)
8. Chen, C., Mo, J., Hou, J., Wu, H., Liao, L., Sun, W., Yan, Q., Lin, W.: TOPIQ: A top-down approach from semantics to distortions for image quality assessment. arXiv preprint arXiv:2308.03060 (2023)
9. Chen, H., Wang, Z., Yang, Y., Sun, Q., Ma, K.: Learning a deep color difference metric for photographic images. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22242-22251 (2023)
10. Chubarau, A., Akhavan, T., Yoo, H., Mantiuk, R.K., Clark, J.: Perceptual image quality assessment for various viewing conditions and display systems. In: Image Quality and System Performance. pp. 1-9 (2020)
11. Ding, K., Ma, K., Wang, S., Simoncelli, E.P.: Image quality assessment: Unifying structure and texture similarity. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(5), 2567-2581 (2020)
12. Dong, Q., Li, L., Dai, D., Zheng, C., Wu, Z., Chang, B., Sun, X., Xu, J., Sui, Z.: A survey for in-context learning. arXiv preprint arXiv:2301.00234 (2022)
13. Dong, X., Zhang, P., Zang, Y., Cao, Y., Wang, B., Ouyang, L., Wei, X., Zhang, S., Duan, H., Cao, M., et al.: InternLM-XComposer2: Mastering free-form text-image composition and comprehension in vision-language large model. arXiv preprint arXiv:2401.16420 (2024)
14. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. In: International Conference on Learning Representations (2020)
15. Fang, Y., Zhu, H., Zeng, Y., Ma, K., Wang, Z.: Perceptual quality assessment of smartphone photography. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3677-3686 (2020)
16. Gao, P., Han, J., Zhang, R., Lin, Z., Geng, S., Zhou, A., Zhang, W., Lu, P., He, C., Yue, X., et al.: LLaMA-Adapter V2: Parameter-efficient visual instruction model. arXiv preprint arXiv:2304.15010 (2023)
17. Guo, Q., Wang, R., Guo, J., Li, B., Song, K., Tan, X., Liu, G., Bian, J., Yang, Y.: Connecting large language models with evolutionary algorithms yields powerful prompt optimizers. In: International Conference on Learning Representations (2024)
18. Hu, E.J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., Chen, W.: LoRA: Low-rank adaptation of large language models. In: International Conference on Learning Representations (2022)
19. Kaplan, J., McCandlish, S., Henighan, T., Brown, T.B., Chess, B., Child, R., Gray, S., Radford, A., Wu, J., Amodei, D.: Scaling laws for neural language models. arXiv preprint arXiv:2001.08361 (2020)
20. Ke, J., Wang, Q., Wang, Y., Milanfar, P., Yang, F.: MUSIQ: Multi-scale image quality transformer. In: IEEE/CVF International Conference on Computer Vision. pp. 5148-5157 (2021)
21. Kewenig, V., Lampinen, A., Nastase, S.A., Edwards, C., DEstalenx, Q.L., Rechardt, A., Skipper, J.I., Vigliocco, G.: Multimodality and attention increase alignment in natural language prediction between humans and computational models. arXiv preprint arXiv:2308.06035 (2024)
22. Lao, S., Gong, Y., Shi, S., Yang, S., Wu, T., Wang, J., Xia, W., Yang, Y.: Attentions help CNNs see better: Attention-based hybrid image quality assessment network. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshop. pp. 1140-1149 (2022)
23. Li, C., Zhang, Z., Wu, H., Sun, W., Min, X., Liu, X., Zhai, G., Lin, W.: AGIQA-3K: An open database for AI-generated image quality assessment. arXiv preprint arXiv:2306.04717 (2023)
24. Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: Bootstrapping language-image pretraining with frozen image encoders and large language models. In: International Conference on Machine Learning. pp. 19730-19742 (2023)
25. Li, X.L., Liang, P.: Prefix-tuning: Optimizing continuous prompts for generation. In: Association for Computational Linguistics and International Joint Conference on Natural Language Processing. pp. 4582-4597 (2021)
26. Li, Y., McLean, D., Bandar, Z.A., O'shea, J.D., Crockett, K.: Sentence similarity based on semantic nets and corpus statistics. IEEE Transactions on Knowledge and Data Engineering 18(8), 1138-1150 (2006)
27. Liang, Z., Li, C., Zhou, S., Feng, R., Loy, C.C.: Iterative prompt learning for unsupervised backlit image enhancement. In: IEEE/CVF International Conference on Computer Vision. pp. 8094-8103 (2023)
28. Lin, H., Hosu, V., Saupe, D.: KADID-10k: A large-scale artificially distorted IQA database. In: International Conference on Quality of Multimedia Experience. pp. 1-3 (2019)
29. Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. In: Advances in Neural Information Processing Systems. vol. 36, pp. 1-25 (2024)
30. Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin Transformer: Hierarchical vision transformer using shifted windows. In: IEEE/CVF International Conference on Computer Vision. pp. 10012-10022 (2021)
31. Ma, K., Duanmu, Z., Wang, Z.: Geometric transformation invariant image quality assessment using convolutional neural networks. In: IEEE International Conference on Acoustics, Speech and Signal Processing. pp. 6732-6736 (2018)
32. Ma, K., Liu, W., Zhang, K., Duanmu, Z., Wang, Z., Zuo, W.: End-to-end blind image quality assessment using deep neural networks. IEEE Transactions on Image Processing 27(3), 1202-1213 (2017)
33. Ma, K., Wu, Q., Wang, Z., Duanmu, Z., Yong, H., Li, H., Zhang, L.: Group MAD competition-a new methodology to compare objective image quality models. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1664-1673 (2016)
34. Mittal, A., Soundararajan, R., Bovik, A.C.: Making a "completely blind" image quality analyzer. IEEE Signal Processing Letters 20(3), 209-212 (2012)
35. Ngo, R., Chan, L., Mindermann, S.: The alignment problem from a deep learning perspective. In: International Conference on Learning Representations (2022)
36. Ouyang, L., Wu, J., Jiang, X., Almeida, D., Wainwright, C., Mishkin, P., Zhang, C., Agarwal, S., Slama, K., Ray, A., et al.: Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems 35, 27730-27744 (2022)
37. Papernot, N., McDaniel, P., Goodfellow, I., Jha, S., Celik, Z.B., Swami, A.: Practical black-box attacks against machine learning. In: ACM Asia Conference on Computer and Communications Security. pp. 506-519 (2017)
38. Peng, Z., Wang, W., Dong, L., Hao, Y., Huang, S., Ma, S., Wei, F.: Kosmos-2: Grounding multimodal large language models to the world. arXiv preprint arXiv:2306.14824 (2023)
39. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning. pp. 8748-8763 (2021)
40. Sheikh, H.R., Bovik, A.C.: Image information and visual quality. IEEE Transactions on Image Processing 15(2), 430-444 (2006)
41. Shin, S., Lee, S.W., Ahn, H., Kim, S., Kim, H., Kim, B., Cho, K., Lee, G., Park, W., Ha, J.W., et al.: On the effect of pretraining corpora on in-context learning by a large-scale language model. In: The North American Chapter of the Association for Computational Linguistics. pp. 5168-5186 (2022)
42. Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: International Conference on Learning Representations (2014)
43. Team, G., Anil, R., Borgeaud, S., Wu, Y., Alayrac, J.B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A.M., Hauth, A., et al.: Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023)
44. Thurstone, L.L.: A law of comparative judgment. Psychological Review 34, 273-286 (1927)
45. Tong, S., Brown, E., Wu, P., Woo, S., Middepogu, M., Akula, S.C., Yang, J., Yang, S., Iyer, A., Pan, X., et al.: Cambrian-1: A fully open, vision-centric exploration of multimodal LLMs. arXiv preprint arXiv:2406.16860 (2024)
46. Topiwala, P., Dai, W., Pian, J., Biondi, K., Krovvidi, A.: VMAF and variants: Towards a unified VQA. In: Applications of Digital Image Processing. vol. 11842, pp. 96-104 (2021)
47. Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., et al.: LLaMA: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)
48. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Advances in Neural Information Processing Systems. vol. 30, pp. 5998-6008 (2017)
49. Wang, Z., Xu, K., Yang, Y., Dong, J., Gu, S., Xu, L., Fang, Y., Ma, K.: Measuring perceptual color differences of smartphone photographs. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(8), 10114-10128 (2023)
50. Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: From error visibility to structural similarity. IEEE Transactions on Image Processing 13(4), 600-612 (2004)
51. Wei, J., Bosma, M., Zhao, V.Y., Guu, K., Yu, A.W., Lester, B., Du, N., Dai, A.M., Le, Q.V.: Finetuned language models are zero-shot learners. In: International Conference on Learning Representations (2022)
52. Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q.V., Zhou, D., et al.: Chain-of-thought prompting elicits reasoning in large language models. In: Advances in Neural Information Processing Systems. vol. 35, pp. 24824-24837 (2022)
53. Wu, H., Zhang, Z., Zhang, E., Chen, C., Liao, L., Wang, A., Li, C., Sun, W., Yan, Q., Zhai, G., et al.: Q-Bench: A benchmark for general-purpose foundation models on low-level vision. In: International Conference on Learning Representations (2024)
54. Wu, H., Zhang, Z., Zhang, E., Chen, C., Liao, L., Wang, A., Xu, K., Li, C., Hou, J., Zhai, G., et al.: Q-Instruct: Improving low-level visual abilities for multi-modality foundation models. arXiv preprint arXiv:2311.06783 (2023)
55. Wu, H., Zhang, Z., Zhang, W., Chen, C., Liao, L., Li, C., Gao, Y., Wang, A., Zhang, E., Sun, W., et al.: Q-Align: Teaching LMMs for visual scoring via discrete text-defined levels. arXiv preprint arXiv:2312.17090 (2023)
56. Wu, H., Zhu, H., Zhang, Z., Zhang, E., Chen, C., Liao, L., Li, C., Wang, A., Sun, W., Yan, Q., et al.: Towards open-ended visual quality comparison. arXiv preprint arXiv:2402.16641 (2024)
57. Wu, T., Shi, S., Cai, H., Cao, M., Xiao, J., Zheng, Y., Yang, Y.: Assessor360: Multi-sequence network for blind omnidirectional image quality assessment. In: Advances in Neural Information Processing Systems. vol. 36, pp. 1-14 (2024)
58. Yang, S., Wu, T., Shi, S., Lao, S., Gong, Y., Cao, M., Wang, J., Yang, Y.: MANIQA: Multi-dimension attention network for no-reference image quality assessment. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshop. pp. 1191-1200 (2022)
59. Yang, Z., Li, L., Lin, K., Wang, J., Lin, C.C., Liu, Z., Wang, L.: The dawn of LMMs: Preliminary explorations with GPT-4V(ision). arXiv preprint arXiv:2309.17421 (2023)
60. Ye, P., Doermann, D.: Active sampling for subjective image quality assessment. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4249-4256 (2014)
61. Ye, P., Kumar, J., Kang, L., Doermann, D.: Unsupervised feature learning framework for no-reference image quality assessment. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1098-1105 (2012)
62. Ye, Q., Xu, H., Ye, J., Yan, M., Liu, H., Qian, Q., Zhang, J., Huang, F., Zhou, J.: mPLUG-Owl2: Revolutionizing multi-modal large language model with modality collaboration. arXiv preprint arXiv:2311.04257 (2023)
63. Yin, S., Fu, C., Zhao, S., Li, K., Sun, X., Xu, T., Chen, E.: A survey on multimodal large language models. arXiv preprint arXiv:2306.13549 (2023)
|
| 347 |
+
64. Ying, Z., Niu, H., Gupta, P., Mahajan, D., Ghadiyaram, D., Bovik, A.: From patches to pictures (PaQ-2-PiQ): Mapping the perceptual space of picture quality. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3575-3585 (2020)
|
| 348 |
+
65. You, Z., Gu, J., Li, Z., Cai, X., Zhu, K., Xue, T., Dong, C.: Descriptive image quality assessment in the wild. arXiv preprint arXiv:2405.18842 (2024)
|
| 349 |
+
66. You, Z., Li, Z., Gu, J., Yin, Z., Xue, T., Dong, C.: Depicting beyond scores: Advancing image quality assessment through multi-modal language models. arXiv preprint arXiv:2312.08962 (2023)
|
| 350 |
+
67. Zhang, L., Zhang, L., Mou, X., Zhang, D.: FSIM: A feature similarity index for image quality assessment. IEEE Transactions on Image Processing 20(8), 2378-2386 (2011)
|
| 351 |
+
68. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 586-595 (2018)
|
| 352 |
+
69. Zhang, W., Ma, K., Zhai, G., Yang, X.: Uncertainty-aware blind image quality assessment in the laboratory and wild. IEEE Transactions on Image Processing 30, 3474-3486 (2021)
|
| 353 |
+
70. Zhang, W., Zhai, G., Wei, Y., Yang, X., Ma, K.: Blind image quality assessment via vision-language correspondence: A multitask learning perspective. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14071-14081 (2023)
|
| 354 |
+
71. Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: MiniGPT-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592 (2023)
|
| 355 |
+
72. Zhu, H., Sui, X., Chen, B., Liu, X., Chen, P., Fang, Y., Wang, S.: 2AFC prompting of large multimodal models for image quality assessment. arXiv preprint arXiv:2402.01162 (2024)
|
| 356 |
+
73. Zhuang, S., Hadfield-Menell, D.: Consequences of misaligned AI. In: Advances in Neural Information Processing Systems. vol. 33, pp. 15763-15773 (2020)
|
acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:35cd1647df53a385ac6a558a90a4ddb663b13e75aed3217a92f8f1731f4bc186
size 669506
acomprehensivestudyofmultimodallargelanguagemodelsforimagequalityassessment/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:74fa1000c3377d2d27dc0387d1cf08c3741068dcdf530c820c44e33d40ce6fe0
size 458541
action2soundambientawaregenerationofactionsoundsfromegocentricvideos/4aec7362-c326-48a6-8c9d-60c43e8b873f_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d51e895bd362be089bd21b0e3dfdb7b1f4095444dbf4fbea1c67d2f132b08ea2
size 87175
action2soundambientawaregenerationofactionsoundsfromegocentricvideos/4aec7362-c326-48a6-8c9d-60c43e8b873f_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e41025be8366737cae208f28dbb7a12a454147baf3d929e3a1caaa356ca6f18
size 108158
action2soundambientawaregenerationofactionsoundsfromegocentricvideos/4aec7362-c326-48a6-8c9d-60c43e8b873f_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:beb2a197e04e78cdac4ecfcbf5dc3f5e08d9ac85651ad64a38b0a3d1220e5c1d
size 5798775
action2soundambientawaregenerationofactionsoundsfromegocentricvideos/full.md
ADDED
@@ -0,0 +1,327 @@
# Action2Sound: Ambient-Aware Generation of Action Sounds from Egocentric Videos

Changan Chen$^{1*}$, Puyuan Peng$^{1*}$, Ami Baid$^{1}$, Zihui Xue$^{1}$, Wei-Ning Hsu$^{2}$, David Harwath$^{1}$, and Kristen Grauman$^{1}$

$^{1}$ University of Texas at Austin
$^{2}$ FAIR, Meta

Abstract. Generating realistic audio for human actions is important for many applications, such as creating sound effects for films or virtual reality games. Existing approaches implicitly assume total correspondence between the video and audio during training, yet many sounds happen off-screen and have weak to no correspondence with the visuals, resulting in uncontrolled ambient sounds or hallucinations at test time. We propose a novel ambient-aware audio generation model, AV-LDM. We devise a novel audio-conditioning mechanism to learn to disentangle foreground action sounds from the ambient background sounds in in-the-wild training videos. Given a novel silent video, our model uses retrieval-augmented generation to create audio that matches the visual content both semantically and temporally. We train and evaluate our model on two in-the-wild egocentric video datasets, Ego4D and EPIC-KITCHENS, and we introduce Ego4D-Sounds, 1.2M curated clips with action-audio correspondence. Our model outperforms an array of existing methods, allows controllable generation of the ambient sound, and even shows promise for generalizing to computer graphics game clips. Overall, our approach is the first to focus video-to-audio generation faithfully on the observed visual content despite training from uncurated clips with natural background sounds.

Keywords: audio-visual learning $\cdot$ egocentric video understanding

# 1 Introduction

As we interact with objects around us in our daily lives, our physical actions often produce sound, e.g., clicking on a mouse, closing a door, or cutting vegetables. The distinct characteristics of these action sounds depend upon the type of action being performed, the shapes and materials of the objects being acted upon, the amount of force being applied, and so forth. Vision not only captures what physical interaction happens but also informs us when the interaction happens, suggesting the possibility of synthesizing semantically plausible and temporally synchronous action sounds from silent videos alone. This capability would accelerate many real-world applications, such as text-to-video generation, generating sound effects for films (Foley), or sound effect generation for virtual reality (VR) and video games.

Fig. 1: Real-world audio consists of both foreground action sounds (whose causes are visible) and background ambient sounds generated by sources offscreen. Whereas prior generation work is agnostic to this division, our method is ambient-aware and disentangles action sound from ambient sound. Our key technical insight is how to train with in-the-wild videos exhibiting natural ambient sounds, while still learning to factor out their effects on generation. The green arrows reference how we condition generation on sound from a related, but time-distinct, video clip to achieve this.

Some prior work studies impact sound synthesis from videos [42, 48] while others target more general video-to-audio generation [26, 38]. These methods implicitly assume total correspondence between the video and audio and aim to generate the whole target audio from the video. However, this strategy falls short for in-the-wild training videos, which are rife with off-screen ambient sounds, e.g., traffic noise, people talking, or A/C running. While some of these ambient sounds are weakly correlated with the visual scene, such as the wind blowing in an outdoor environment, many of them have no visual correspondence, such as off-screen speech or a stationary buzzing noise from the fridge. Most existing methods are not able to disentangle action sounds from ambient sounds and treat them as a whole, leading to uncontrolled generation of ambient sounds at test time and sometimes even hallucination, e.g., random action or ambient sounds. This is particularly problematic for generating action sounds because they are often subtle and transient compared to ambient sounds. For example, trained in the traditional way, a model given a scene that looks like a noisy restaurant risks generating "restaurant-like" ambient sounds, while ignoring the actual movements and activities of the foreground actions, such as a person stirring their coffee with a metal spoon.

How can we disentangle the foreground action sounds from background ambient sounds for in-the-wild video data without ground truth separated streams? Simply applying a noise removal algorithm on the target audio does not work well since in-the-wild blind source separation of general sounds from a single microphone is still an open challenge [49], and class-dependent models for predicting visually relevant sounds cannot generalize to in-the-wild video [6].

Our key observation is that while action sounds are highly localized in time, ambient sounds tend to persist across time. Given this observation, we propose a simple but effective solution to disentangle ambient and action sounds: during training, in addition to the input video clip, we also condition the generation model on an audio clip from the same long video as the input video clip but from different timestamps. See Fig. 1. By doing so, we lift the burden of generating energy-dominating ambient sounds and encourage the model to focus on learning action cues from the visual frames to generate action sounds. At test time, we do not assume access to (even other clips of) the ground truth video/audio. Instead, we propose to retrieve an audio segment from the training set with an audio-visual similarity scoring model, inspired by recent ideas in retrieval-augmented generation (RAG) [20,30,34]. This benefits examples where the visual scene has a weak correlation with the ambient sound that is beneficial to capture, e.g., outdoor environments.

Existing action sound generation work relies on either clean, manually-collected data that has a limited number of action categories [9, 42, 48], or videos crawled from YouTube based on predefined taxonomies [5,6,16,26]. To expand the boundary of action sound generation to in-the-wild human actions, we take advantage of recent large-scale egocentric video datasets [10, 18]. Though our model is not tailored to egocentric video in any way, there are two main benefits of using these datasets: 1) egocentric videos provide a close view of human actions compared to exocentric videos, where hand-object interactions are observed from a distance and are often occluded, and 2) these datasets have timestamped narrations describing atomic actions. We design a pipeline to extract and process clips from Ego4D, curating Ego4D-Sounds with 1.2 million audio-visual action clips.$^3$

Our idea of disentangling action and ambient sounds implicitly in training is model-agnostic. In this paper, we instantiate it by designing an audio-visual latent diffusion model (AV-LDM) that conditions on both modality streams for audio generation. We evaluate our AV-LDM against recent work on a wide variety of metrics and show that our model outperforms the existing methods significantly on both Ego4D-Sounds and EPIC-KITCHENS. We conduct a human evaluation study that shows our model synthesizes plausible action sounds according to the video. Please see/listen for yourself in our supplementary video! We also show promising preliminary results on virtual reality game clips. To the best of our knowledge, this is the first work that demonstrates the disentanglement of foreground action sounds from background sounds for action-to-sound generation on in-the-wild videos.

# 2 Related Work

# 2.1 Action Sound Generation

A pioneering work for capturing human-generated action sounds collects videos where people hit, scratch, or prod objects with a drumstick [42]. This is an early inspirational effort, though it is by design limited in the type of actions. The robotics community also studies this problem by using robotic platforms to collect collision sounds and analyze or synthesize them from video [9,15]. Other work simulates collision events [14], which remains difficult for action sounds due to the complexity of the physical interactions. Most existing methods demonstrate good synthesis results when the data are noise-free. However, they are not equipped to learn from in-the-wild action videos, where the action sound is always coupled with ambient sound. Sharing our motivation to disregard irrelevant sounds, the REGNET framework [6] aims to predict visually relevant sounds by conditioning on ground truth audio with a bottleneck design. However, it does not allow controllable generation and risks learning to copy the target action and ambient sound, leading to weaker empirical performance. More importantly, REGNET [6] requires curated datasets to train class-dependent models, which prevents generalization to in-the-wild data, as we will see in the results. We propose an ambient-aware model to deal with this issue head-on and also introduce the Ego4D-Sounds dataset to expand action sound synthesis to in-the-wild actions.

# 2.2 Egocentric Video Understanding with Audio

Understanding human activities in videos has long been a core challenge of computer vision. Early research studies activity recognition from exocentric video [13, 28, 47]. Recent work explores the egocentric setting and introduces large egocentric datasets such as Ego4D [18], EPIC-KITCHENS [10], and Ego-Exo4D [19]. Leveraging both the video and audio streams in egocentric videos, many interesting tasks are enhanced, such as action recognition [29], localization [44], active speaker localization [27, 39], sounding object localization [4, 23], and state-aware representations [40]. Most related to our work is SoundingActions [3], which learns visual representations of actions that make sounds, and is valuable for indexing and recognition problem settings, but ill-equipped for generation, as we show later. All existing audio-visual learning for egocentric video focuses on perception, i.e., understanding what happens in the video. In contrast, we target the video-to-audio generation problem. Furthermore, relative to any of the above, our idea to implicitly learn to disentangle the action sound from ambient sounds is novel.

# 2.3 Diffusion Models and Conditional Audio Generation

Diffusion models have attracted significant attention recently because of their high fidelity generation [11,41,45]. Initially proposed for image generation [21,46], they have also been successfully applied to speech and audio generation [24,33,36,43,51]. Benefitting from classifier-free guidance [22] and large-scale representation learning, AudioLDM [36] and Make-An-Audio [24] perform diffusion-based text-to-audio generation. More recently, Diff-Foley [38] adapts latent diffusion models for video-to-audio generation by first conducting audio-video contrastive learning and then video-conditioned audio generation. While promising, it does not address the background ambient sound problem. Inspired by recent work on retrieval-augmented generation (RAG) for text [2,20,30,34] and image generation [1,7], we show our audio-conditioning insight carries over to inference time via a retrieval component of the model. Conditional video-to-audio generation conditions on either a physics prior to guide diffusion-based impact sound generation [48] or, in CondFoleyGen [12], another video clip to modify characteristics of the action sound. Our method also considers additional conditioning signals to control the output, but for a very different purpose; our model is the first to address foreground/background sound disentanglement in generation.

# 3 Ambient-aware Action Sound Generation

We first discuss our high-level idea of how to guide the generation model to disentangle action sounds from ambient sounds. We then devise AV-LDM, an extension of latent diffusion models (LDM) to accommodate both audio and video conditions. We also discuss our pretraining stage.

Fig. 2: Illustration of the harm of ambient sound in video-to-audio generation. In this example, this person is closing a packet of ginger powder, which makes some rustling sound (circled in red in the middle). There is also some buzzing sound in the background that is semantically irrelevant to the visual scene, yet dominates the energy of the spectrogram. On the right-hand side, we show a prediction made by a vanilla model that misses the action sound but predicts the ambient sound.

# 3.1 Action-to-Sound Generation

Given a video $V \in \mathbb{R}^{(T \cdot S_V) \times H \times W \times 3}$, where $T$ is the duration of the video and $S_V$ is the video sample rate, and the accompanying audio waveform $A \in \mathbb{R}^{1 \times (T \cdot S_A)}$, where $S_A$ is the audio sample rate, our goal is to model the conditional distribution $p(A|V)$ for video-to-audio generation. During training we observe natural video coupled with its audio, whereas at inference time we have only a silent video, e.g., an output from a text-to-video model, a VR/video game clip, or simply a real-world video for which we want to generate new plausible sounds.

# 3.2 Disentangling Action and Ambient Sounds

Learning a video-to-audio generation model using in-the-wild egocentric videos is challenging because of entangled foreground action and background ambient sounds, as illustrated in Fig. 2. More specifically, the reasons are two-fold: 1) while action sounds are usually of very short duration, ambient sounds can last the entire clip, and therefore dominate the loss, leading to low-quality action sound generation; 2) while some ambient sounds might be semantically related to the visual scene, such as bird chirping in the woods, in many cases ambient sounds are difficult to infer from the visual scene because they are the results of the use of certain microphones, recording conditions, people speaking, off-screen actions, etc. Forcing a generation model to learn those background sounds from video results in hallucinations during inference (see examples in Fig. 6).

Therefore, it is important to proactively disentangle action sounds and ambient sounds during training. However, separating in-the-wild ambient sounds is still an open challenge: recent models rely on supervised training using artificially mixed sounds, for which the ground truth complex masks can be obtained [49]. Simply applying off-the-shelf noise reduction methods to training data leads to poor performance, as we will show in Sec. 5.

Fig. 3: Audio condition selection and the model architecture. Left: During training, we randomly sample a neighbor audio clip as the audio condition. For inference, we query the training set audio with the (silent) input video and retrieve an audio clip that has the highest audio-visual similarity with the input video using our trained AV-Sim model (Sec. 3.5). Right: We represent audio waveforms as spectrograms and use a latent diffusion model to generate the spectrogram conditioned on both the input video and the audio condition. At test time, we use a trained vocoder network to transform the spectrogram to a waveform.

While it is difficult to explicitly separate the ambient and action sound in the target audio, our key observation is that ambient sounds are usually fairly stationary across time. Given this observation, we propose a simple but effective method to achieve the disentanglement. During training, in addition to the video clip $V$, we also provide the model an audio clip $A_n$ that comes from the same training video but a different timestamp than the input video clip (see Fig. 3). Therefore, instead of modeling $p(A|V)$, we model $p(A|V,A_n)$. Given the hypothesis that $A_n$ is likely to share ambient sound characteristics with $A$, it can take away the burden of learning weakly correlated or even uncorrelated ambient sounds from visual input alone, and encourages the model to focus on learning action features from the visual input. For the selection of $A_n$, we randomly sample one audio clip from the nearest $X$ clips in time. While there is no guarantee that the sampled audio shares exactly the same ambient sound with the target audio, their ambient sounds should largely overlap since they are close in time, which provides a consistent learning signal to help the model learn the disentanglement. While it is possible for the sampled audio to contain repetitions of the target action sound, 1) the chance of selecting a semantically relevant sound is low ($9\%$ based on the (verb, noun) taxonomy) and 2) the precise temporal onset is almost never the same, thus making it impossible for the model to cheat in training.
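
To make the training-time condition selection concrete, below is a minimal sketch of how the neighbor clip $A_n$ could be drawn. Only $X$ comes from the paper ($X = 6$ per Sec. 3.6); the dataset layout, field names, and helper function are illustrative assumptions, not our actual pipeline.

```python
import random

def sample_neighbor_audio(video_id, clip_idx, clips_by_video, X=6):
    """Pick the audio of a clip temporally near `clip_idx` in the same long video.

    `clips_by_video` maps a video id to its time-ordered list of clips; this
    layout is assumed for illustration only.
    """
    clips = clips_by_video[video_id]
    # Candidate indices around the target clip, excluding the target itself.
    lo, hi = max(0, clip_idx - X), min(len(clips) - 1, clip_idx + X)
    candidates = [i for i in range(lo, hi + 1) if i != clip_idx]
    # Keep only the X temporally nearest candidates, then sample uniformly.
    candidates.sort(key=lambda i: abs(i - clip_idx))
    neighbor = random.choice(candidates[:X])
    return clips[neighbor]["audio"]  # waveform used as the condition A_n
```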

# 3.3 Retrieval Augmented Generation and Controllable Generation

While during training we have access to the clips in the same long video as the input clip, we of course cannot access that information at test time. How we select $A_n$ at test time depends on the purpose of the generation. We consider two use cases: action-ambient joint generation and action-focused generation. In the first scenario, we would like the model to generate both the action sound and ambient sound that is plausible for the visual environment. This is useful, for example, for generating sound effects for videos. In the latter scenario, we would like the model to focus the generation on action sounds and minimize ambient sounds, which is useful, for example, for generating sounds for games. Fig. 4 depicts the two scenarios.

Fig. 4: Two inference settings: "action-ambient joint generation" and "action-focused generation". In the first setting, we condition on audio retrieved from the training set and aim to generate both plausible action and ambient sounds. In the second setting, we condition on an audio file with low ambient sound and the model focuses on generating plausible action sounds while minimizing the ambient sounds.

For action-ambient joint generation, we want $A_n$ to be semantically relevant to the visual scene. Inspired by recent work in retrieval-augmented generation, we propose to retrieve audio such that:

$$
A_n = \underset{A_i \in \mathcal{D}}{\arg\max}\, \mathrm{AV\text{-}Sim}(A_i, V), \tag{1}
$$

where $\mathcal{D}$ is the dataset of all training audio clips and $V$ is the (silent) input video. $\mathrm{AV\text{-}Sim}(A, V)$ is a scoring function that measures the audio-visual similarity between $A$ and $V$, which we will cover in Sec. 3.5.
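
As a concrete illustration of Eq. (1), here is a minimal inference-time sketch, assuming the AV-Sim audio embeddings of all training clips have been precomputed and L2-normalized; the tensor names and encoder interface are assumptions.

```python
import torch

@torch.no_grad()
def retrieve_condition(video, video_encoder, bank_embeds, bank_waveforms):
    """Return the training-set audio whose embedding best matches the video.

    bank_embeds: (N, D) normalized audio embeddings of all training clips.
    bank_waveforms: list of N waveforms aligned with bank_embeds (assumed layout).
    """
    e_v = video_encoder(video)        # (D,) video embedding
    e_v = e_v / e_v.norm()            # normalize, as in AV-Sim training
    scores = bank_embeds @ e_v        # cosine similarities, shape (N,)
    best = scores.argmax().item()     # arg max over the dataset D, per Eq. (1)
    return bank_waveforms[best]
```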

For action-focused generation, we want $A_n$ to have a minimal ambient level. We find that simply filling $A_n$ with all zeros results in poor performance, likely because it is too far out of the training distribution. Instead, we find conditioning the generation on a low-ambient sound cues the model to focus on action sound generation and generate minimal ambient sound. See Sec. 5.2.

# 3.4 Audio-Visual Latent Diffusion Model

While the above idea of disentanglement is universal and not specific to any model architecture, here we instantiate this idea on diffusion models due to their success in audio generation [36, 38]. We extend the latent diffusion model to accommodate our audio-visual conditions, thus yielding an audio-visual latent diffusion model (AV-LDM).

Fig. 3 (right) shows the architecture of our model. During training, given the target audio waveform $A$, we first compute the mel-spectrogram $x_0 \in \mathbb{R}^{T \times D_{\mathrm{mel}}}$, where $D_{\mathrm{mel}}$ is the number of mel bins. We then use a pretrained Variational Autoencoder (VAE) to compress the mel-spectrogram $x_0$ into a latent representation $z_0 \in \mathbb{R}^{C' \times H' \times W'}$, where $z_0$ is the generation target of the LDM. We condition the generation on both the video feature $c_v \in \mathbb{R}^{T_v \times D_c}$ and the audio feature $c_a \in \mathbb{R}^{T_a \times D_c}$. We extract the video feature from $V$ with a pretrained video encoder (see Sec. 3.5). We extract the audio feature from the audio condition $A_n$ with the same VAE encoder and then transform it into a sequence of 1-d vectors with a multilayer perceptron (MLP).

Following [38], we use cross attention where the query is produced by $z_t$, the noisy latent at diffusion step $t$, and the key and value are produced by $\text{concat}([Pos_v + c_v; Pos_a + c_a])$, where $Pos$ denotes learnable positional embeddings.
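
The conditioning above can be sketched as follows: each condition sequence receives a learnable positional embedding, the two are concatenated along the sequence axis, and the result serves as keys/values for cross-attention from the flattened noisy latent. Dimensions follow Sec. 3.6 (16 video tokens, 24 audio tokens, width 768); the module is a simplified stand-in for the LDM's attention blocks, not our exact implementation.

```python
import torch
import torch.nn as nn

class AVCrossAttention(nn.Module):
    """Simplified cross-attention over concatenated audio-visual conditions."""

    def __init__(self, latent_dim=768, cond_dim=768, n_video=16, n_audio=24):
        super().__init__()
        self.pos_v = nn.Parameter(torch.zeros(n_video, cond_dim))  # Pos_v
        self.pos_a = nn.Parameter(torch.zeros(n_audio, cond_dim))  # Pos_a
        self.attn = nn.MultiheadAttention(latent_dim, num_heads=8,
                                          batch_first=True)

    def forward(self, z_t_tokens, c_v, c_a):
        # z_t_tokens: (B, L, 768) flattened noisy latent -> queries
        # c_v: (B, 16, 768) video features; c_a: (B, 24, 768) audio features
        cond = torch.cat([c_v + self.pos_v, c_a + self.pos_a], dim=1)
        out, _ = self.attn(query=z_t_tokens, key=cond, value=cond)
        return out
```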

The model is trained with the denoising objective:

$$
\mathcal{L} = \mathbb{E}_{t \sim \mathrm{Uniform}(1, T),\, z_0,\, \epsilon_t} \left\| \epsilon_t - \epsilon_\theta(z_t, t, c_v, c_a) \right\|^2,
$$

where $\epsilon_t$ is the standard Gaussian noise sampled for diffusion step $t$, and $\epsilon_\theta(z_t, t, c_v, c_a)$ is the model's estimate of it ($\theta$ represents the model parameters).

The reverse process can be parameterized as:

$$
p(z_T) = \mathcal{N}(0, I),
$$

$$
p_\theta(z_{t-1} \mid z_t) = \mathcal{N}\!\left(z_{t-1};\, \frac{1}{\sqrt{\alpha_t}} \Big(z_t - \frac{1 - \alpha_t}{\sqrt{1 - \bar{\alpha}_t}}\, \epsilon_\theta(z_t, t, c_v, c_a)\Big),\, \sigma_t^2 I\right),
$$

where $\alpha_t$ and $\sigma_t$ are determined by the noise schedule of the diffusion process. To generate audio during inference, we first sample standard Gaussian noise $z_T$, and then apply classifier-free guidance [22] to estimate $\tilde{\epsilon}_\theta$ as

$$
\tilde{\epsilon}_\theta(z_t, t, c_v, c_a) = \omega\, \epsilon_\theta(z_t, t, c_v, c_a) + (1 - \omega)\, \epsilon_\theta(z_t, t, \emptyset, \emptyset),
$$

where $\emptyset$ denotes a zero tensor. For the above estimate to be more accurate, during training we randomly replace $c_v$ with $\emptyset$ with probability 0.2. As for $c_a$, we found that dropping it even with a small probability harms performance, and therefore we always condition the LDM on $c_a$.

During inference, we use DPM-Solver [37] on the LDM to sample a latent representation, which is then upsampled into a mel-spectrogram by the decoder of the VAE. Lastly, we use a vocoder (HiFi-GAN [32]) to generate the waveform from the mel-spectrogram.
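
Putting these pieces together, inference could look like the sketch below: a guided noise estimate per the equation above inside a solver loop, followed by VAE decoding and vocoding. The latent shape and guidance scale follow Sec. 3.6; all module interfaces, including the `solver_step` stand-in for DPM-Solver, are assumptions for illustration.

```python
import torch

@torch.no_grad()
def generate_audio(model, vae, vocoder, c_v, c_a, T=25, omega=6.5):
    """Sketch of guided sampling -> mel-spectrogram -> waveform."""
    z = torch.randn(1, 4, 16, 24)                  # z_T ~ N(0, I), latent shape
    null_v, null_a = torch.zeros_like(c_v), torch.zeros_like(c_a)
    for t in reversed(range(T)):
        eps_c = model(z, t, c_v, c_a)              # conditional estimate
        eps_u = model(z, t, null_v, null_a)        # unconditional estimate
        eps = omega * eps_c + (1 - omega) * eps_u  # classifier-free guidance
        z = model.solver_step(z, eps, t)           # stand-in for DPM-Solver
    mel = vae.decode(z)                            # latent -> mel-spectrogram
    return vocoder(mel)                            # mel -> waveform (HiFi-GAN)
```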

# 3.5 Audio-Visual Representation Learning

Generating semantically and temporally synchronized action sounds from video requires the video encoder to capture these relevant features. In addition, we would like to train a video model and an audio model whose representations align in the embedding space to support the retrieval-augmented generation discussed in Sec. 3.3. For this purpose, we train a video encoder and audio encoder contrastively to optimize the following objective:

$$
\mathrm{AV\text{-}Sim}(A, V) = -\frac{1}{|\mathcal{B}|} \sum_{t \in \mathcal{B}} \log \frac{\exp(e_A^t \cdot e_V^t / \tau)}{\sum_{l \in \mathcal{B}} \exp(e_A^t \cdot e_V^l / \tau)},
$$

where $\mathcal{B}$ is the current batch of data, $e_A^t$ and $e_V^t$ are normalized embeddings of the audio and video features, and $\tau$ is a temperature parameter. To leverage the full power of narrations on Ego4D, we initialize the video encoder weights from models pre-trained on video and language from [35].
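
For concreteness, a minimal PyTorch sketch of this batch-contrastive objective (an InfoNCE loss over in-batch negatives) follows; the assumption is that encoder outputs are pooled clip-level embeddings.

```python
import torch
import torch.nn.functional as F

def av_contrastive_loss(e_a, e_v, tau=0.07):
    """InfoNCE over a batch of paired audio/video embeddings.

    e_a, e_v: (B, D) raw embeddings; tau is the temperature.
    """
    e_a = F.normalize(e_a, dim=-1)
    e_v = F.normalize(e_v, dim=-1)
    logits = e_a @ e_v.t() / tau           # (B, B): row t scores e_A^t vs all e_V^l
    targets = torch.arange(e_a.size(0))    # matched pair lies on the diagonal
    return F.cross_entropy(logits, targets)
```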

# 3.6 Implementation Details

We use Ego4D-Sounds (see Sec. 4) to train our AV-LDM. Video is sampled at 5 FPS and audio is sampled at $16\mathrm{kHz}$. Video is passed through the pre-trained video encoder to produce condition features $c_v \in \mathbb{R}^{16 \times 768}$. The audio waveform is transformed into a mel-spectrogram with a hop size of 256 and 128 mel bins. The mel-spectrogram is then passed to the VAE encoder with padding in the temporal dimension to produce the target $z_0 \in \mathbb{R}^{4 \times 16 \times 24}$. The audio condition is processed the same way, except that we use an additional MLP on the VAE's output to produce $c_a \in \mathbb{R}^{24 \times 768}$. We load the weights of the VAE and LDM from the pretrained Stable Diffusion to speed up training, similar to [38], and the VAE is kept frozen during training. The LDM is trained for 8 epochs with batch size 720 on Ego4D-Sounds with the AdamW optimizer with learning rate $1\mathrm{e}{-4}$. During inference, we use 25 sampling steps with classifier-free guidance scale $\omega = 6.5$. For HiFi-GAN, we train it on a combination of 0.5s segments from Ego4D [18], EPIC-KITCHENS [25], and AudioSet [16]. We use AdamW to train HiFi-GAN with a learning rate of $2\mathrm{e}{-4}$ and batch size of 64 for 120k steps. We set the number of random nearby audio samples $X = 6$. See more details in Supp.

Fig. 5: Example clips in Ego4D-Sounds ("C opens the blender", "It drops the twig in his left hand on the ground", "C paints the canvas", "C removes soil with the hoe", "C sews cloth"). We show one video frame, the action description, and the sound for each example. Note how these actions are subtle and long-tail, usually not present in typical video datasets.

<table><tr><td>Datasets</td><td>Clips</td><td>Language</td><td>Action Types</td></tr><tr><td>The Greatest Hits [42]</td><td>46.6K</td><td>×</td><td>Hit, scratch, prod</td></tr><tr><td>VGG-Sound [5]</td><td>200K</td><td>Video tags</td><td>Not action-specific</td></tr><tr><td>EPIC-SOUNDS [25]</td><td>117.6K</td><td>Audio labels</td><td>Kitchen actions</td></tr><tr><td>Ego4D-Sounds</td><td>1.2M</td><td>Action narrations</td><td>In-the-wild actions</td></tr></table>

Table 1: Comparison with other audio-visual action datasets. Ego4D-Sounds not only has one order of magnitude more clips, but it is also coupled with language descriptions, supporting evaluation of sound generation based on semantics.

# 4 The Ego4D-Sounds Dataset

Next we describe how we curate Ego4D-Sounds, an audio-video dataset for human action sound generation. Our goal is to curate a large-scale high-quality dataset with strong action-audio correspondence for action-to-sound generation, addressing the issue of limited action types and scale in the existing impact sound datasets [8, 42], as well as more general audio-video datasets [5, 25].

Ego4D [18] is an existing large-scale egocentric video dataset that has more than 3,600 hours of video recordings depicting hundreds of daily activities; 2,113 of those hours have audio available. It also has time-stamped narrations that are free-form sentences describing the current activity performed by the camera-wearer. Utilizing the narration timestamps in Ego4D to extract clips directly results in a noisy dataset, since not all clips have meaningful action sounds and there are many actions like "talk with someone", "look around", or "turn around" that have low audio-visual correspondence. To ensure Ego4D-Sounds has high action-sound correspondence, we use an automatic pipeline that consists of metadata-based filtering, audio tagging, and energy-based filtering to process all extracted clips, which yields 1.2 million audio-visual action clips plus 11k clips for evaluation. See Supp. for more details on the data processing pipeline. We show examples in Fig. 5 and a comparison with other datasets in Tab. 1.

For all resulting clips, we extract 3s of video at $224 \times 224$ image resolution and 30 FPS. For audio, we extract a single channel at a 16kHz sample rate.
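
These extraction settings map directly onto standard ffmpeg flags; the sketch below shows one plausible way to cut such a clip (the paths and helper function are illustrative, not our actual pipeline).

```python
import subprocess

def extract_clip(src, dst, start, dur=3.0):
    """Cut a clip: 224x224 video at 30 FPS, mono 16 kHz audio."""
    subprocess.run([
        "ffmpeg", "-y",
        "-ss", str(start), "-t", str(dur), "-i", src,
        "-vf", "scale=224:224", "-r", "30",   # resize and fix the frame rate
        "-ac", "1", "-ar", "16000",           # mono audio at 16 kHz
        dst,
    ], check=True)
```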

# 5 Experiments

To evaluate the performance of our model, we use the following metrics:

1. Fréchet Audio Distance (FAD) [31]: evaluates the quality of generated audio clips against ground truth audio clips by measuring the similarity between their distributions. We use the public pytorch implementation.<sup>4</sup>
2. Audio-visual synchronization (AV-Sync) [38]: a binary classification model that classifies whether the video and generated audio streams are synchronized. Following [38], we create negative examples by either shifting audio temporally or sampling audio from a different video clip. Details in Supp.
3. Contrastive language-audio pretraining (CLAP) score [50]: evaluates the semantic similarity between the generated audio and the action description. We finetune the CLAP model on the Ego4D-Sounds data and compute scores for the generated audio and the narration at test time.

These metrics measure different aspects of generation collectively, including the distribution of generated samples compared to the ground truth clips, synchronization with the video, and the semantic alignment with the action description. We compare with the following baseline methods:

1. Retrieval: we retrieve the audio from the training set using the AV-Sim model introduced in Sec. 3.5. This method represents retrieval-based generation models such as ImageBind [17].
2. REGNET [6]: a video-to-audio model that uses a bottleneck design to generate visually-relevant sounds. We run their trained model on our test set.
3. Spec-VQGAN [26]: a video-to-audio model that generates audio based on a codebook of spectrograms. We run their pre-trained model on our test set.
4. Diff-Foley [38]: a recent LDM-based model. We follow their fine-tuning steps on egocentric videos to train on our dataset.

In addition, we provide ablations: "w/o vocoder": we replace the trained HiFi-GAN vocoder with Griffin-Lim; "w/o cond": we remove the audio condition at training time; "w/o cond + denoiser": we use an off-the-shelf model to denoise the target audio$^{6}$; "w/ random test cond": we use random audio from the training set as the condition instead of retrieving the audio with the highest AV-Sim score.

# 5.1 Results on Ego4D-Sounds

First we evaluate the action-ambient joint generation setting with retrieval-augmented generation. Tab. 2 shows the results. Compared to all three baselines, we outperform them on all three metrics by a large margin. While the Retrieval baseline retrieves natural sounds from the training set and has a low FAD score compared to Spec-VQGAN and Diff-Foley, both its AV-Sync accuracy and CLAP scores are very low. Diff-Foley performs better than Spec-VQGAN since it has been trained on this task, but it still largely underperforms our model w/o cond, likely because its video features do not generalize well to the egocentric setting. REGNET performs the worst, likely due to its failure to account for strongly present ambient sounds and its assumption of a fixed taxonomy of sounds.

<table><tr><td></td><td>FAD ↓</td><td>AV-Sync (%)↑</td><td>CLAP↑</td></tr><tr><td>Ground Truth (Upper Bound)</td><td>0.0000</td><td>77.69</td><td>0.2698</td></tr><tr><td>Retrieval</td><td>1.8353</td><td>11.84</td><td>0.0335</td></tr><tr><td>REGNET [6]</td><td>8.3800</td><td>3.90</td><td>0.9900</td></tr><tr><td>Spec-VQGAN [26]</td><td>3.9017</td><td>7.12</td><td>0.0140</td></tr><tr><td>Diff-Foley [38]</td><td>3.5608</td><td>5.98</td><td>0.0346</td></tr><tr><td>Ours w/o vocoder</td><td>4.9282</td><td>29.60</td><td>0.1319</td></tr><tr><td>Ours w/o cond + denoiser</td><td>1.4676</td><td>1.09</td><td>0.0009</td></tr><tr><td>Ours w/o cond</td><td>1.4681</td><td>39.63</td><td>0.1418</td></tr><tr><td>Ours w/ random test cond</td><td>1.0635</td><td>28.74</td><td>0.1278</td></tr><tr><td>AV-LDM (Ours)</td><td>0.9999</td><td>45.74</td><td>0.1435</td></tr></table>

Table 2: Results on the Ego4D-Sounds test set. We also report the performance of the ground truth audio, which gives the upper bound value for each metric.

Fig. 6: Qualitative examples showing frames followed by the waveform/spectrogram of various baselines. Our model generates the most synchronized sounds.

For ablations, "Ours w/o cond" has a much worse FAD score compared to the full model, showing the importance of our ambient-aware training. As expected, "Ours w/o cond + denoiser" has very low scores on AV-Sync and CLAP since existing noise reduction algorithms are far from perfect. We also test our model by conditioning it on a random audio segment at test time instead of the one retrieved with the highest audio-visual similarity, and its performance also gets worse, verifying the effectiveness of our retrieval-based solution.

Fig. 7: The achieved ambient level and accuracy as a function of the input ambient levels. (a) Varying ambient level condition: the ambient level of our model's output changes according to the ambient level in the audio condition, while the ambient levels of "Ours w/o cond" and the original audio stay constant, illustrating the controllability of our model. (b) Audio generation accuracy (FAD): FAD is low for most input ambient levels unless the level goes to an extreme (too low or too high), showing our model generates high-quality action sounds even when varying output ambient levels.

We show two qualitative examples in Fig. 6 comparing our model with several baselines. Our model synthesizes both more synchronized and more plausible sounds. To fully evaluate our results, it is important to view the Supp. video.

# 5.2 Ambient Sound Control

By disentangling action sounds from ambient sounds, our model allows taking any given sound as the condition at test time. To examine whether our model truly relies on the audio condition to learn the ambient sound information, we next test the model by providing audio conditions of various ambient levels and then calculating the ambient level in the generated audio. The ambient level is defined as the lowest energy of any 0.5s audio segment in a 3s clip.
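
Under that definition, the ambient level can be computed with a short sliding-window scan; the exact energy measure (mean squared amplitude here) and the hop size are assumptions for illustration.

```python
import numpy as np

def ambient_level(wav, sr=16000, win_s=0.5, hop_s=0.1):
    """Lowest energy over all 0.5s windows of a clip (proxy for ambient level)."""
    win, hop = int(win_s * sr), int(hop_s * sr)
    energies = [
        float(np.mean(wav[i:i + win] ** 2))
        for i in range(0, len(wav) - win + 1, hop)
    ]
    return min(energies)
```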

Fig. 7 shows the results, where we also plot the ambient levels of "Ours w/o cond" and the original audio. Our model changes the ambient sound level according to the input ambient level (shown in Fig. 7a) while still synthesizing plausible action sounds (shown in Fig. 7b). FAD spikes when the condition ambient level is too low or too high, most likely because the generated ambient sound is then out of distribution, since the original audio always has some ambient sound.

Fig. 8 shows example outputs from our model and several baselines. The examples show how our model generates plausible action sounds when conditioned on a low-ambient sound for action-focused generation. We can see that the action-focused setting generates similar action sounds as the action-ambient setting while having a minimal ambient level. While by definition we lack a good evaluation of this setting (there is no ground truth audio source separation for the data), our model shows an emerging capability of generating clean action sounds although it has never been explicitly trained to do so.

Fig. 8: Visualization of action-focused generation. For both examples, Diff-Foley [38], Ours w/o cond, and Ours (action-ambient generation) generate plausible action sounds along with ambient sounds. In contrast, our model conditioned on a low-ambient sound generates plausible action sounds (see green boxes) with minimal ambient sound.

# 5.3 Human Evaluation

To further validate the performance of various models, we conduct a subjective human evaluation. In each survey, we provide 30 questions, each with 5 videos with the same visuals but different audio samples. For each video, we ask the participant to select the video(s) whose audio 1) is most semantically plausible and temporally synchronized with the video and 2) has the least ambient sound. We invite 20 participants to complete the survey and compute the average votes over all 30 examples. See the survey interface and guidelines in Supp.

<table><tr><td></td><td>Action sound quality</td><td>Least ambient sound</td></tr><tr><td>Retrieval</td><td>12.5%</td><td>12.5%</td></tr><tr><td>Diff-Foley [38]</td><td>47.5%</td><td>12.5%</td></tr><tr><td>AV-LDM w/o cond</td><td>55.0%</td><td>17.5%</td></tr><tr><td>AV-LDM (action-focused)</td><td>60.0%</td><td>97.5%</td></tr><tr><td>AV-LDM (action-ambient)</td><td>72.5%</td><td>22.5%</td></tr></table>

Table 3: Survey results showing user preferences. Higher is better. Our model in the action-ambient joint generation setting scores highest for action sound quality, showing its ability to produce action-relevant sounds despite training with in-the-wild data. Ours in the action-focused generation setting scores highest for the least ambient sound, at a slight drop in action sound quality score, showing the ability to eliminate background sounds when requested by the user.

Tab. 3 shows the results. All learning-based methods generate reasonable action sounds, yet our model (action-ambient) has the highest score for action-sound quality compared to the other methods. Although ours (action-focused) has a slightly lower action-sound score, it has significantly less ambient sound. This is likely because the low-ambient condition can sometimes lead the model to suppress some minor action sounds.

Overall, our model generates both short percussive and longer harmonic action sounds while producing the desired ambient sounds controlled by users. The model can fail sometimes in predicting more subtle action sounds, however. See the Supp. video for both success and failure examples.

# 5.4 Results on EPIC-KITCHENS

To evaluate whether our model generalizes to other datasets, we also test it on the EPIC-KITCHENS dataset. We first sample 1000 3s clips from EPIC-KITCHENS and then evaluate the Retrieval baseline, Diff-Foley, Ours w/o cond, and our full model on these clips, computing their FAD and AV-Sync scores.

<table><tr><td></td><td>GT</td><td>Retrieval</td><td>Diff-Foley</td><td>Ours w/o cond</td><td>AV-LDM (Ours)</td></tr><tr><td>FAD ↓</td><td>0.0000</td><td>1.9618</td><td>3.4649</td><td>1.4731</td><td>1.3200</td></tr><tr><td>AV-Sync (%) ↑</td><td>73.94</td><td>13.84</td><td>14.19</td><td>50.42</td><td>59.26</td></tr></table>

Table 4: Results on EPIC-KITCHENS. GT stands for Ground Truth.

Tab. 4 shows the results. Similar to what we observe on Ego4D-Sounds, our model outperforms the other models by a large margin, showing it better learns to generate action sounds from visuals, even when transferring to another dataset.

# 5.5 Demo on VR Cooking Game

One compelling application of action-to-sound generation is to generate sound effects for games in virtual reality, where simulating complex hand-object interactions is non-trivial. To examine whether our learned model generalizes to VR games, we collect game videos of the cooking VR game "Clash Of Chefs" from YouTube and test our model without fine-tuning. Preliminary results suggest our model can generate synchronized action sounds (see Fig. 9 and Supp.). Though there remains much work to do, this suggests a promising future in learning action-to-sound models from real-world egocentric videos and applying them to VR games to give a game user an immersive audio-visual experience that dynamically adjusts to their own actions.

Fig. 9: We apply our model to a VR cooking game clip where the person cuts a sushi roll three times. Our model successfully predicts the 3 cutting sounds.

# 6 Conclusion

We investigate the problem of generating sounds for human actions in egocentric videos. We propose an ambient-aware approach that disentangles the action sound from the ambient sound, allowing successful generation after training with diverse in-the-wild data, as well as controllable conditioning on ambient sound levels. We show that our model outperforms existing methods and baselines, both quantitatively and through human subject studies. Overall, it significantly broadens the scope of relevant training sources for achieving action-precise sound generation. In future work we aim to explore the possibilities for sim2real translation of our learned audio generation models to synthetic imagery inputs, e.g., for VR game applications.

Acknowledgments: UT Austin is supported in part by the IFML NSF AI Institute. Wei-Ning Hsu helped advise the project only, and all the work and data processing were done outside of Meta.

# References

1. Blattmann, A., Rombach, R., Oktay, K., Ommer, B.: Retrieval-augmented diffusion models. arXiv abs/2204.11824 (2022), https://api.semanticscholar.org/CorpusID:248377386
2. Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., van den Driessche, G., Lespiau, J.B., Damoc, B., Clark, A., de Las Casas, D., Guy, A., Menick, J., Ring, R., Hennigan, T.W., Huang, S., Maggiore, L., Jones, C., Cassirer, A., Brock, A., Paganini, M., Irving, G., Vinyals, O., Osindero, S., Simonyan, K., Rae, J.W., Elsen, E., Sifre, L.: Improving language models by retrieving from trillions of tokens. In: International Conference on Machine Learning (2021), https://api.semanticscholar.org/CorpusID:244954723
3. Chen, C., Ashutosh, K., Girdhar, R., Harwath, D., Grauman, K.: SoundingActions: Learning how actions sound from narrated egocentric videos. In: CVPR (2024)
4. Chen, C., Schissler, C., Garg, S., Kobernik, P., Clegg, A., Calamia, P., Batra, D., Robinson, P.W., Grauman, K.: SoundSpaces 2.0: A simulation platform for visual-acoustic learning. In: NeurIPS (2023)
5. Chen, H., Xie, W., Vedaldi, A., Zisserman, A.: VGGSound: A large-scale audio-visual dataset. In: ICASSP (2020)
6. Chen, P., Zhang, Y., Tan, M., Xiao, H., Huang, D., Gan, C.: Generating visually aligned sound from videos. TIP (2020)
7. Chen, W., Hu, H., Saharia, C., Cohen, W.W.: Re-Imagen: Retrieval-augmented text-to-image generator. arXiv abs/2209.14491 (2022), https://api.semanticscholar.org/CorpusID:252596087
8. Clarke, S., Gao, R., Wang, M., Rau, M., Xu, J., Wang, J.H., James, D.L., Wu, J.: RealImpact: A dataset of impact sound fields for real objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2023)
9. Clarke, S., Heravi, N., Rau, M., Gao, R., Wu, J., James, D., Bohg, J.: DiffImpact: Differentiable rendering and identification of impact sounds. In: 5th Annual Conference on Robot Learning (2021)
10. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T.: Scaling egocentric vision: The EPIC-KITCHENS dataset. In: ECCV (2018)
11. Dhariwal, P., Nichol, A.: Diffusion models beat GANs on image synthesis. arXiv abs/2105.05233 (2021), https://api.semanticscholar.org/CorpusID:234357997
12. Du, Y., Chen, Z., Salamon, J., Russell, B., Owens, A.: Conditional generation of audio from video via Foley analogies. In: 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 2426-2436 (2023)
13. Caba Heilbron, F., Escorcia, V., Ghanem, B., Niebles, J.C.: ActivityNet: A large-scale video benchmark for human activity understanding. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 961-970 (2015)
14. Gan, C., Schwartz, J., Alter, S., Mrowca, D., Schrimpf, M., Traer, J., De Freitas, J., Kubilius, J., Bhandwaldar, A., Haber, N., Sano, M., Kim, K., Wang, E., Lingelbach, M., Curtis, A., Feigelis, K., Bear, D.M., Gutfreund, D., Cox, D., Torralba, A., DiCarlo, J.J., Tenenbaum, J.B., McDermott, J.H., Yamins, D.L.K.: ThreeDWorld: A platform for interactive multi-modal physical simulation. In: NeurIPS Datasets and Benchmarks Track (2021)
15. Gandhi, D., Gupta, A., Pinto, L.: Swoosh! Rattle! Thump! - Actions that sound. In: RSS (2022)
16. Gemmeke, J.F., Ellis, D.P.W., Freedman, D., Jansen, A., Lawrence, W., Moore, R.C., Plakal, M., Ritter, M.: Audio Set: An ontology and human-labeled dataset for audio events. In: 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 776-780 (2017)
17. Girdhar, R., El-Nouby, A., Liu, Z., Singh, M., Alwala, K.V., Joulin, A., Misra, I.: ImageBind: One embedding space to bind them all. In: CVPR (2023)
18. Grauman, K., Westbury, A., Byrne, E., Chavis, Z., Furnari, A., Girdhar, R., Hamburger, J., Jiang, H., Liu, M., Liu, X., Martin, M., Nagarajan, T., Radosavovic, I., Ramakrishnan, S.K., Ryan, F., Sharma, J., Wray, M., Xu, M., Xu, E.Z., Zhao, C., Bansal, S., Batra, D., Cartillier, V., Crane, S., Do, T., Doulaty, M., Erapalli, A., Feichtenhofer, C., Fragomeni, A., Fu, Q., Gebreselasie, A., Gonzalez, C., Hillis, J., Huang, X., Huang, Y., Jia, W., Khoo, W., Kolar, J., Kottur, S., Kumar, A., Landini, F., Li, C., Li, Y., Li, Z., Mangalam, K., Modhugu, R., Munro, J., Murrell, T., Nishiyasu, T., Price, W., Puentes, P.R., Ramazanova, M., Sari, L., Somasundaram, K., Southerland, A., Sugano, Y., Tao, R., Vo, M., Wang, Y., Wu, X., Yagi, T., Zhao, Z., Zhu, Y., Arbelaez, P., Crandall, D., Damen, D., Farinella, G.M., Fuegen, C., Ghanem, B., Ithapu, V.K., Jawahar, C.V., Joo, H., Kitani, K., Li, H., Newcombe, R., Oliva, A., Park, H.S., Rehg, J.M., Sato, Y., Shi, J., Shou, M.Z., Torralba, A., Torresani, L., Yan, M., Malik, J.: Ego4D: Around the world in 3,000 hours of egocentric video. In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 18973-18990 (2022)
19. Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., Byrne, E., Chavis, Z., Chen, J., Cheng, F., Chu, F.J., Crane, S., Dasgupta, A., Dong, J., Escobar, M., Forigua, C., Gebreselasie, A., Haresh, S., Huang, J., Islam, M.M., Jain, S., Khirodkar, R., Kukreja, D., Liang, K.J., Liu, J.W., Majumder, S., Mao, Y., Martin, M., Mavroudi, E., Nagarajan, T., Ragusa, F., Ramakrishnan, S.K., Seminar, L., Somayazulu, A., Song, Y., Su, S., Xue, Z., Zhang, E., Zhang, J., Castillo, A., Chen, C., Fu, X., Furuta, R., Gonzalez, C., Gupta, P., Hu, J., Huang, Y., Huang, Y., Khoo, W., Kumar, A., Kuo, R., Lakhavani, S., Liu, M., Luo, M., Luo, Z., Meredith, B., Miller, A., Oguntola, O., Pan, X., Peng, P., Pramanick, S., Ramazanova, M., Ryan, F., Shan, W., Somasundaram, K., Song, C., Southerland, A., Tateno, M., Wang, H., Wang, Y., Yagi, T., Yan, M., Yang, X., Yu, Z., Zha, S.C., Zhao, C., Zhao, Z., Zhu, Z., Zhuo, J., Arbelaez, P., Bertasius, G., Crandall, D., Damen, D., Engel, J., Farinella, G.M., Furnari, A., Ghanem, B., Hoffman, J., Jawahar, C.V., Newcombe, R., Park, H.S., Rehg, J.M., Sato, Y., Savva, M., Shi, J., Shou, M.Z., Wray, M.: Ego-Exo4D: Understanding skilled human activity from first- and third-person perspectives. In: CVPR (2024)
20. Guu, K., Lee, K., Tung, Z., Pasupat, P., Chang, M.W.: REALM: Retrieval-augmented language model pre-training. arXiv abs/2002.08909 (2020), https://api.semanticscholar.org/CorpusID:211204736
21. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: NeurIPS (2020)
22. Ho, J., Salimans, T.: Classifier-free diffusion guidance (2022)
23. Huang, C., Tian, Y., Kumar, A., Xu, C.: Egocentric audio-visual object localization. In: CVPR (2023)
24. Huang, R., Huang, J.B., Yang, D., Ren, Y., Liu, L., Li, M., Ye, Z., Liu, J., Yin, X., Zhao, Z.: Make-An-Audio: Text-to-audio generation with prompt-enhanced diffusion models. arXiv abs/2301.12661 (2023), https://api.semanticscholar.org/CorpusID:256390046
25. Huh, J., Chalk, J., Kazakos, E., Damen, D., Zisserman, A.: EPIC-SOUNDS: A large-scale dataset of actions that sound. In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 1-5 (2023)
26. Iashin, V., Rahtu, E.: Taming visually guided sound generation. In: BMVC (2021)
27. Jiang, H., Murdock, C., Ithapu, V.K.: Egocentric deep multi-channel audio-visual active speaker localization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10544-10552 (2022)
28. Kay, W., Carreira, J., Simonyan, K., Zhang, B., Hillier, C., Vijayanarasimhan, S., Viola, F., Green, T., Back, T., Natsev, P., Suleyman, M., Zisserman, A.: The Kinetics human action video dataset. CoRR abs/1705.06950 (2017), http://arxiv.org/abs/1705.06950
29. Kazakos, E., Nagrani, A., Zisserman, A., Damen, D.: EPIC-Fusion: Audio-visual temporal binding for egocentric action recognition. In: 2019 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 5491-5500 (2019)
30. Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., Lewis, M.: Generalization through memorization: Nearest neighbor language models. arXiv abs/1911.00172 (2019), https://api.semanticscholar.org/CorpusID:207870430
31. Kilgour, K., Zuluaga, M., Roblek, D., Sharifi, M.: Fréchet audio distance: A metric for evaluating music enhancement algorithms. arXiv preprint (2018)
32. Kong, J., Kim, J., Bae, J.: HiFi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis. Advances in Neural Information Processing Systems 33, 17022-17033 (2020)
33. Kong, Z., Ping, W., Huang, J., Zhao, K., Catanzaro, B.: DiffWave: A versatile diffusion model for audio synthesis. arXiv abs/2009.09761 (2020), https://api.semanticscholar.org/CorpusID:221818900
34. Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., Küttler, H., Lewis, M., Yih, W.t., Rocktäschel, T., Riedel, S., Kiela, D.: Retrieval-augmented generation for knowledge-intensive NLP tasks. arXiv abs/2005.11401 (2020), https://api.semanticscholar.org/CorpusID:218869575
35. Lin, K.Q., Wang, J., Soldan, M., Wray, M., Yan, R., Xu, Z., Gao, D., Tu, R.C., Zhao, W., Kong, W., Cai, C., HongFa, W., Damen, D., Ghanem, B., Liu, W., Shou, M.Z.: Egocentric video-language pretraining. In: Advances in Neural Information Processing Systems (2022)
36. Liu, H., Chen, Z., Yuan, Y., Mei, X., Liu, X., Mandic, D.P., Wang, W., Plumbley, M.: AudioLDM: Text-to-audio generation with latent diffusion models. In: International Conference on Machine Learning (2023), https://api.semanticscholar.org/CorpusID:256390486
|
| 312 |
+
37. Lu, C., Zhou, Y., Bao, F., Chen, J., Li, C., Zhu, J.: Dpm-solver: A fast ode solver for diffusion probabilistic model sampling in around 10 steps. arXiv preprint arXiv:2206.00927 (2022)
|
| 313 |
+
38. Luo, S., Yan, C., Hu, C., Zhao, H.: Diff-foley: Synchronized video-to-audio synthesis with latent diffusion models. In: NeurIPS (2023)
|
| 314 |
+
39. Majumder, S., Al-Halah, Z., Grauman, K.: Learning spatial features from audiovisual correspondence in egocentric videos. In: CVPR (2024)
|
| 315 |
+
|
| 316 |
+
40. Mittal, H., Morgado, P., Jain, U., Gupta, A.: Learning state-aware visual representations from audible interactions. In: Oh, A.H., Agarwal, A., Belgrave, D., Cho, K. (eds.) Advances in Neural Information Processing Systems (2022), https://openreview.net/forum?id=AhbTKB1M7X
|
| 317 |
+
41. Nichol, A., Dhariwal, P., Ramesh, A., Shyam, P., Mishkin, P., McGrew, B., Sutskever, I., Chen, M.: Glide: Towards photorealistic image generation and editing with text-guided diffusion models. In: International Conference on Machine Learning (2021), https://api-semanticscholar.org/CorpusID:245335086
|
| 318 |
+
42. Owens, A., Isola, P., McDermott, J., Torralba, A., Adelson, E.H., Freeman, W.T.: Visually indicated sounds. In: CVPR (2016)
|
| 319 |
+
43. Popov, V., Vovk, I., Gogoryan, V., Sadekova, T., Kudinov, M.A.: Grad-tts: A diffusion probabilistic model for text-to-speech. In: International Conference on Machine Learning (2021), https://api-semanticscholar.org/CorpusID:234483016
|
| 320 |
+
44. Ramazanova, M., Escorcia, V., Heilbron, F.C., Zhao, C., Ghanem, B.: Owl (observe, watch, listen): Localizing actions in egocentric video via audiovisual temporal context (2022)
|
| 321 |
+
45. Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E.L., Ghasemipour, S.K.S., Ayan, B.K., Mahdavi, S.S., Lopes, R.G., Salimans, T., Ho, J., Fleet, D.J., Norouzi, M.: Photorealistic text-to-image diffusion models with deep language understanding. ArXiv abs/2205.11487 (2022), https://api-semanticscholar.org/CorpusID:248986576
|
| 322 |
+
46. Song, Y., Ermon, S.: Generative modeling by estimating gradients of the data distribution. In: Neural Information Processing Systems (2019), https://apisemantic scholar.org/CorpusID:196470871
|
| 323 |
+
47. Soomro, K., Zamir, A.R., Shah, M.: Ucf101: A dataset of 101 human actions classes from videos in the wild. CoRR (2012)
|
| 324 |
+
48. Su, K., Qian, K., Shlizerman, E., Torralba, A., Gan, C.: Physics-driven diffusion models for impact sound synthesis from videos. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) pp. 9749-9759 (2023), https://api-semanticscholar.org/CorpusID:257805229
|
| 325 |
+
49. Wang, D., Chen, J.: Supervised speech separation based on deep learning: An overview. arxiv (201)
|
| 326 |
+
50. Wu*, Y., Chen*, K., Zhang*, T., Hui*, Y., Berg-Kirkpatrick, T., Dubnov, S.: Large-scale contrastive language-audio pretraining with feature fusion and keyword-to-caption augmentation. In: IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP (2023)
|
| 327 |
+
51. Yang, D., Yu, J., Wang, H., Wang, W., Weng, C., Zou, Y., Yu, D.: Diffsound: Discrete diffusion model for text-to-sound generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing 31, 1720-1733 (2022), https://api-semanticscholar.org/CorpusID:250698823
|
action2soundambientawaregenerationofactionsoundsfromegocentricvideos/images.zip ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a50a282622146e98aab74c33cff02c01a05bb93c262fb9ad8952bc4ecaa2e5f4
size 572160

action2soundambientawaregenerationofactionsoundsfromegocentricvideos/layout.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:405bfa07b808fbe78f2dc257f0e2486fdf39b0988986c317a5e4ea94fa84c895
size 419635

actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/1c1c678d-9f58-4921-b267-ac0f538d6306_content_list.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8b1ce32d053fa39e1177fe335acb58431cdfd1935e82b838b5eccb1d29baff47
size 81110

actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/1c1c678d-9f58-4921-b267-ac0f538d6306_model.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ce4f5ac93ea22bcf23df6d0c2a72081bfe70cc77eb9cf4266fe8e8756cf437e1
size 101047

actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/1c1c678d-9f58-4921-b267-ac0f538d6306_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:86a237741096a2e7caeeb46670efd62e86e395df03d5fba9f8e805f9a217de35
size 2939744

actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/full.md ADDED
@@ -0,0 +1,285 @@

# ActionSwitch: Class-agnostic Detection of Simultaneous Actions in Streaming Videos

Hyolim Kang, Jeongseok Hyun, Joungbin An, Youngjae Yu, and Seon Joo Kim

Yonsei University

Abstract. Online Temporal Action Localization (On-TAL) is a critical task that aims to instantaneously identify action instances in untrimmed streaming videos as soon as an action concludes, a major leap from frame-based Online Action Detection (OAD). Yet, the challenge of detecting overlapping actions is often overlooked even though it is a common scenario in streaming videos. Current methods that can address concurrent actions depend heavily on class information, limiting their flexibility. This paper introduces ActionSwitch, the first class-agnostic On-TAL framework capable of detecting overlapping actions. By obviating the reliance on class information, ActionSwitch provides wider applicability to various situations, including overlapping actions of the same class and scenarios where class information is unavailable. This approach is complemented by the proposed "conservativeness loss", which directly embeds a conservative decision-making principle into the loss function for On-TAL. Our ActionSwitch achieves state-of-the-art performance on challenging datasets, including Epic-Kitchens 100, which targets the difficult egocentric view, and FineAction, which consists of fine-grained actions.

Keywords: Online Video Understanding $\cdot$ Class-agnostic Detection

# 1 Introduction

As the demand for autonomous driving systems [22], robotics applications [27], and egocentric perception [9] continues to grow, the importance of online video understanding tasks has become more pronounced. Online Action Detection (OAD) [10] is a pivotal online video understanding task that has gained considerable attention [1,13,15,24,45,48,49], as it aims to detect the action class of each frame in a streaming video. However, merely assigning class scores to individual frames does not capture the essence of actions in the video, which inherently extend over multiple frames and form action instances. This instance-level understanding is the de facto standard in conventional video understanding tasks, including Temporal Action Localization (TAL) [28,31,37,47,53].

To provide instance-level understanding in streaming videos, Online Temporal Action Localization (On-TAL) [20, 23, 25, 41] has recently been proposed. Its goal is to instantaneously identify action instances in untrimmed streaming video, accurately determining the start and end times and classifying each instance as soon as it concludes.

Fig. 1: Overview of the ActionSwitch framework: the state label is derived from the sum of the IDs of the activated switches. For example, the state is labeled '3' between t2 and t3, when switches '1' and '2' are simultaneously active, whereas it registers as '2' from t3 to t4, when only switch '2' is active. State changes signify action instance boundaries, and our 'conservativeness loss' minimizes state fluctuations to improve detection accuracy.

While the use of (soft-) non-maximum suppression (NMS) [3] is indispensable in conventional TAL, the online constraints of On-TAL preclude any retrospective modification after the initial generation of action instances. This requirement, coupled with the need for instant detection, poses challenges in directly applying TAL methods to On-TAL.

Thus, a straightforward approach is to extend the OAD framework: threshold the per-frame output of an OAD model and aggregate these outputs to generate action instances. Nonetheless, simple threshold-based grouping of either a class-agnostic or a class-aware OAD model gives rise to two significant challenges: i) the difficulty of detecting actions occurring simultaneously, and ii) the generation of fragmented and noisy action instances [20]. The main objective of this work is to explore these challenges and offer effective solutions.

Detecting overlapping action instances in streaming videos, despite their frequent occurrence, remains relatively underexplored. This aspect holds considerable importance, as it harmonizes with the intuition that actions are intricately connected [51]. In particular, an extension of a class-agnostic OAD model [20] is entirely incapable of recognizing concurrent actions. While a class-aware model [41] may attempt to do so, it faces considerable limitations, largely because class-aware models rely on independently grouping per-class scores to generate action instances. As a result, their ability to detect overlapping instances is intricately tied to the predefined action classes of a dataset. The Epic-Kitchens dataset [9], with its many overlapping instances within the same class, highlights a limitation that class-aware OAD models struggle to address. Additionally, these models are highly sensitive to predetermined thresholds, a challenge that intensifies with an increase in the number of classes. Moreover, the dependence on class information to distinguish action instances presents difficulties in general On-TAL applications, especially when predefining all possible action classes is not practical. The unique advantage of disentangling class information from action proposal generation is further elaborated in Sec. 2.3.

To this end, we propose the first class-agnostic On-TAL framework capable of detecting overlapping action instances. We begin by considering a machine with multiple switches (Fig. 1), namely ActionSwitch. Each switch performs class-agnostic yet mutually exclusive action instance detection, where an activated switch indicates an ongoing action being detected. By incorporating multiple switches, our system adeptly detects overlapping instances, all without necessitating class information. To instantiate this concept, we devise a finite state machine corresponding to our ActionSwitch framework. Leveraging a conventional OAD framework that outputs state labels for each frame, we create what we refer to as a state-emitting OAD. We can effectively generate action instances online by appropriately grouping the framewise output of the state-emitting OAD model, even if there are overlaps among them.

Another remaining challenge is noisy and fragmented action proposal generation, a byproduct of the discrepancy between the frame-centric nature of OAD and the instance-level demands of On-TAL. From an instance-level perspective, even a single erroneous decision can shatter the continuity of an action instance, which is particularly problematic when identifying long action instances. Consequently, the agent must be conservative in altering its decisions. Past approaches [20, 41] learned this principle from data by modeling the decision context, yet we advocate a more direct method: directly infusing conservatism into the loss function. We propose the conservativeness loss, an auxiliary loss term that encourages the agent to depend on its previous decision. This method integrates conservatism into the standard loss function without necessitating complex architectural modifications.

We conduct comprehensive experiments on three primary action localization datasets [9, 19, 32] to validate our proposed method's efficacy and to establish robust baselines for future On-TAL research. It is also worth noting that while the majority of action localization datasets (including the above three) require the ActionSwitch framework with just two switches, configurations with three or more switches are also tested on the MultiTHUMOS [51] dataset, providing valuable insight for further research.

Our contributions can be summarized as follows:

- We introduce ActionSwitch, the first class-agnostic On-TAL framework capable of detecting overlapping action instances by incorporating a finite state machine concept.
- We introduce the conservativeness loss to address the challenge of noisy and fragmented action proposals, effectively incorporating conservatism directly into the loss function with minimal modification.
- We demonstrate the effectiveness of ActionSwitch through experimental results on multiple action localization datasets [9,19,30,32,51] and extensive ablation studies.

# 2 Related Work

# 2.1 Streaming Video Understanding

Online Action Detection (OAD) [10] is a well-established task in online video understanding that requires identifying the action class of the current input frame in a streaming setting. A plethora of work [1,6,11,45,48-50,55] has been introduced, mainly focusing on temporal modeling of the past visual context. Despite this progress, the frame-centric approach of OAD and its evaluation with per-frame mean Average Precision (mAP) fall short of addressing the needs of real-world streaming video analysis, which necessitates instance-level recognition.

Online Detection of Action Start (ODAS) [14, 36], on the other hand, is a task that pinpoints the initial timestep of each action within a streaming video. By retrieving one unique timestep for each action instance, ODAS implicitly conveys the concept of action instances. However, its sole focus on the initiation point of each action instance restricts its broader applicability.

# 2.2 Online Temporal Action Localization

Online Temporal Action Localization (On-TAL) targets real-time identification of action instances in streaming video. A direct method involves extending the OAD framework [20, 41] for On-TAL, which necessitates incorporating the agent's decision history to produce accurate action instances. Several approaches have been devised to tackle this challenge, including a unique grouping module complemented by a distinct training strategy [20], as well as a decision context token [41]. The exact processes for generating action instances in these methods are detailed in our supplementary materials to make the paper self-contained.

On the other hand, the TAL paradigm has been adapted for On-TAL using data-driven online filtering [23], which mimics an online version of NMS. Yet, all these methods rely on class-specific information to separate concurrent actions. For example, SimOn [41] groups same-class decisions to generate action instances, whereas OAT [23] uses a handcrafted threshold value to explicitly suppress overlapping action instances of the same class, indicating that neither can handle overlapping action instances of the same action class. In contrast, our ActionSwitch framework is the first On-TAL approach to identify overlapping actions irrespective of class information.

# 2.3 Class-agnostic Detection

Class-agnostic proposal generation is already a widely accepted standard in both the Object Detection (OD) [8, 16, 40] and TAL [28, 47, 52] literature, and recent works highlight the unique benefits of disentangling proposal generation from classification. OLN [21] suggests that focusing on localization without class constraints enhances generalization, and Maaz et al. [33] show the benefits of a class-agnostic detector for open-world detection and self-supervised learning [2]. In videos, recognizing class-agnostic event boundaries, as discussed in [35], aligns with human perception, which does not depend on predefined action classes. These recent advances [18, 44] and the flourishing development of video-language models [34] validate our approach of separating action proposal generation from classification in On-TAL, indicating the potential for a more flexible framework suited to open-world and open-vocabulary contexts.

# 3 Methodology

# 3.1 Problem Setting

**On-TAL** Let us consider an untrimmed video $V = \{x_{\tau}\}_{\tau=1}^{T}$ with $M$ action instances $\Psi = \{\psi_m\}_{m=1}^M = \{(t_m^s, t_m^e, c_m)\}_{m=1}^M$, given in a streaming format. $x_{\tau}$ denotes the $\tau$-th frame, $t_m^s$ and $t_m^e$ represent the start and end timesteps, and $c_m$ is the class label of the $m$-th action instance $\psi_m$. Following previous conventions [20, 28, 45, 47], every $k$ consecutive frames are converted to a $D$-dimensional visual feature sequence $f \in \mathbb{R}^{\lfloor \frac{T}{k} \rfloor \times D}$ with a pretrained snippet encoder. Subsequent operations, including online action instance generation, are performed on this feature sequence. The goal of the On-TAL task is to generate and accumulate action proposals $\psi$ as soon as their completion is detected, aiming to reconstruct $\Psi$ without retrospective modification to each $\psi$.

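To make this protocol concrete, here is a minimal sketch of the On-TAL loop under the above definitions; `ActionInstance` and the per-timestep `model.step` API are hypothetical names introduced only for illustration, not part of any existing codebase.

```python
from dataclasses import dataclass
from typing import Iterable, Iterator, List, Tuple

@dataclass(frozen=True)
class ActionInstance:
    t_s: int  # start timestep t_m^s
    t_e: int  # end timestep t_m^e
    c: int    # class label c_m (dropped in the class-agnostic setting)

def run_on_tal(model, feature_stream: Iterable) -> Iterator[ActionInstance]:
    """Stream snippet features f_t through a (hypothetical) On-TAL model.

    Each yielded proposal is final: the online constraint forbids any
    retrospective modification once an instance has been emitted.
    """
    for t, f_t in enumerate(feature_stream):
        finished: List[Tuple[int, int, int]] = model.step(t, f_t)  # hypothetical API
        for t_s, t_e, c in finished:
            yield ActionInstance(t_s, t_e, c)
```
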
**OAD-extension vs TAL-extension** While On-TAL requires immediate identification of action endpoints, it allows flexible timing in identifying action startpoints. This flexibility is exploited by the TAL-based approach [23]: it identifies action starts at the time of action instance generation, thereby benefiting from rich context. In contrast, OAD-based methods [20, 41] are inherently designed to recognize the start of actions in real time, often working within a highly constrained initial context. This distinction emphasizes the unique role of OAD-based methods in On-TAL: they can adeptly handle early action detection, a task TAL-based methods cannot perform. In particular, promptly identifying an action's start extends the On-TAL model's utility to scenarios where immediate detection of an action's initiation is critical, an aspect tackled in Online Detection of Action Start (ODAS) [36].

# 3.2 State-emitting OAD Model

In order to address the class-agnostic On-TAL problem, we begin by extending class-agnostic OAD models, following the baseline approach described in [20]. In this setting, the OAD model produces a framewise actionness decision: 0 indicates the absence of action, and 1 the presence of action. Action start and end can be instantaneously determined by detecting changes from 0 to 1 and from 1 to 0, respectively. Additionally, the progress status of an action, which is helpful for many practical applications, can be easily calculated by measuring the distance between the action's start timestep and the current timestep.

Fig. 2: (a) State diagram of the ActionSwitch framework. Some connections are omitted for simplicity. (b) Overall architecture of the state-emitting OAD model.

The primary challenge with this approach is its inability to detect overlapping action instances, as decisions are based solely on the presence of the action in the current frame. Aggregating these binary action decisions tends to merge multiple overlapping instances into a single instance. A possible solution is to use multiple OAD models for mutually exclusive action instance detection. For instance, in a setup with two OAD models and a single action, one model should signal "no action" when the other detects the action. However, this interdependence of the models' decisions makes the implementation complicated.

Instead, we abstract the concept into a single machine with multiple switches (Fig. 2 (a)). A two-switch configuration is exemplified for simplicity, although configurations with three or more switches are also feasible (see Section 4.5). Switch 1 is activated when the first action is detected, and switch 2 is activated when another action is detected while switch 1 is still active. The finite state machine corresponding to this machine has four states: i) no switch activated, ii) switch 1 activated, iii) switch 2 activated, and iv) both switches activated. These states are illustrated in the State Table in Fig. 2 (a). State transitions indicate the commencement or termination of a certain action instance, enabling the model to determine the start, end, or progress status of multiple ongoing actions in real time by analyzing the stored state history.

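As a quick sanity check of the state table in Fig. 2 (a), the snippet below enumerates the four states of the two-switch machine, assuming the sum-of-IDs labeling described in Fig. 1:

```python
from itertools import product

# The 2-switch state table of Fig. 2 (a): switch i contributes its id i to the
# label, so (off, off) -> 0, (1 on) -> 1, (2 on) -> 2, (both on) -> 3.
for sw2, sw1 in product((0, 1), repeat=2):
    print(f"switch1={sw1} switch2={sw2} -> state {1 * sw1 + 2 * sw2}")
```
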
In order to realize this concept using neural networks, we need to make an important design choice regarding the interpretation of the network's output. For simplicity, we directly interpret the output of the network as the state label, yielding a state-emitting OAD model (Fig. 2 (b)). At each timestep $t$, $f_{t} \in \mathbb{R}^{D}$ is fed into the uni-directional sequential encoder, producing a hidden state $g_{t} \in \mathbb{R}^{D}$. Subsequently, the categorical probability distribution $p_t$ is generated by $p_t = \text{softmax}(\mathsf{SC}(g_t))$, where $\mathsf{SC}$ denotes a State Classifier $\mathsf{SC}: \mathbb{R}^D \to \mathbb{R}^S$ that can be any arbitrary neural network, with $S$ representing the number of states. The state corresponding to the current timestep $t$ is obtained by simply calculating $s_t = \operatorname{argmax}(p_t)$. Note that the $\operatorname{argmax}$ operation eliminates the need for handcrafted threshold selection, which was the primary problem of previous works [23, 41].

Fig. 3: Training process in ActionSwitch. $CE$ and $\mathcal{L}_c$ denote the terms in Eq. 2. The GT state and the conservative pseudo-state are used for training. GT states are encoded from GT action instances, while the pseudo-states come from the model's own predictions. At t6, action 1 is temporarily lost, which would fragment the action instance. With our conservativeness loss, however, the output of action instances becomes robust against such fragmentation (t6) and noisy output (t11).

Algorithm 1 Conservativeness loss in a PyTorch style.
```python
import torch
import torch.nn.functional as F

def conservativeness_loss(logits):
    # logits: tensor in the shape of (B, L, n_state)
    # returns the loss penalized at context changes (cc); a guard for the
    # no-change case is omitted for brevity
    pred_state = torch.argmax(logits, dim=2)            # (B, L) framewise states
    cc_mask = pred_state[:, 1:] != pred_state[:, :-1]   # where the state changed
    cc_targets = pred_state[:, :-1][cc_mask]            # pseudo-label: previous state
    cc_logits = logits[:, 1:][cc_mask]                  # logits at changing timesteps
    return F.cross_entropy(cc_logits, cc_targets)
```

# 3.3 Conservativeness Loss

Action boundaries are typically much less frequent than non-boundary frames, a phenomenon widely observed in most videos. Leveraging this prior is clearly advantageous when grouping framewise OAD decisions into action instances. Previous methods [20,41] attempted to learn this prior by modeling the decision history. Our approach, on the other hand, encodes it directly into the loss function, leading to the novel conservativeness loss term $\mathcal{L}_c$:

$$
\mathcal{L}_{c}\left(p_{t}, s_{t-1}\right) = \begin{cases} -\log\left(p_{t}\left[s_{t-1}\right]\right), & \text{if } \operatorname{argmax}\left(p_{t}\right) \neq s_{t-1}; \\ 0, & \text{otherwise,} \end{cases} \tag{1}
$$

This loss term penalizes state changes by applying a standard cross-entropy loss with the pseudo-label $s_{t-1}$, an output of the model's own prediction at the previous step. Note that this loss term is only imposed at context-changing timesteps, as shown in Fig. 3. A significant advantage of this loss term is its simplicity: it integrates seamlessly into the standard OAD framework for the On-TAL extension without necessitating any additional modules or architectural modifications. Furthermore, the loss term can be implemented in just a few lines of code (Algorithm 1), making it a practical and efficient solution.

With a ground truth state label $y_{t}$, the final loss term $\mathcal{L}$ for each timestep $t$ is defined as follows:

$$
\mathcal{L}\left(p_{t}, y_{t}, s_{t-1}\right) = CE\left(p_{t}, y_{t}\right) + \alpha \mathcal{L}_{c}\left(p_{t}, s_{t-1}\right), \tag{2}
$$

where $CE$ refers to the typical cross-entropy term and $\alpha$ denotes a weight that balances both losses. For training, we generate framewise ground truth state labels from the ground truth action instances (Encode Action in Fig. 3), assuming that switch 1 is activated first and then switch 2 to avoid ambiguity.

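Putting Eq. 2 together with Algorithm 1, a minimal training-loss sketch might look as follows; the empty-mask guard and the default $\alpha = 0.025$ (the best THUMOS14 row in Tab. 5) are our assumptions, not a prescription from the paper.

```python
import torch
import torch.nn.functional as F

def training_loss(logits, gt_states, alpha=0.025):
    """A sketch of Eq. 2: framewise CE plus the conservativeness term.

    logits: (B, L, n_state) state logits; gt_states: (B, L) encoded GT state labels.
    """
    ce = F.cross_entropy(logits.flatten(0, 1), gt_states.flatten())
    pred_state = torch.argmax(logits, dim=2)              # model's own decisions
    cc_mask = pred_state[:, 1:] != pred_state[:, :-1]     # context-changing timesteps
    if cc_mask.any():                                     # guard: no change -> L_c = 0
        l_c = F.cross_entropy(logits[:, 1:][cc_mask], pred_state[:, :-1][cc_mask])
    else:
        l_c = logits.new_zeros(())
    return ce + alpha * l_c
```
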
During the inference stage, we store $s_t$ for each timestep $t$ in a history queue. By comparing $s_t$ and $s_{t-1}$, we can instantly infer the start and end timesteps. The history queue can then be decoded into action instances using a straightforward algorithm (Decode State in Fig. 2 (b) and Fig. 3), which we elaborate on in our supplementary material. This approach leaves no room for boundary mismatches, as the predicted state label sequence corresponds to exactly one action scenario and hence does not need a separate boundary-matching module. This significantly differentiates our approach from boundary-matching based algorithms [28, 29], which involve exhaustive matching between boundaries and filtering processes.

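To make the encode/decode round trip concrete, below is a sketch of both steps for the two-switch case. It assumes a bitmask state convention (switch $i$ contributes $2^{i-1}$, which coincides with Fig. 1's sum-of-IDs labels for two switches) and a greedy lowest-free-switch assignment, mirroring the rule that switch 1 is activated before switch 2; the authors' exact decoding algorithm is in their supplementary material.

```python
import numpy as np

def encode_states(instances, num_frames, num_switches=2):
    """'Encode Action' (Fig. 3): paint GT (t_s, t_e) intervals into state labels."""
    assignment = {}                                   # instance index -> switch id
    states = np.zeros(num_frames, dtype=np.int64)
    for t in range(num_frames):
        covering = {i for i, (s, e) in enumerate(instances) if s <= t <= e}
        assignment = {i: sw for i, sw in assignment.items() if i in covering}
        for i in sorted(covering):
            if i not in assignment:
                free = [sw for sw in range(1, num_switches + 1)
                        if sw not in assignment.values()]
                if free:                              # overflow beyond capacity is dropped
                    assignment[i] = free[0]           # lowest free switch first
        states[t] = sum(1 << (sw - 1) for sw in assignment.values())
    return states

def decode_states(states):
    """'Decode State' (Fig. 2 (b)): turn the state history into (t_s, t_e) proposals."""
    open_start, proposals, prev = {}, [], 0
    for t, s in enumerate(list(states) + [0]):        # trailing 0 flushes open switches
        for bit in (1, 2):                            # two-switch configuration
            if (prev ^ s) & bit:
                if s & bit:
                    open_start[bit] = t               # switch turned on: action starts
                else:
                    proposals.append((open_start.pop(bit), t - 1))
        prev = int(s)
    return proposals

# Round trip: two overlapping instances survive encoding and decoding.
gt = [(2, 8), (5, 12)]
assert decode_states(encode_states(gt, 16)) == gt
```
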
# 4 Experiments

In this section, we present the experimental results and ablation studies of our proposed method, mainly focusing on On-TAL performance. Moreover, we include results from the ODAS benchmark for a comprehensive evaluation.

# 4.1 Datasets and Features

**Datasets** To validate the ActionSwitch framework's efficacy and general applicability, we evaluate our method on a variety of popular TAL datasets, including the standard THUMOS14 [19], the large-scale FineAction [32], and the large egocentric Epic-Kitchens 100 [9]. Additional experimental results on the MUSES [30] dataset and a detailed analysis of each dataset in terms of appropriately evaluating On-TAL are provided in the supplementary materials.

**Features** For the THUMOS14 dataset, we adopt the two-stream TSN model [43] trained on Kinetics [5] to extract features, following [20, 23, 41, 47]. For the MUSES dataset, we use the officially available features. For the FineAction dataset, however, the provided features are too coarse-grained (16 frames per feature vector), which creates an insuperable bottleneck for detecting fine-grained action instances. To address this issue, we follow [1] and increase the temporal resolution of the features by a factor of four. For the Epic-Kitchens 100 dataset, we used publicly available<sup>1</sup> Kinetics400-pretrained SlowFast [12] network weights with stride four to extract features. It is important to note that all experiments in the same table are conducted with the same features, whether for offline or online TAL methods, to ensure a fair comparison.

# 4.2 Evaluation Metric

**F1 score for class-agnostic evaluation** Throughout the TAL literature [28, 29, 37, 39, 54], Average Recall (AR) under varying Intersection over Union (IoU) thresholds is used to evaluate class-agnostic action proposal generation. This metric suits standard TAL, where the output of proposal generators undergoes NMS-like processing to eliminate redundant detections. In the On-TAL context, however, as discussed in Section 3.1, such retrospective modifications to generated proposals are not allowed, making it essential to assess both precision and recall.

Therefore, we use the F1 score as the main evaluation metric for class-agnostic On-TAL to capture the balance between precision and recall. We first run the Hungarian algorithm [26] to obtain an optimal bipartite matching between the ground truth action instances and the predictions based on their temporal IoU (tIoU). A prediction is then considered a true positive if its matched tIoU surpasses a certain threshold. Note that using Hungarian matching here follows the same rationale as its use in popular query-based object detection methods [4]. For concise demonstration, F1 and recall are mainly presented at tIoU = 0.5. F1 and recall at other tIoU thresholds, straightforward pseudocode for the metric calculation, and a discussion of the F1 score and its relationship with the standard classwise mAP metric can be found in the supplementary materials.

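A compact sketch of this evaluation is given below; the authors' exact pseudocode is in their supplementary material, and `linear_sum_assignment` plays the role of the Hungarian algorithm here. `gt` and `pred` are lists of $(t^s, t^e)$ pairs for one video.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def f1_at_tiou(gt, pred, tiou_thresh=0.5):
    """Class-agnostic F1: optimal bipartite matching of (t_s, t_e) pairs on tIoU."""
    if not gt or not pred:
        return 0.0
    iou = np.zeros((len(gt), len(pred)))
    for i, (gs, ge) in enumerate(gt):
        for j, (ps, pe) in enumerate(pred):
            inter = max(0.0, min(ge, pe) - max(gs, ps))
            union = (ge - gs) + (pe - ps) - inter
            iou[i, j] = inter / union if union > 0 else 0.0
    rows, cols = linear_sum_assignment(-iou)          # maximize summed tIoU
    tp = int((iou[rows, cols] >= tiou_thresh).sum())  # matches above the threshold
    precision, recall = tp / len(pred), tp / len(gt)
    return 2 * precision * recall / (precision + recall) if tp else 0.0
```
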
**mAP metric for class-dependent evaluation** To provide an apples-to-apples comparison with previous works, we also include the standard mAP metric, which has been widely used in both the offline TAL [47, 53] and online TAL [20, 23] literature. We report mAP at tIoU thresholds in the set $\{0.3, 0.4, 0.5, 0.6, 0.7\}$ for THUMOS14 and $\{0.5, 0.75, 0.95\}$ for FineAction. The average of these mAPs over multiple tIoUs is also reported for succinct comparison. For the Epic-Kitchens 100 dataset, we evaluate mAP@0.5 using "noun" annotations, as they present a greater challenge than the verb annotations [53]. To evaluate ODAS performance, we measure the point-level average precision (p-AP) and calculate p-mAP by averaging p-AP over all action classes, following previous works [14, 20, 36]. Given that the ODAS task aims at fast detection of action starts, we restrict the offset value to 3 seconds, as detection beyond this threshold is considered too late to be useful.

# 4.3 Implementation Details

**Architecture** Inspired by the latest research [1], a stacked GRU [7] and MLPs with residual connections are used for the uni-directional sequential encoder and the state classifier, respectively. This efficient recurrent design enables our model to process over 500 fps, underscoring its suitability for online applications. Exploring effective architectural design choices is a promising direction, but we stick to this minimalist design in this paper, as it is not our primary focus.

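A minimal sketch of this design is shown below; the feature dimension, hidden size, and layer counts are illustrative assumptions rather than the authors' exact configuration.

```python
import torch
import torch.nn as nn

class StateEmittingOAD(nn.Module):
    """Stacked GRU encoder + residual MLP state classifier (cf. Fig. 2 (b))."""

    def __init__(self, feat_dim=2048, hidden=1024, n_state=4, n_layers=2):
        super().__init__()
        self.gru = nn.GRU(feat_dim, hidden, num_layers=n_layers, batch_first=True)
        self.mlp = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(),
                                 nn.Linear(hidden, hidden))
        self.head = nn.Linear(hidden, n_state)

    def forward(self, feats, h0=None):
        # feats: (B, L, feat_dim); the GRU is uni-directional, i.e. causal.
        g, h = self.gru(feats, h0)
        g = g + self.mlp(g)              # MLP with a residual connection
        return self.head(g), h           # framewise state logits, recurrent state
```
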
**Class label** While the ActionSwitch framework focuses on class-agnostic action instances and our main F1 metric does not require class or confidence scores for evaluation, class labels and corresponding confidence scores are necessary to measure per-class mAP. Hence, we train an extra classifier that takes feature sequences as input and predicts class labels for the given sequences. We adopt a vanilla transformer [42] as its architecture, and the max class logit is directly treated as the confidence score. To ensure a fair comparison, we apply this same classifier to previous class-agnostic methods, resulting in slightly better performance than reported in the original papers.

# 4.4 Main Results

Tab. 1, Tab. 2, and Tab. 3 present the main experimental results of our ActionSwitch framework. For clarity, the performance of several state-of-the-art offline TAL methods is also reported. Here, the standard mAP metric is observed to correlate more strongly with recall than with precision (see the supplementary materials for further discussion). Therefore, for offline TAL methods, while using all action proposals without filtering boosts mAP scores, it becomes crucial to eliminate low-confidence proposals to achieve a reasonable F1 score.

While there is still a performance gap compared to the state-of-the-art offline TAL method [53], ActionSwitch sets a new standard for both mAP and F1 scores among OAD-extended On-TAL methods. Unlike CAG-QIL [20], which requires two-stage training, and SimOn [41], which depends on an On-TAL-specific architecture, ActionSwitch offers a straightforward extension from the OAD framework to On-TAL, with a minor modification of the output layer and the incorporation of an additional loss term. Notably, as the number of classes grows, SimOn's class-aware grouping strategy significantly falters, producing an excessive number of proposals with low precision (Tab. 1 and 2), which shows the limitations of classwise threshold-based grouping. In contrast, our method shows balanced precision and recall across all the datasets, and the use of the argmax operation streamlines the process by eliminating the need to predefine a threshold.

Table 1: Comparison to other TAL methods on the FineAction dataset [32]. We report mAP at different tIoU thresholds and average mAP in [0.5:0.05:0.95]. $\dagger$ indicates the results with the optimal threshold that drops low-confidence proposals and achieves the best F1 score. For each metric, the best is bolded.

<table><tr><td colspan="2">Setting</td><td>Method</td><td>F1</td><td>Recall</td><td>mAP@0.5</td><td>0.75</td><td>0.95</td><td>Avg.</td></tr><tr><td rowspan="2" colspan="2">Offline TAL</td><td>ActionFormer [53]</td><td>3.62</td><td>62.83</td><td>21.21</td><td>11.02</td><td>1.68</td><td>11.74</td></tr><tr><td>ActionFormer&dagger; [53]</td><td>30.14</td><td>25.25</td><td>14.13</td><td>9.05</td><td>1.57</td><td>9.16</td></tr><tr><td rowspan="5">Online TAL</td><td rowspan="2">TAL-Extension</td><td>OAT (downsampled by 4) [23]</td><td>18.64</td><td>38.10</td><td>8.88</td><td>1.56</td><td>0.02</td><td>3.04</td></tr><tr><td>OAT [23]</td><td>7.21</td><td>8.14</td><td>1.19</td><td>0.12</td><td>0.00</td><td>0.34</td></tr><tr><td rowspan="3">OAD-Extension</td><td>CAG-QIL [20]</td><td>15.67</td><td>12.73</td><td>8.00</td><td>4.07</td><td>1.05</td><td>4.45</td></tr><tr><td>SimOn [41]</td><td>10.53</td><td>18.06</td><td>7.95</td><td>3.53</td><td>0.98</td><td>4.12</td></tr><tr><td>ActionSwitch (Ours)</td><td>19.44</td><td>21.76</td><td>10.58</td><td>4.71</td><td>0.64</td><td>5.36</td></tr></table>

Table 2: Comparison on the Epic-Kitchens 100 dataset [9]. # proposal denotes the number of generated proposals of each method, and # ground truth refers to the number of ground truth proposals. We report class-specific mAP at tIoU = 0.5 using "noun" annotations. SimOn [41] and OAT [23] exploit class information for action instance generation, in contrast to OAD-Grouping [20], CAG-QIL [20], and our method, which do not. For each metric, the best is bolded.

<table><tr><td>Setting</td><td>Method</td><td>F1</td><td>Precision</td><td>Recall</td><td>mAP@0.5</td><td># proposal</td><td># ground truth</td></tr><tr><td>TAL-extension</td><td>OAT [23]</td><td>27.583</td><td>17.595</td><td>63.798</td><td>3.296</td><td>35054</td><td rowspan="5">9668</td></tr><tr><td rowspan="4">OAD-extension</td><td>CAG-QIL [20]</td><td>23.117</td><td>21.347</td><td>25.206</td><td>2.442</td><td>11416</td></tr><tr><td>SimOn [41]</td><td>4.395</td><td>2.351</td><td>33.481</td><td>1.846</td><td>137685</td></tr><tr><td>OAD-Grouping [20]</td><td>21.416</td><td>25.533</td><td>18.442</td><td>2.267</td><td>6983</td></tr><tr><td>ActionSwitch (Ours)</td><td>32.444</td><td>29.858</td><td>35.519</td><td>3.597</td><td>11501</td></tr></table>

Table 3: Comparison to other TAL methods on the THUMOS14 dataset [19]. We report mAP at different tIoU thresholds and average mAP in [0.3:0.1:0.7]. In the offline TAL results, $\dagger$ indicates the results with the optimal threshold that drops low-confidence proposals and achieves the best F1 score. OAT [23] performs On-TAL within a more flexible constraint than other works [20, 41] (Section 3.1). For each metric, the best is bolded.

<table><tr><td colspan="2">Setting</td><td>Method</td><td>F1</td><td>Recall</td><td>mAP@0.3</td><td>0.4</td><td>0.5</td><td>0.6</td><td>0.7</td><td>Avg.</td></tr><tr><td rowspan="6" colspan="2">Offline TAL</td><td>G-TAD [47]</td><td>6.4</td><td>83.4</td><td>58.8</td><td>52.2</td><td>43.6</td><td>33.3</td><td>22.9</td><td>42.2</td></tr><tr><td>TadTR [31]</td><td>3.5</td><td>88.1</td><td>68.8</td><td>62.7</td><td>55.9</td><td>45.3</td><td>32.3</td><td>53.0</td></tr><tr><td>ActionFormer [53]</td><td>13.8</td><td>94.0</td><td>77.5</td><td>73.5</td><td>66.0</td><td>55.4</td><td>40.6</td><td>62.6</td></tr><tr><td>G-TAD&dagger; [47]</td><td>51.1</td><td>49.9</td><td>45.6</td><td>40.7</td><td>34.4</td><td>26.7</td><td>18.7</td><td>33.2</td></tr><tr><td>TadTR&dagger; [31]</td><td>68.4</td><td>63.0</td><td>55.4</td><td>51.4</td><td>46.5</td><td>38.4</td><td>28.0</td><td>43.9</td></tr><tr><td>ActionFormer&dagger; [53]</td><td>75.5</td><td>73.6</td><td>66.9</td><td>63.2</td><td>56.3</td><td>47.4</td><td>35.1</td><td>53.8</td></tr><tr><td rowspan="4">Online TAL</td><td>TAL-Extension</td><td>OAT [23]</td><td>62.9</td><td>70.3</td><td>64.1</td><td>57.4</td><td>47.8</td><td>36.7</td><td>20.3</td><td>45.3</td></tr><tr><td rowspan="3">OAD-Extension</td><td>CAG-QIL [20]</td><td>45.8</td><td>50.4</td><td>48.8</td><td>40.9</td><td>33.6</td><td>24.9</td><td>17.3</td><td>33.1</td></tr><tr><td>SimOn [41]</td><td>28.7</td><td>52.0</td><td>52.2</td><td>43.6</td><td>32.3</td><td>22.5</td><td>14.2</td><td>33.0</td></tr><tr><td>ActionSwitch (Ours)</td><td>53.2</td><td>60.1</td><td>57.2</td><td>50.8</td><td>41.7</td><td>30.5</td><td>21.3</td><td>40.3</td></tr></table>

Furthermore, the TAL-extension method, OAT [23], achieves impressive performance on relatively small datasets (e.g., THUMOS14) but suffers from training instability when applied to large-scale datasets containing temporally varying action instances. For example, on the FineAction dataset (Tab. 1), OAT [23] performs reasonably well only when the temporal resolution is downsampled, and even in that case, our ActionSwitch performs better.

Also, on Epic-Kitchens 100 (Tab. 2), ActionSwitch surpasses OAT in both F1 and mAP scores, as OAT shows a stark imbalance between precision and recall.

Table 5: Ablation study of the number of switches in ActionSwitch and of the conservativeness loss, whose weight $\alpha$ is shown in Eq. 2.

<table><tr><td rowspan="2"># switch</td><td colspan="5">THUMOS14 [19]</td><td colspan="5">FineAction [32]</td></tr><tr><td>&alpha;</td><td>F1</td><td>Recall</td><td>Precision</td><td># proposal</td><td>&alpha;</td><td>F1</td><td>Recall</td><td>Precision</td><td># proposal</td></tr><tr><td rowspan="4">1</td><td>0.000</td><td>42.67</td><td>59.49</td><td>33.26</td><td>6006</td><td>0.000</td><td>14.53</td><td>14.71</td><td>14.35</td><td>24846</td></tr><tr><td>0.010</td><td>49.25</td><td>57.77</td><td>42.92</td><td>4519</td><td>0.005</td><td>15.78</td><td>14.45</td><td>17.37</td><td>20159</td></tr><tr><td>0.025</td><td>50.41</td><td>56.04</td><td>45.81</td><td>4108</td><td>0.010</td><td>16.53</td><td>13.28</td><td>21.90</td><td>14694</td></tr><tr><td>0.050</td><td>49.91</td><td>49.61</td><td>50.21</td><td>3318</td><td>0.025</td><td>9.34</td><td>6.28</td><td>18.17</td><td>8380</td></tr><tr><td rowspan="4">2</td><td>0.000</td><td>45.87</td><td>63.96</td><td>35.75</td><td>6007</td><td>0.000</td><td>17.57</td><td>22.36</td><td>14.46</td><td>37469</td></tr><tr><td>0.010</td><td>49.28</td><td>61.25</td><td>41.23</td><td>4989</td><td>0.005</td><td>18.32</td><td>22.00</td><td>15.70</td><td>33949</td></tr><tr><td>0.025</td><td>53.20</td><td>60.10</td><td>47.73</td><td>4228</td><td>0.010</td><td>19.44</td><td>21.76</td><td>17.56</td><td>29972</td></tr><tr><td>0.050</td><td>50.20</td><td>52.76</td><td>47.88</td><td>3701</td><td>0.025</td><td>18.71</td><td>16.76</td><td>21.15</td><td>19205</td></tr></table>

This is significant as our ActionSwitch also has the inherent ability to handle ODAS tasks, a functionality not supported by OAT (Section 3.1).

We present the ODAS performance in Tab. 4, which shows that our method achieves a new state-of-the-art performance. Given that early detection is crucial in ODAS, our method's superior performance at lower offsets highlights its strength in scenarios where immediate action detection is vital. Note that the reported mAP is much higher than in the original papers [20, 41], as we utilized Kinetics-pretrained features in all three models.

Table 4: Comparison with other SOTA ODAS methods on the THUMOS14 dataset [19].

<table><tr><td rowspan="2">ODAS Method</td><td colspan="3">Offsets</td></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>CAG-QIL [20]</td><td>28.30</td><td>42.86</td><td>50.12</td></tr><tr><td>SimOn [41]</td><td>31.45</td><td>46.22</td><td>54.11</td></tr><tr><td>ActionSwitch (Ours)</td><td>33.06</td><td>47.06</td><td>54.44</td></tr></table>

# 4.5 Ablation Studies

**Number of switches and conservativeness loss** Tab. 5 presents comprehensive ablation studies of our ActionSwitch setting. The 1-switch setting with $\alpha = 0$ is considered the baseline for our approach, which amounts to a naive OAD-grouping extension. By adopting one additional switch, the model can capture overlapping action instances. The recall of the 2-switch setting is much higher than that of its 1-switch counterpart, supporting the aforementioned claim. This tendency is even more apparent on the FineAction dataset, which contains more overlapping action instances than THUMOS14.

However, precision remains low due to the grouping errors to which OAD-extension On-TAL approaches are prone. Here, the conservativeness loss significantly alleviates these grouping errors. We observe a trade-off between precision and recall when varying the $\alpha$ value: a higher $\alpha$ indicates a stronger conservativeness prior, resulting in better precision but lower recall, and vice versa. An adequate $\alpha$ value balances precision and recall, yielding the best F1 score.

Table 6: Multiple switch configurations tested on the MultiTHUMOS [51] dataset. An ablation study on the conservativeness loss term $\mathcal{L}_c$ ($\alpha = 0.025$) is also conducted in multi-switch settings. A good F1 score is observed when the number of generated predictions (# proposal) is close to the number of ground truth instances (# GT proposal).

<table><tr><td>Method</td><td># switch</td><td>L<sub>c</sub></td><td>F1</td><td>Precision</td><td>Recall</td><td># proposal</td><td># GT proposal</td></tr><tr><td rowspan="6">ActionSwitch (Ours)</td><td>1</td><td>o</td><td>16.51</td><td>42.85</td><td>10.22</td><td>4816</td><td rowspan="9">20186</td></tr><tr><td>2</td><td>o</td><td>30.82</td><td>34.37</td><td>27.94</td><td>16405</td></tr><tr><td>3</td><td>o</td><td>32.76</td><td>30.04</td><td>36.02</td><td>24211</td></tr><tr><td>4</td><td>o</td><td>29.25</td><td>22.67</td><td>41.21</td><td>36682</td></tr><tr><td>3</td><td>x</td><td>26.63</td><td>20.53</td><td>37.91</td><td>37287</td></tr><tr><td>4</td><td>x</td><td>23.16</td><td>16.02</td><td>41.86</td><td>52758</td></tr><tr><td>CAG-QIL [20]</td><td>-</td><td>-</td><td>22.10</td><td>48.7</td><td>14.29</td><td>5926</td></tr><tr><td>SimOn [41]</td><td>-</td><td>-</td><td>20.59</td><td>12.42</td><td>60.17</td><td>97759</td></tr><tr><td>OAT [23]</td><td>-</td><td>-</td><td>29.63</td><td>18.48</td><td>74.74</td><td>81626</td></tr></table>

**Can we include 3 or more switches?** Expanding our framework to incorporate three or more action switches is straightforward; it only requires adding additional states corresponding to the combinations of switches. Yet, in most of the established TAL benchmark datasets [9,19,32], over $99\%$ of the frames have no more than two simultaneous actions, providing almost no ground truth data for scenarios where a third switch would be activated.

The MultiTHUMOS dataset [51], however, is characterized by significant overlap among multiple action instances, despite its smaller scale. Therefore, we chose this dataset to perform an ablation study on configurations with three or more switches, with the results presented in Tab. 6. The results not only show that our method establishes a new state-of-the-art performance on the MultiTHUMOS dataset, but also highlight critical insights. Similar patterns to those in Tab. 5 emerge here, but due to the dataset's dense overlaps, the optimal F1 score occurs with a 3-switch setup. More switches enhance the model's ability to detect additional overlapping actions, as indicated by the improved recall, but they come with a precision trade-off. Introducing the conservativeness loss as an auxiliary term mitigates this issue; without it, the model overestimates action instances, as is evident from comparing the number of predicted proposals with the actual ground truth counts.

Fig. 4: Qualitative results of On-TAL models. Overlapping action instances are placed on separate lines. We show the ground truth (GT) and the output of the 2-state (2S), 4-state (4S), conservativeness loss (Cons), SimOn [41], and CAG-QIL [20] models. Refer to Sec. 4.6 for an analysis of the four cases (C1~C4) annotated with red boxes.

# 4.6 Qualitative Results

In Fig. 4, we demonstrate the effectiveness of our proposed components. Specifically, C1 illustrates how the conservativeness loss effectively addresses the action fragmentation issue. C2 and C3 demonstrate the ability of our ActionSwitch framework to detect overlapping instances. C3 shows that SimOn [41] can detect multiple overlapping actions, but its detection is restricted by the predefined action classes, preventing it from detecting overlapping instances of the same action class. In C4, the conservativeness loss leads the model to keep predicting state 1 instead of 3, preventing excessive proposal generation.

# 5 Discussion and Conclusion

**Towards an Open-Vocabulary Framework** Existing On-TAL approaches are evaluated in a closed-vocabulary setting with predefined action classes. However, there is growing interest in open-vocabulary research [17, 46, 56], as real-world scenarios demand deeper comprehension beyond the identification of the predefined actions and events of a dataset. Class-aware On-TAL methods [23, 41] struggle in this context due to their reliance on preset classes. ActionSwitch, on the other hand, presents a versatile, class-agnostic framework that facilitates integration with video-language models [34]. It paves the way for expansive vocabulary classification, a promising direction for future research.

**Is a different instantiation possible?** Instead of the state-emitting OAD approach, one can treat the output of the OAD model as a state transition, where 0 refers to maintaining the current state, while 1 and 2 indicate state transitions made by manipulating switch 1 and switch 2, respectively. While this formulation can reduce the required number of states, it brings severe class imbalance due to the sparsity of action boundaries (Sec. 3.3). Developing reinforcement learning algorithms [38] with this formulation would be promising future research.

# 6 Conclusion

We introduce ActionSwitch, the first On-TAL method capable of detecting overlapping action instances without relying on class information. Coupled with the novel conservativeness loss, experiments on multiple datasets demonstrate its effectiveness despite its simplicity. We hope that ActionSwitch can serve as a strong baseline and inspire further On-TAL research.

**Acknowledgement** This work was supported by the Institute of Information & Communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (No. RS-2022-II220113, Developing a Sustainable Collaborative Multi-modal Lifelong Learning Framework), the National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (NRF-2022R1A2C2004509), and the Artificial Intelligence Graduate School Program, Yonsei University, under Grant 2020-0-01361.

# References

1. An, J., Kang, H., Han, S.H., Yang, M.H., Kim, S.J.: Miniroad: Minimal rnns framework for online action detection. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2023) 1, 4, 9, 10
2. Bar, A., Wang, X., Kantorov, V., Reed, C.J., Herzig, R., Chechik, G., Rohrbach, A., Darrell, T., Globerson, A.: Detreg: Unsupervised pretraining with region priors for object detection. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022) 5
3. Bodla, N., Singh, B., Chellappa, R., Davis, L.S.: Soft-nms - improving object detection with one line of code. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2017) 2
4. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: European Conference on Computer Vision (ECCV) (2020) 9
5. Carreira, J., Zisserman, A.: Quo vadis, action recognition? a new model and the kinetics dataset. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2017) 8
6. Chen, J., Mittal, G., Yu, Y., Kong, Y., Chen, M.: Gatehub: Gated history unit with background suppression for online action detection. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022) 4
7. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. In: NIPS 2014 Workshop on Deep Learning, December 2014 (2014) 10
8. Dai, J., Li, Y., He, K., Sun, J.: R-fcn: Object detection via region-based fully convolutional networks. Advances in Neural Information Processing Systems (NeurIPS) (2016) 4
9. Damen, D., Doughty, H., Farinella, G.M., Furnari, A., Kazakos, E., Ma, J., Moltisanti, D., Munro, J., Perrett, T., Price, W., et al.: Rescaling egocentric vision. arXiv preprint arXiv:2006.13256 (2020) 1, 2, 3, 4, 8, 11, 13
10. De Geest, R., Gavves, E., Ghodrati, A., Li, Z., Snoek, C., Tuytelaars, T.: Online action detection. In: European Conference on Computer Vision (ECCV) (2016) 1, 4
11. Eun, H., Moon, J., Park, J., Jung, C., Kim, C.: Learning to discriminate information for online action detection. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2020) 4
12. Feichtenhofer, C., Fan, H., Malik, J., He, K.: Slowfast networks for video recognition. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2019) 9
13. Gao, J., Yang, Z., Nevatia, R.: Red: Reinforced encoder-decoder networks for action anticipation. The British Machine Vision Conference (BMVC) (2017) 1
14. Gao, M., Xu, M., Davis, L.S., Socher, R., Xiong, C.: Startnet: Online detection of action start in untrimmed videos. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2019) 4, 9
15. Gao, M., Zhou, Y., Xu, R., Socher, R., Xiong, C.: Woad: Weakly supervised online action detection in untrimmed videos. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2021) 1
16. Girshick, R.: Fast r-cnn. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2015) 4
17. Gu, X., Lin, T.Y., Kuo, W., Cui, Y.: Open-vocabulary object detection via vision and language knowledge distillation. In: ICLR (2021) 14
18. Jaiswal, A., Wu, Y., Natarajan, P., Natarajan, P.: Class-agnostic object detection. In: IEEE Winter Conference on Applications of Computer Vision (WACV) (2021) 5
19. Jiang, Y.G., Liu, J., Roshan Zamir, A., Toderici, G., Laptev, I., Shah, M., Sukthankar, R.: THUMOS challenge: Action recognition with a large number of classes. http://crcv.ucf.edu/THUMOS14/ (2014) 3, 4, 8, 11, 12, 13
20. Kang, H., Kim, K., Ko, Y., Kim, S.J.: Cag-qil: Context-aware actionness grouping via q imitation learning for online temporal action localization. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2021) 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13
21. Kim, D., Lin, T.Y., Angelova, A., Kweon, I.S., Kuo, W.: Learning open-world object proposals without learning to classify. IEEE Robotics and Automation Letters (2022) 5
22. Kim, J., Misu, T., Chen, Y.T., Tawari, A., Canny, J.: Grounding human-to-vehicle advice for self-driving vehicles. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2019) 1
23. Kim, Y.H., Kang, H., Kim, S.J.: A sliding window scheme for online temporal action localization. In: European Conference on Computer Vision (ECCV) (2022) 1, 4, 5, 7, 8, 9, 10, 11, 13, 14
24. Kim, Y.H., Nam, S., Kim, S.J.: Temporally smooth online action detection using cycle-consistent future anticipation. Pattern Recognition (2021) 1
25. Kim, Y.H., Nam, S., Kim, S.J.: 2pesnet: Towards online processing of temporal action localization. Pattern Recognition (2022) 1
26. Kuhn, H.W.: The hungarian method for the assignment problem. Naval Research Logistics Quarterly (1955) 9
27. Lee, Y.: Scaling Robot Learning with Skills. Ph.D. thesis, University of Southern California, Viterbi School of Engineering (2022) 1
28. Lin, T., Liu, X., Li, X., Ding, E., Wen, S.: Bmn: Boundary-matching network for temporal action proposal generation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2019) 1, 4, 5, 8, 9
29. Lin, T., Zhao, X., Su, H., Wang, C., Yang, M.: Bsn: Boundary sensitive network for temporal action proposal generation. In: European Conference on Computer Vision (ECCV) (2018) 8, 9
|
| 257 |
+
30. Liu, X., Hu, Y., Bai, S., Ding, F., Bai, X., Torr, P.H.: Multi-shot temporal event localization: a benchmark. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2021) 4, 8
|
| 258 |
+
31. Liu, X., Wang, Q., Hu, Y., Tang, X., Zhang, S., Bai, S., Bai, X.: End-to-end temporal action detection with transformer. IEEE Transactions on Image Processing 31, 5427-5441 (2022) 1, 11
|
| 259 |
+
|
| 260 |
+
32. Liu, Y., Wang, L., Wang, Y., Ma, X., Qiao, Y.: Fineaction: A fine-grained video dataset for temporal action localization. IEEE Transactions on Image Processing 31, 6937-6950 (2022) 3, 4, 8, 10, 12, 13
|
| 261 |
+
33. Maaz, M., Rasheed, H., Khan, S., Khan, F.S., Anwer, R.M., Yang, M.H.: Class-agnostic object detection with multi-modal transformer. In: European Conference on Computer Vision (ECCV) (2022) 5
|
| 262 |
+
34. Rasheed, H., Khattak, M.U., Maaz, M., Khan, S., Khan, F.S.: Fine-tuned clip models are efficient video learners. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023) 5, 14
|
| 263 |
+
35. Shou, M.Z., Lei, S.W., Wang, W., Ghadiyaram, D., Feiszli, M.: Generic event boundary detection: A benchmark for event segmentation. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2021) 5
|
| 264 |
+
36. Shou, Z., Pan, J., Chan, J., Miyazawa, K., Mansour, H., Vetro, A., Giro-i Nieto, X., Chang, S.F.: Online detection of action start in untrimmed, streaming videos. In: European Conference on Computer Vision (ECCV) (2018) 4, 5, 9
|
| 265 |
+
37. Su, H., Gan, W., Wu, W., Qiao, Y., Yan, J.: Bsn++: Complementary boundary regressor with scale-balanced relation modeling for temporal action proposal generation. In: Association for the Advancement of Artificial Intelligence (AAAI) (2021) 1, 9
|
| 266 |
+
38. Sutton, R.S., Barto, A.G.: Reinforcement Learning: An Introduction (2018) 14
|
| 267 |
+
39. Tan, J., Tang, J., Wang, L., Wu, G.: Relaxed transformer decoders for direct action proposal generation. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2021) 9
|
| 268 |
+
40. Tan, M., Pang, R., Le, Q.V.: Efficientdet: Scalable and efficient object detection. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2020) 4
|
| 269 |
+
41. Tang, T.N., Park, J., Kim, K., Sohn, K.: Simon: A simple framework for online temporal action localization. arXiv preprint arXiv:2211.04905 (2022) 1, 2, 3, 4, 5, 7, 8, 10, 11, 12, 13, 14
|
| 270 |
+
42. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Advances in Neural Information Processing Systems (NeurIPS) (2017) 10
|
| 271 |
+
43. Wang, L., Xiong, Y., Wang, Z., Qiao, Y., Lin, D., Tang, X., Van Gool, L.: Temporal segment networks: Towards good practices for deep action recognition. In: European Conference on Computer Vision (ECCV) (2016) 8
|
| 272 |
+
44. Wang, W., Feiszli, M., Wang, H., Malik, J., Tran, D.: Open-world instance segmentation: Exploiting pseudo ground truth from learned pairwise affinity. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022) 5
|
| 273 |
+
45. Wang, X., Zhang, S., Qing, Z., Shao, Y., Zuo, Z., Gao, C., Sang, N.: Oadtr: Online action detection with transformers. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2021) 1, 4, 5
|
| 274 |
+
46. Wu, X., Zhu, F., Zhao, R., Li, H.: Cora: Adapting clip for open-vocabulary detection with region prompting and anchor pre-matching. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 7031-7040 (2023) 14
|
| 275 |
+
47. Xu, M., Zhao, C., Rojas, D.S., Thabet, A., Ghanem, B.: G-tad: Sub-graph localization for temporal action detection. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2020) 1, 4, 5, 8, 9, 11
|
| 276 |
+
48. Xu, M., Gao, M., Chen, Y.T., Davis, L.S., Crandall, D.J.: Temporal recurrent networks for online action detection. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2019) 1, 4
|
| 277 |
+
|
| 278 |
+
49. Xu, M., Xiong, Y., Chen, H., Li, X., Xia, W., Tu, Z., Soatto, S.: Long short-term transformer for online action detection. Advances in Neural Information Processing Systems (NeurIPS) (2021) 1, 4
|
| 279 |
+
50. Yang, L., Han, J., Zhang, D.: Colar: Effective and efficient online action detection by consulting exemplars. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022) 4
|
| 280 |
+
51. Yeung, S., Russakovsky, O., Jin, N., Andriluka, M., Mori, G., Fei-Fei, L.: Every moment counts: Dense detailed labeling of actions in complex videos. International Journal of Computer Vision (IJCV) (2018) 2, 3, 4, 13
|
| 281 |
+
52. Zeng, R., Huang, W., Tan, M., Rong, Y., Zhao, P., Huang, J., Gan, C.: Graph convolutional networks for temporal action localization. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2019) 4
|
| 282 |
+
53. Zhang, C.L., Wu, J., Li, Y.: Actionformer: Localizing moments of actions with transformers. In: European Conference on Computer Vision (ECCV) (2022) 1, 9, 10, 11
|
| 283 |
+
54. Zhao, P., Xie, L., Ju, C., Zhang, Y., Wang, Y., Tian, Q.: Bottom-up temporal action localization with mutual regularization. In: European Conference on Computer Vision (ECCV) 9
|
| 284 |
+
55. Zhao, Y., Krahenbuhl, P.: Real-time online video detection with temporal smoothing transformers. In: European Conference on Computer Vision (ECCV) (2022) 4
|
| 285 |
+
56. Zhou, X., Girdhar, R., Joulin, A., Krahenbuhl, P., Misra, I.: Detecting twenty-thousand classes using image-level supervision. In: European Conference on Computer Vision (ECCV). pp. 350-368. Springer (2022) 14
actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e4cc156f7c24835a5f3d2886e65400c354f752c75ee3ac22b782b8401056b2f
+size 440821
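The three added lines above are not the archive itself but a Git LFS pointer file: large binaries are committed as small text stubs following the pointer spec named in the `version` line, and the real payload is fetched by hash on checkout. A minimal sketch (our illustration) for reading such a pointer:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a Git LFS pointer file (key-value lines: version, oid, size)."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)  # e.g. "sha256", "<hex digest>"
    return {"version": fields["version"], "algo": algo,
            "digest": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:3e4cc156f7c24835a5f3d2886e65400c354f752c75ee3ac22b782b8401056b2f
size 440821"""
print(parse_lfs_pointer(pointer)["size"])  # 440821
```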
actionswitchclassagnosticdetectionofsimultaneousactionsinstreamingvideos/layout.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38f1973de3fb2c9ea2a33596a31ac1214a695f3ca181a1d1ae8b1cc30bc0db79
+size 363707
actionvosactionsaspromptsforvideoobjectsegmentation/7dd79ec8-6b34-4132-a4ff-2ed3124bdaf0_content_list.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2643a54956139f0cd9e7211330fc33c1e7e8efcae8dcc8058c9041af0ee4c73e
+size 93249
actionvosactionsaspromptsforvideoobjectsegmentation/7dd79ec8-6b34-4132-a4ff-2ed3124bdaf0_model.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e102bbe9a8a9f3870afa96179a8d9f296744acb981c5734352e253c2d8998e54
+size 123248